uuid int64 541B 3,299B | dataset stringclasses 1
value | text stringlengths 1 4.29M |
|---|---|---|
1,314,259,994,963 | arxiv | \section{Introduction}
The Dirichlet eta function is defined by the following
Dirichlet series, which converges for any complex number
having real part greater than $0$
\begin{equation}\label{eq:dirichlet}
\eta(s) = \sum_{n=1}^{\infty} \frac{(-1)^{n-1}}{n^s} = \frac{1}{1^s} - \frac{1}{2^s} + \frac{1}{3^s} - \frac{1}{4^s} + \cdots
\end{equation}
This Dirichlet series is the alternating sum corresponding
to the Dirichlet series expansion of the Riemann zeta function
$\zeta(s)$ and for this reason the Dirichlet eta function
is also known as the alternating zeta function. The following
relation holds:
$$\eta(s) = \left(1-2^{1-s}\right) \zeta(s).$$
The starting point of our work is a result from \cite{biane2001probability} (p. 456).
In their paper, they show that the sum (\ref{eq:dirichlet})
can be approximated using an array of coefficients $(a_{n,N},\, 1\leq n\leq N)$.
Let
\begin{equation}\label{eq:anN}
a_{n,N} \overset{\mathrm{def}}{=} \frac{1}{2} \prod_{j=1,\,j\neq n}^N \frac{ j^2}{ j^2-n^2 }
= (-1)^{n-1}\frac{\dbinom{2N}{N-n}}{\dbinom{2N}{N}}.
\end{equation}
then
\begin{theorem}[Biane et al.]\label{th:biane}
For $\varepsilon_i,\, 1\leq i\leq N$ independent standard exponential variables, and $\Re(s)>-2N$
\begin{equation}\label{eq:expectation}
\Esp{\left( \sum_{n=1}^N \frac{\varepsilon_n}{n^2} \right)^{\frac{s}{2}} } = s \Gamma\left(\frac{s}{2}\right) \sum_{n=1}^N \frac{a_{n,N}}{n^s}
\end{equation}
where the $a_{n,N}$ are defined by (\ref{eq:anN}), and
\begin{equation}\label{eq:cvseries}
\eta_N(s) \overset{\mathrm{def}}{=} \sum_{n=1}^N \frac{a_{n,N}}{n^s} \longrightarrow \eta(s)\mbox{ as } N\rightarrow\infty
\end{equation}
uniformly on every compact subset of $\Cc$.
\end{theorem}
\begin{remark}
The signs of the coefficients $a_{n,N}$ have been modified with respect
to those given in the previously cited article, in order to obtain convergence
in equation (\ref{eq:cvseries}) towards $\eta(s)$ rather than $-\eta(s)$.
\end{remark}
The second part of the theorem shows that, for any
$s\in\Cc$, the alternating zeta function can be obtained by weighting
the first $N$ terms of the original series which is defined only for
$\Re(s)>0$. In this paper, we will show that the weighted finite series
$\eta_N(s)$ defined in (\ref{eq:cvseries}) can be written as
a determinant (section \ref{subsec:determinant},
proposition \ref{prop:determinant}). We will then show that this determinant
can be written as the expectation of a functional of a Dixon-Anderson random vector
(section \ref{subsec:integral}, theorem \ref{th:Dixon}).
This result is new (to the best of our knowledge) and seems to show that there is a
relation between the zeta function and the theory of random matrices.
In section \ref{sec:AltenatingSeries}
we give a generalization of the representations given in section
\ref{sec:Representations} for general series and, by computing the expectation of
these representations, we obtain the evaluation of two Selberg integrals
involving a generalized Vandermonde determinant
(theorem \ref{th:Averaged-Gen-Vandermonde}).
\section{New Representations of the Alternating Zeta Function}
\label{sec:Representations}
\subsection{Determinant Representation}
\label{subsec:determinant}
We start with the following result
\begin{proposition}\label{prop:determinant}
With the notations and conditions given in theorem \ref{th:biane} we have
$$
\eta_N(s) =
\frac{1}{2}
\begin{vmatrix}
1 & \frac{1}{3!} & \ldots & \frac{1}{(2N-1)!} \\
2^{1-s} & \frac{ 2^3}{3!} & \ldots & \frac{2^{2N-1}}{(2N-1)!} \\
\vdots & \vdots & & \vdots \\
N^{1-s} & \frac{N^3}{3!} & \ldots & \frac{N^{2N-1}}{(2N-1)!}
\end{vmatrix}.
$$
\end{proposition}
\begin{proof}
Let $a_n = n^{2}$, $Q_N(x) = \prod_{n=1}^N \left(1-x/a_n \right)$,
and $P_N$ a polynomial of degree $N-1$ over $\Cc[X]$, with the
convention $ P_N(x) = \sum_{n=1}^N c_{n,N} x^{n-1}$. An adaptation
of the arguments given in annex \ref{sec:pfd} shows that
$$
\frac{P_N(x)}{Q_N(x)} = \sum_{n=1}^N P_N\left({a_n}\right)
\prod_{ \substack{j=1\\ j\neq n} }^N
\left(\frac{1}{\left(1-\frac{a_n}{a_j}\right)} \frac{1}{1-\frac{x}{a_j}}\right).
$$
Setting $x=0$, we have $Q_N(0)=1$ and we get
$$
P_N(0) = 2 \sum_{n=1}^N a_{n,N} P_N\left(n^2\right).
$$
We choose $P_N$ as the polynomial of degree $N-1$ such that
$P_N(a_n) = n^{-s}$ for $n=1,\ldots,N$.
The coefficients $(c_{n,N})_{n=1}^N$ of $P_N$ are solutions of the
Vandermonde system
\begin{equation}
\label{eq:system}
\begin{pmatrix}
1 & 1 & \ldots & 1 \\
1 & 2^2 & \ldots & 2^{2(N-1)} \\
\vdots & \vdots & & \vdots \\
1 & N^2 & \ldots & N^{2(N-1)}
\end{pmatrix}
\begin{pmatrix}
c_{1,N}\\ c_{2,N}\\ \vdots \\ c_{N,N}
\end{pmatrix}
=
\begin{pmatrix}
1\\ 2^{-s}\\ \vdots \\ N^{-s}
\end{pmatrix}.
\end{equation}
The Vandermonde matrix is invertible (see annex \ref{app:inverse})
showing that the polynomial $P_N$ is uniquely determined.
We are only interested in $c_{1,N} = P_N(0)$. Let us denote $V_N$ the Vandermonde matrix
in (\ref{eq:system}) and $V^{(s)}_N$ the matrix obtained by replacing the first column
of $V_N$ by the right-hand side.
Using Cramer's rule, we get
\begin{equation}\label{eq:c1N}
c_{1,N} =\frac{
\begin{vmatrix}
1 & 1 & \ldots & 1 \\
2^{-s} & 2^2 & \ldots & 2^{2(N-1)} \\
\vdots & \vdots & & \vdots \\
N^{-s} & N^2 & \ldots & N^{2(N-1)}
\end{vmatrix}
}
{\Det{V_N}} = \frac{\Det{V^{(s)}_N}}{\Det{V_N}}.
\end{equation}
The Vandermonde Determinant of $V_N$ is
\begin{equation}\label{eq:VN}
\Det{V_N}
= \prod_{1 \mathop \le i \mathop < j \mathop \le N} \left({j^2 - i^2}\right)
= \prod_{1 \mathop \le i \mathop < j \mathop \le N} \left({j - i}\right)\left({j + i}\right)
= \prod_{j=2}^{N} (j-1)! \frac{(2j-1)!}{j!}
= \frac{1}{N!} \prod_{n=1}^{N-1} (2n+1)!
\end{equation}
and thus, we have
\begin{equation*}\label{eq:c1Nbis}
c_{1,N} =
\begin{vmatrix}
1 & \frac{1}{3!} & \ldots & \frac{1}{(2N-1)!} \\
\frac{2}{2^s} & \frac{ 2^3}{3!} & \ldots & \frac{2^{2N-1}}{(2N-1)!} \\
\vdots & \vdots & & \vdots \\
\frac{N}{N^s} & \frac{N^3}{3!} & \ldots & \frac{N^{2N-1}}{(2N-1)!}
\end{vmatrix}.
\end{equation*}
ending the proof.
\end{proof}
\begin{remark}
Observe that using the expression (\ref{eq:c1N}),
it is immediate that $\eta_N(s) = \frac{1}{2}$ if $s=0$ and
$\eta_N(s) =0$ if $s=-2,-4,\ldots,-2(N-1)$.
\end{remark}
\begin{remark}
An alternative (and more direct) proof could have been given using the
representation of the generalized Vandermonde determinant given in lemma
\ref{lemma:SumGenVandermonde}.
\end{remark}
\subsection{Probabilistic Representation}
\label{subsec:integral}
We observe that $\Det{V^{(s)}_N}$
(equation \ref{eq:c1N}) is a generalized Vandermonde determinant.
Using the argument of \cite{ZHANG2008300},
we get the following lemma
\begin{lemma}
\label{lemma:Gen-Vandermonde}
For $N>1$, let
$$
{\Gamma_{N-1}\left( \frac{s}{2} \right)}
\overset{\mathrm{def}}{=}\frac{2}{s}
\prod_{n=1}^{N-1}
\frac{\left(1+{\frac {1}{n}}\right)^{s/2}}{1+{\frac {s}{2n}}},\quad s\neq 0,-2,-4,\ldots
$$
then for all $s\in\Cc$, $s\neq -2, -4,\ldots, -2(N-1)$ we have
\begin{equation}\label{eq:DetVsN}
\Det{V^{(s)}_N}=
\frac{2(N-1)! }{s{\Gamma_{N-1}\left( \frac{s}{2} \right)}}
\int_{1}^{2^2}
dx_1
\int_{2^2}^{{3^2}}
dx_2
\ldots
\int_{(N-1)^2}^{{N^2}}
dx_{N-1}
\prod_{1\leq i<j\leq N-1} (x_{j}-x_{i})
\prod_{n=1}^{N-1} \left(\frac{x_n}{n(n+1)}\right)^{s/2}.
\end{equation}
\end{lemma}
\begin{proof}
Observe first that $\left(s\Gamma_{N-1}\left(\frac{s}{2}\right) \right)^{-1}$
is defined for all $N>1$ and all $s\in\Cc$. Next, we have
\begin{eqnarray*}
\Det{V^{(s)}_N} & = &
\begin{vmatrix}
1 & 1 & \ldots & 1 \\
2^{-s} & 2^2 & \ldots & 2^{2(N-1)} \\
3^{-s} & 3^2 & \ldots & 3^{2(N-1)} \\
\vdots & \vdots & & \vdots \\
N^{-s} & N^2 & \ldots & N^{2(N-1)}
\end{vmatrix}
= \prod_{n=2}^N \frac{1}{n^s}\times
\begin{vmatrix}
1 & 1 & \ldots & 1 \\
1 & 2^{2(1+s/2)}& \ldots & 2^{2(N-1+s/2)} \\
1 & 3^{2(1+s/2)}& \ldots & 3^{2(N-1+s/2)} \\
\vdots & \vdots & & \vdots \\
1 & N^{2(1+s/2)}& \ldots & N^{2(N-1+s/2)}
\end{vmatrix}\nonumber\\
& = & \prod_{n=2}^N \frac{1}{n^s}\times
\begin{vmatrix}
2^{2(1+s/2)}-1 & \ldots & 2^{2(N-1+s/2)}-1 \\
3^{2(1+s/2)}- 2^{2(1+s/2)} & \ldots & 3^{2(N-1+s/2)}-2^{2(N-1+s/2)} \\
\vdots & \vdots & \vdots \\
N^{2(1+s/2)}-(N-1)^{2(1+s/2)} & \ldots & N^{2(N-1+s/2)}-(N-1)^{2(N-1+s/2)}
\end{vmatrix}\nonumber\\
& = &
\prod_{n=1}^{N-1}\frac{n+s/2}{(n+1)^{s}}
\times
\begin{vmatrix}
\int_{1}^{2^2} x_1^{s/2}dx_1 & \ldots & \int_{1}^{2^2} x_1^{N-2+s/2}dx_1 \\
\int_{2^2}^{3^2} x_2^{s/2}dx_2& \ldots & \int_{2^2}^{3^2} x_2^{N-2+s/2}dx_2 \\
\vdots & \vdots & \vdots \\
\int_{(N-1)^2}^{N^2} x_{N-1}^{s/2}dx_{N-1} & \ldots & \int_{(N-1)^2}^{N^2} x_{N-1}^{N-2+s/2}dx_{N-1} \\
\end{vmatrix}\nonumber\\
& = &
\prod_{n=1}^{N-1}\frac{n+s/2}{(n+1)^{s}}
\int_{1}^{2^2}dx_1
\int_{2^2}^{{3^2}}dx_2
\ldots\int_{(N-1)^2}^{N^2}dx_{N-1}
\begin{vmatrix}
x_1^{s/2} & \ldots & x_1^{N-2+s/2} \\
\vdots & \vdots & \vdots \\
x_{N-1}^{s/2} & \ldots & x_{N-1}^{N-2+s/2} \\
\end{vmatrix}
\nonumber\\
& = &
\prod_{n=1}^{N-1}\frac{n+s/2}{(n+1)^{s}}
\int_{1}^{2^2}dx_{1}\int_{2^2}^{{3^2}}dx_{2}
\ldots\int_{(N-1)^2}^{N^2}dx_{N-1}
\begin{vmatrix}
1 & x_1 &\ldots & x_1^{N-2} \\
\vdots &\vdots &\vdots & \vdots \\
1 & x_{N-1} &\ldots & x_{N-1}^{N-2} \\
\end{vmatrix}
\prod_{n=1}^{N-1} x_n^{s/2} \nonumber\\
& = &
\frac{2(N-1)!}{s{\Gamma_{N-1}\left( \frac{s}{2} \right)}}
\int_{1}^{2^2}
dx_1
\int_{2^2}^{{3^2}}
dx_2
\ldots
\int_{(N-1)^2}^{{N^2}}
dx_{N-1}
\prod_{1\leq i<j\leq N-1} (x_{j}-x_{i})
\prod_{n=1}^{N-1} \left(\frac{x_n}{n(n+1)}\right)^{s/2}.
\end{eqnarray*}
\end{proof}
Let $\mathcal{D}_{N-1}(\mathbf{x};\boldsymbol{\alpha},\mathbf{a})$, with $\boldsymbol{\alpha}=(\alpha_1,\alpha_2,\ldots,\alpha_N)$,
$\alpha_n>0$ and $\mathbf{a}=(a_1,a_2,\ldots,a_N)$ with
$a_1<a_2<\ldots<a_N$, denote the Dixon-Anderson
probability density function (pdf) over the domain
$\mathcal{X}_{N-1} = \left\{a_1<x_1<a_2<\ldots<x_{N-1}<a_N\right\}$ (see \cite{forrester2010log}
page 138)
\begin{equation}\label{eq:Dixon-Gen}
\mathcal{D}_{N-1}(\mathbf{x};\boldsymbol{\alpha},\mathbf{a}) =
\frac{\Gamma\left(\sum_{n=1}^{N}\alpha_n\right)}{\prod_{n=1}^{N}\Gamma(\alpha_n)}
\frac{\prod_{1\leq i<j\leq N-1} (x_j-x_i)}
{\prod_{1\leq i<j\leq N} (a_j-a_i)^{\alpha_j+\alpha_i-1}}
\prod_{n=1}^{N-1}\prod_{i=1}^{N} |x_n-a_i|^{\alpha_i-1}.
\end{equation}
Taking $\boldsymbol{\alpha}=\mathbf{1}_{N}$ (i.e. $\alpha_1=\alpha_2=\ldots=\alpha_N=1$)
and $a_n=n^2,\; n=1,\ldots,N$, we get that
\begin{equation}\label{eq:Dixon}
\mathcal{D}_{N-1}(\mathbf{x};\boldsymbol{\alpha},\mathbf{a}) = (N-1)! \frac{\prod_{1\leq i<j\leq N-1} (x_j - x_i)}
{\prod_{1\leq i<j\leq N} (j^2 - i^2)}
\end{equation}
is a pdf over $\mathcal{X}_{N-1}=\left\{1<x_1<2^2<\ldots<x_{N-1}<N^2\right\}$.
From the previous lemma, we obtain the following theorem
\begin{theorem}\label{th:Dixon}
Let $\mathbf{X}=(X_1,\ldots,X_{N-1}) $ be a random vector with Dixon-Anderson
distribution given by (\ref{eq:Dixon}), then
\begin{equation}\label{eq:dixon-esperance}
\frac{1}{s}\frac{1}{\Gamma_{N-1}\left(\frac{s}{2}\right)}
\Esp{\prod_{n=1}^{N-1} \left(\frac{X_n}{n(n+1)}\right)^{s/2}}
\longrightarrow \eta(s)\mbox{ as } N\rightarrow\infty
\end{equation}
uniformly on every compact subset of $\Cc$.
\end{theorem}
\begin{proof}
Using the expression of $\Det{V_N}$ given in equation (\ref{eq:VN})
we find the expectation given in (\ref{eq:dixon-esperance}) for $N$ fixed.
The Gamma function can be defined as an infinite
product for all complex numbers $z$ except the non-positive
integers
$$\Gamma (z)=
\frac {1}{z}\prod _{n=1}^{\infty }
\frac {\left(1+{\frac {1}{n}}\right)^{z}}{1+{\frac {z}{n}}}
$$
and for any $s\in\Cc$ (the case $s=0$ is handled by continuity)
$$
\frac{2}{s\Gamma_{N-1}\left( \frac{s}{2} \right)}
\xrightarrow[N\rightarrow\infty]{}
\frac{1}{\Gamma\left( 1+\frac{s}{2} \right)}
$$
uniformly on every compact subset of $\Cc$.
For any $s\in \Cc$, we have
$$
\eta_N(s) =\frac{1}{2} \frac{\Det{V^{(s)}_N}}{\Det{V_N}}
= \frac{1}{s}\frac{1}{\Gamma_{N-1}\left(\frac{s}{2}\right)}
\Esp{\prod_{n=1}^{N-1} \left(\frac{X_n}{n(n+1)}\right)^{s/2}}
$$
and thus the conclusion follows from theorem \ref{th:biane}.
\end{proof}
\subsection{A Result Related to Theorem \ref{th:Dixon}}
One interesting fact about the Dixon-Anderson distribution given in
(\ref{eq:Dixon}) is that it is invariant under some linear transformations.
More precisely if $\mathbf{X}$ is a Dixon-Anderson random vector with pdf
$\mathcal{D}_{N}(\mathbf{x};\boldsymbol{\alpha},\mathbf{a})$ and
$(u,v)\in \R^\star\times\R$ then $\mathbf{Y}=u\mathbf{X}+v\mathbf{1}_N$ is a Dixon-Anderson random
vector with pdf $\mathcal{D}_{N}(\mathbf{x};\boldsymbol{\alpha},u\mathbf{a}+v\mathbf{1}_N)$.
Using this property we can renormalize the random vector $\mathbf{X}$ over $[0,1]$
by using the change of variable $\mathbf{Y}=(\mathbf{X}-\mathbf{1}_{N-1})/(N^2-1)$ giving
us the identity
$$
\eta_N(s) = \frac{1}{s\Gamma_{N-1}\left( \frac{s}{2} \right)}
\left(\frac{(N^2-1)^{(N-1)}}{N!(N-1)!}\right)^{\frac{s}{2}}
\Esp{\prod_{n=1}^{N-1} \left({Y_n+\frac{1}{N^2-1}}\right)^{\frac{s}{2}} }.
$$
We have the following theorem
\begin{theorem}
Let $\psi_N(x;s)$ denote the application
$$
\psi_N(x;s) =\frac{2}{\Gamma\left(1+\frac{s}{2} \right)} \left(\frac{(N^2-1)^{(N-1)}}{N!(N-1)!}\right)^{\frac{s}{2}}
\Esp{\prod_{n=1}^{N-1} \left|{Y_n-\frac{x^2-1}{N^2-1}}\right|^{\frac{s}{2}} }.
$$
Then for all $n\in\N^\star$
$$
\psi_N\left({n;s} \right)
\xrightarrow[N\rightarrow\infty]{}\frac{1}{n^s}
$$
and for $n=0$
$$
\psi_N\left(0;s \right)
\xrightarrow[N\rightarrow\infty]{}
\eta(s).
$$
\end{theorem}
\begin{proof}
The case $n=0$ is a consequence of theorem \ref{th:Dixon}.
Let $b_{n,N} = \frac{n^2-1}{N^2-1}$ for $n=1,\ldots,N$ and
$\mathcal{Y}_{N-1} = \left\{0=b_{1,N}<y_1<b_{2,N}<y_2<\ldots<y_{N-1}<b_{N,N}=1 \right\}$.
Taking $x=n$ we can compute the value of $\psi_N(x;s)$. We have
\begin{equation*}
\Esp{\prod_{n=1}^{N-1} \left|{Y_n-b_{n,N}}\right|^{\frac{s}{2}} }
= \frac{(N-1)!}{\displaystyle\prod_{1\leq i<j\leq N} \left(b_{j,N} - b_{i,N}\right)}
\int_{\mathcal{Y}_{N-1}}d\mathbf{y}
\prod_{1\leq i<j\leq N-1} (y_{j}-y_{i}) \prod_{k=1}^{N-1} |y_k - b_{n,N}|^{s/2}.
\end{equation*}
The integral on the right-hand side is (consider the pdf given in (\ref{eq:Dixon-Gen})
with $\alpha_k=1$ if $k\neq n$ and $\alpha_k=1+s/2$ otherwise)
\begin{multline*}
\int_{\mathcal{Y}_{N-1}}
d\mathbf{y}\prod_{1\leq i<j\leq N-1} (y_{j}-y_{i}) \prod_{k=1}^{N-1} (y_k - b_{n,N})^{s/2}
\\=
\frac{\Gamma\left(1+\frac{s}{2}\right)}{\Gamma\left(N+\frac{s}{2}\right)}
\displaystyle\prod_{1\leq i<j\leq N} \left(b_{j,N} - b_{i,N}\right)
\prod_{k=1}^{n-1} \left(b_{n,N} - b_{k,N}\right)^{s/2}
\prod_{k=n+1}^{N} \left(b_{k,N} - b_{n,N}\right)^{s/2}.
\end{multline*}
From this we deduce that when $n\neq 1$ we have
\begin{eqnarray*}
\psi_N(n;s) &=& \left(\frac{\prod_{k=1}^{n-1} \left(n^2 - k^2\right)
\prod_{k=n+1}^{N} \left(k^2 - n^2\right)}{N!(N-1)!}\right)^{{s}/{2}}
\frac{\Gamma(N)}{\Gamma\left(N+\frac{s}{2}\right)}\\
& = &
\left(\frac{(N-n)!(N+n)!}{n^2\,N!(N-1)!}\right)^{{s}/{2}}
\frac{\Gamma(N)}{\Gamma\left(N+\frac{s}{2}\right)}
\\
&=& \left(\frac{1}{|a_{n,N}|n^2}\right)^{s/2}\,N^{s/2}\,
\frac{\Gamma(N)}
{\Gamma\left(N+\frac{s}{2}\right)}
\end{eqnarray*}
with $a_{n,N}$ defined in (\ref{eq:anN}).
In the case $n=1$ we find directly
$$
\psi_N(1;s) = N^{\frac{s}{2}}
\frac{\Gamma(N)}
{\Gamma\left(N+\frac{s}{2}\right)}
$$
Taking the limit and observing that
$\Gamma\left(N+\frac{s}{2}\right)\sim \Gamma\left(N\right)N^{s/2}$
as $N\rightarrow+\infty$ ends the proof.
\end{proof}
\section{Averaged Alternating Random Series}
\label{sec:AltenatingSeries}
\subsection{A generalization of proposition \ref{prop:determinant}
and theorem \ref{th:Dixon}}
Let $s\in\Cc$ with $s\neq -2,-4,\ldots,-2(N-1)$
and let $u_1<u_2<\ldots<u_N$ be an increasing sequence of real numbers
in $\R^\star$.
From this sequence, we define the $N\times N$ generalized Vandermonde determinant
\begin{equation*}
V_N^{(s/2)}(\mathbf{u}) =
\begin{vmatrix}
u_1^{-s/2} & u_1 & u_1^2 & \cdots & u_1^{N-1} \\
u_2^{-s/2} & u_2 & u_2^2 & \cdots & u_2^{N-1} \\
\vdots & \vdots & \ddots & \vdots & \vdots \\
u_N^{-s/2} & u_N & u_N^2 & \cdots & u_N^{N-1} \\
\end{vmatrix}
\end{equation*}
with $\mathbf{u}$ denoting the ordered vector $(u_1,\ldots,u_N)$\footnote{The
reader will be aware that in section \ref{sec:Representations},
$V^{(s)}_N$ represented a \textbf{matrix}, whereas from now on the
notation $V_N^{(s/2)}(\mathbf{u})$ represents a \textbf{determinant}.}.
\begin{lemma}
\label{lemma:SumGenVandermonde}
Let $u_1<u_2<\ldots<u_N$ be an increasing sequence of real numbers
in $\R^\star$. The following identity holds
\begin{equation}\label{eq:Alternating-Sum}
\frac{V_N^{(s/2)}(\mathbf{u})}{V_N^{(0)}(\mathbf{u})}
= \sum_{n=1}^N (-1)^{n-1} \frac{1}{u_n^{s/2}}
\prod_{\substack {1\le j\le N\\j\ne n}} \frac{u_j}{|u_j-u_n|}.
\end{equation}
\end{lemma}
\begin{proof}
Observe that $V_N^{(0)}(\mathbf{u})$ denotes the usual determinant
of a Vandermonde matrix.
Let us denote by $V_N^{-n}$ the following determinant
$$
V_N^{-n} =
\begin{vmatrix}
u_1 & u_1^2 & \cdots & u_1^{N-1} \\
u_2 & u_2^2 & \cdots & u_2^{N-1} \\
\vdots & \vdots & \vdots & \vdots \\
u_{n-1} & u_{n-1}^2 & \cdots & u_{n-1}^{N-1} \\
u_{n+1} & u_{n+1}^2 & \cdots & u_{n+1}^{N-1} \\
\vdots & \vdots & \vdots & \vdots \\
u_N & u_N^2 & \cdots & u_N^{N-1} \\
\end{vmatrix},\qquad n=1,\ldots,N.
$$
Then, it is obvious that
$$
V_N^{-n} =
\left(\prod_{\substack {1\le j\le N\\j\ne n}} u_j\right)
V_{N-1}^{(0)}(u_1,\ldots,u_{n-1},u_{n+1},\ldots,u_N).
$$
By looking closely at the missing products, we obtain that
$$
V_N^{-n} =
\left(\prod_{\substack {1\le j\le N\\j\ne n}} u_j\right)
\frac{V_N^{(0)}(\mathbf{u})}
{\prod_{j=1}^{n-1} (u_n-u_j)
\prod_{l=n+1}^{N} (u_{l}-u_{n})}
= (-1)^{n-1} V_N^{(0)}(\mathbf{u})
\prod_{\substack {1\le j\le N \\j \ne n}} \frac{u_j}{u_j-u_n}.
$$
We have thus
\begin{equation*}
\label{eq:GenVandermondeDet}
\frac{V_N^{(s/2)}(\mathbf{u})}{ V_N^{(0)}(\mathbf{u}) }
= \sum_{n=1}^N (-1)^{n+1} \frac{1}{u_n^{s/2}}
\frac{V_N^{-n}}{ V_N^{(0)}(\mathbf{u})}
= \sum_{n=1}^N \frac{1}{u_n^{s/2}}
\prod_{\substack {1\le j\le N\\j\ne n}}
\frac {u_j}{u_j - u_n}.
\end{equation*}
As the sequence $(u_n,\, n=1,\ldots N)$ is strictly increasing,
the sum (\ref{eq:Alternating-Sum}) is alternating as announced.
\end{proof}
We have the following result which generalizes theorem
\ref{th:Dixon}
\begin{proposition}
\label{prop:alternating-Sum}
Let $0<u_1<u_2<\ldots<u_N$ be an arbitrary increasing sequence of
positive real numbers, let $s\in\Cc$ with
$s\neq -2,-4,\ldots,-2(N-1)$ and let
$\mathbf{X}=(X_1,\ldots,X_{N-1})$ denote a random vector
with Dixon-Anderson density $\mathcal{D}_{N-1}(\mathbf{x};\mathbf{1}_{N-1},\mathbf{u})$ then
\begin{equation}
\label{eq:alternating-Sum}
\frac{2}{s\Gamma_{N-1}\left(\frac{s}{2}\right)}
{N^{s/2}}\Esp{ \frac{\prod_{n=1}^{N-1} X_n^{s/2}}{\prod_{n=1}^{N} u_n^{s/2}}}
=\frac{V_N^{(s/2)}(\mathbf{u})}{ V_N^{(0)}(\mathbf{u}) }=\sum_{n=1}^N (-1)^{n-1}\frac{1}{u_n^{s/2}}
\prod_{\substack{1 \le j\le N\\j\ne n}}\frac{u_j}{|u_j-u_n| }
\end{equation}
\end{proposition}
The proof follows the same steps as in lemma
\ref{lemma:Gen-Vandermonde} and is left to the reader.
The reader can also note that taking $u_n=n^2$ we get
the expression obtained in section \ref{subsec:integral}.
\begin{remark}
It is clear that if the left hand side of the equation (\ref{eq:alternating-Sum})
converges in some sense as $N\rightarrow\infty$ to a well defined function in $s$,
then this function will be equal to $1$ when $s=0$ and equal to $0$ when
$s=-2k$, $k\in\N^\star$.
\end{remark}
Finally we note that a similar lemma has been proved in \cite{biane2001probability}
using exponential random variables
\begin{lemma}
For $(\varepsilon_n,1\leq n\leq N)$ independent standard exponential variables,
and $u_1,u_2,\ldots,u_N$ an arbitrary sequence of numbers, all distinct and strictly
positive, if $\Re(s/2)>-N$
$$
\Esp{\left(\sum_{n=1}^N \frac{\varepsilon_n}{u_n} \right)^{s/2}} = \Gamma\left(1+\frac{s}{2} \right)
\sum_{n=1}^N \left(\frac{1}{u_n} \right)^{s/2}
\prod_{\substack{1 \le j\le N\\j\ne n}}\frac{u_j}{u_j-u_n}.
$$
\end{lemma}
\subsection{A Family of Joint Probability Densities}
Let $\mathbf{u}\in \R^N$ and $\mathbf{x}\in\R^{N-1}$ be two interlacing vectors
in the sense that they lie in the region $\mathcal{X}_N^\prime$ defined as
$$
\mathcal{X}_N^\prime=\left\{0<u_1<x_1<u_2<\ldots < u_{N-1} < x_{N-1} < u_N\right\}.
$$
Let $g$ denote a positive function over $\R_+$ to be specified hereafter.
We define a joint probability density over $\mathcal{X}_N^\prime$ by putting
\begin{eqnarray}\label{eq:pdf-jointe}
f_{\mathbf{X},\mathbf{U}}(\mathbf{x},\mathbf{u})& =& \frac{(N-1)!N!}{Z_N}
{V_{N-1}^{(0)}(\mathbf{x})}\; V_N^{(0)}(\mathbf{u})
\prod_{n=1}^N g(u_n) \nonumber\\
&=&
\mathcal{D}_{N-1}(\mathbf{x};\mathbf{1}_{N-1},\mathbf{u})\;\frac{N!}{Z_N}
\left(V_N^{(0)}(\mathbf{u})\right)^2
\prod_{n=1}^N g(u_n).
\end{eqnarray}
It is quite evident that if $(\mathbf{X},\mathbf{U})$ are two random interlacing
vectors with such distribution, then the distribution
of $\mathbf{X}$ conditional to $\mathbf{U}=\mathbf{u}$ is a Dixon-Anderson random
vector of density $\mathcal{D}_{N-1}(\mathbf{x};\mathbf{1}_{N-1}, \mathbf{u})$.
The marginal distributions of $\mathbf{X}$ and $\mathbf{U}$ are obtained
by integrating the probability density function (\ref{eq:pdf-jointe})
with respect to $\mathbf{u}$ and $\mathbf{x}$ respectively.
Integrating with respect to $\mathbf{x}$, we find that the density of $\mathbf{U}$ is
\begin{equation}\label{eq:pdf-u}
f_{\mathbf{U}}(\mathbf{u})= \frac{N!}{Z_N} \left(V_N^{(0)}(\mathbf{u})\right)^2
\prod_{n=1}^N g(u_n)
\end{equation}
over the domain $\mathcal{U}_N=\left\{0<u_1<u_2<\ldots < u_N\right\}$.
Note that, as $f_{\mathbf{U}}$ is invariant under permutation, we have
$$
Z_N = \int_0^{\infty} du_1\ldots \int_0^{\infty} du_N
\left(V_N^{(0)}(\mathbf{u})\right)^2
\prod_{n=1}^N g(u_n)
$$
assuming the integral exists. Thus if $(\mathbf{X},\mathbf{U})$ are random vectors
with joint probability density function (\ref{eq:pdf-jointe})
and $s\neq -2, -4, \ldots, -2(N-1)$ then it follows from
identity given in (\ref{eq:alternating-Sum}) that
\begin{equation}\label{eq:equality}
\frac{2}{s\Gamma_{N-1}\left(\frac{s}{2}\right)}
N^{s/2}\Esp{ \frac{\prod_{n=1}^{N-1} X_n^{s/2}}{ \prod_{n=1}^{N}{U_n^{s/2}}} }
=\Esp{
\frac{V_N^{(s/2)}(\mathbf{U})}{V_N^{(0)}(\mathbf{U})}
}
\end{equation}
assuming again that the expectations involved in this equality exist and are finite.
There are two obvious choices for $g$ that allow us
to compute these expectations: the Jacobi ensemble and the Laguerre ensemble.
\subsubsection{The Jacobi Ensemble}
We set $g(u)= u^{a-1} (1-u)^{b-1} \mathbb{1}_{(0,1)}(u)$ with $a,b>0.$
In this case, the distribution of $\mathbf{U}$ conditional to
$\mathbf{X}=\mathbf{x}$ is a Dixon-Anderson random
vector of density $D_{N}\left(\mathbf{u};(a,\mathbf{1}_{N-1},b), (0,\mathbf{x},1)\right)$
and the marginal distribution of $\mathbf{U}$ is a Selberg density $S_N(\mathbf{u};a,b,1)$
(see \cite{forrester2012fuchsian}) with $S_N(\mathbf{u};a,b,\lambda)$ given by
\begin{equation}\label{eq-Selberg-Density}
S_N(\mathbf{u};a,b,\lambda)=\frac{N!}{S_N(a,b,\lambda)}\left(V_N^{(0)}(\mathbf{u})\right)^{2\lambda}
\prod_{n=1}^N u_n^{a-1} (1-u_n)^{b-1}
\end{equation}
when supported on $\mathcal{U}_N=\left\{ 0<u_1<u_2<\ldots < u_N<1\right\}$.
$S_N(a,b,\lambda)$ denotes the Selberg's integral formula
(see \cite{andrews_askey_roy_1999}, chapter 8. We choose
the definition given in this reference rather than the one given
in \cite{forrester2012fuchsian}). We have thus
$$
Z_N = S_N(a,b,1) =
\prod_{n=0}^{N-1} \frac{\Gamma(a+n)\Gamma(b+n)\Gamma(2+n)}
{\Gamma(a+b-1+N+n)}
$$
Integrating (\ref{eq:pdf-jointe}) with respect to $\mathbf{u}$
gives the marginal density of $\mathbf{X}$
\begin{eqnarray}\label{eq:pdf-jacobi-x}
f_{\mathbf{X}}(\mathbf{x}; a, b)&=&\frac{(N-1)!N!}{ S_N(a,b,1)} \frac{\Gamma(a)\Gamma(b)}
{\Gamma(a+b-1+N)}
\left(V^{(0)}_{N-1}(\mathbf{x})\right)^2
\prod_{n=1}^{N-1} x_n^{a} (1-x_n)^{b}\nonumber\\
&=& \frac{(N-1)!}{S_{N-1}(a+1,b+1,1)} \left(V^{(0)}_{N-1}(\mathbf{x})\right)^2
\prod_{n=1}^{N-1} x_n^{a} (1-x_n)^{b}
\end{eqnarray}
i.e. the marginal density of $\mathbf{X}$ is the Selberg density $S_{N-1}(\mathbf{x};a+1,b+1,1)$
supported over
$$\mathcal{X}_{N-1}=\left\{ 0<x_1<x_2<\ldots <x_{N-1} <1\right\}.$$
\subsubsection{The Laguerre Ensemble}
We set now $g(u)= u^{a-1} e^{-u/b}\mathbb{1}_{(0,+\infty)}(u)$ with $a,b>0$.
The joint density of $(\mathbf{X},\mathbf{U})$ can be obtained as a limit of the Jacobi ensemble
case by changing variables $u_n = v_n/L$,
$x_n = y_n/L$, replacing $b-1$ by $L/\theta$ and by taking the limit
$L\rightarrow\infty$. We have in this case
$$
Z_N = W_N(a,\theta) = \lim_{L\rightarrow\infty}
\frac{S_N(a,L/\theta+1,1)}{L^{(a+N)N}}
= \theta^{(a+N)N} \prod_{n=0}^{N-1}\Gamma(a+n)\Gamma(2+n).
$$
The marginal distribution of $\mathbf{U}$ is a Laguerre density
$$
L(\mathbf{u};a,\theta) = \frac{N!}{W_N(a,\theta)} \left(V^{(0)}_{N}(\mathbf{u})\right)^2
\prod_{n=1}^{N} u_n^{a} e^{-u_n/\theta}
$$
supported on $\mathcal{U}_N^\prime=\left\{ 0<u_1<u_2<\ldots < u_N\right\}$.
Integrating (\ref{eq:pdf-jointe}) with respect to $\mathbf{u}$
gives the marginal density of $\mathbf{X}$
\begin{eqnarray}\label{eq:pdf-laguerre-x}
f_{\mathbf{X}}(\mathbf{x}; a, \theta)&=&\frac{(N-1)!N!}{ W_N(a,\theta)} \theta^{a+N}\Gamma(a)
\left(V^{(0)}_{N-1}(\mathbf{x})\right)^2\prod_{n=1}^{N-1} x_n^{a} e^{-x_n/\theta}\nonumber\\
&=& \frac{(N-1)!}{W_{N-1}(a+1,\theta)} \left(V^{(0)}_{N-1}(\mathbf{x})\right)^2
\prod_{n=1}^{N-1} x_n^{a} e^{-x_n/\theta}
\end{eqnarray}
i.e. the marginal density of $\mathbf{X}$ is a Laguerre density $L_{N-1}(\mathbf{x};a+1,\theta)$.
\subsection{Main Result}
\begin{theorem}
\label{th:Averaged-Gen-Vandermonde}
Let $a,b,\theta>0$ and $\Re(a-s/2)>0$ and let $\mathbf{U}$ be a random vector
of $\R^N$.
If the distribution of $\mathbf{U}$ is the Selberg density $S_N(\mathbf{u};a,b,1)$ supported
on $\mathcal{U}_N=\left\{ 0<u_1<u_2<\ldots < u_N<1\right\}$ then
$$
\Esp{\frac{V_N^{(s/2)}(\mathbf{U})}{ V_N^{(0)}(\mathbf{U})}}
= \frac{2N^{s/2}}{s\Gamma_{N-1}\left(\frac{s}{2}\right)}
\frac{\Gamma\left(a+b-1+N\right)}
{\Gamma\left(a-\frac{s}{2}+b-1+N\right)}
\frac{\Gamma\left(a-\frac{s}{2}\right)}
{\Gamma(a)}.
$$
If the distribution of $\mathbf{U}$ is the Laguerre ensemble density
$L(\mathbf{u};a,\theta)$ supported on $\mathcal{U}_N^\prime=\left\{ 0<u_1<u_2<\ldots < u_N\right\}$
then
$$
\Esp{\frac{V_N^{(s/2)}(\mathbf{U})}{ V_N^{(0)}(\mathbf{U})}}
= \frac{2}{s\Gamma_{N-1}\left(\frac{s}{2}\right)} \left(\frac{N}{\theta} \right)^{s/2}
\frac{\Gamma\left(a-\frac{s}{2}\right)}
{\Gamma(a)}.
$$
\end{theorem}
\begin{proof}
We have
\begin{eqnarray*}
\Esp{\frac{V_N^{(s/2)}(\mathbf{U})}{ V_N^{(0)}(\mathbf{U})}} &=& \frac{2}{s\Gamma_{N-1}\left(\frac{s}{2}\right)} N^{s/2}
\Esp{ \frac{\prod_{n=1}^{N-1} X_n^{s/2}}{ \prod_{n=1}^{N}{U_n^{s/2}}} }\\
& =&\frac{2N^{s/2}}{s\Gamma_{N-1}\left(\frac{s}{2}\right)}\frac{N!(N-1)!}{Z_N}
\int_{\mathcal{X}^\prime_N}
\frac{\prod_{n=1}^{N-1} x_n^{s/2}}{ \prod_{n=1}^{N}{u_n^{s/2}}}
\;V_{N-1}^{(0)}(\mathbf{x})\;
V_N^{(0)}(\mathbf{u})
\prod_{n=1}^N g(u_n) d\mathbf{u} d\mathbf{x}.
\end{eqnarray*}
We integrate with respect to $\mathbf{u}$. In the Jacobi ensemble case, we get
\begin{eqnarray*}
\Esp{\frac{V_N^{(s/2)}(\mathbf{U})}{ V_N^{(0)}(\mathbf{U})}}&=&
\frac{2N^{s/2}}{s\Gamma_{N-1}\left(\frac{s}{2}\right)}\frac{N!(N-1)!}{S_N(a,b,1)}
\frac{\Gamma\left(a-\frac{s}{2}\right)\Gamma(b)}{\Gamma\left(a-\frac{s}{2}+b-1+N\right)}
\int_{\mathcal{X}_N}
\left(V_{N-1}^{(0)}(\mathbf{x})\right)^2 \prod_{n=1}^N x_n^a (1-x_n)^b d\mathbf{x}\\
&=&
\frac{2N^{s/2}}{s\Gamma_{N-1}\left(\frac{s}{2}\right)}\frac{N!}{S_N(a,b,1)}
\frac{\Gamma\left(a-\frac{s}{2}\right)\Gamma(b)}{\Gamma\left(a-\frac{s}{2}+b-1+N\right)}
S_{N-1}(a+1,b+1,1)
\end{eqnarray*}
giving after some elementary simplifications the announced result.
The Laguerre ensemble case can be obtain either by integration, or by replacing
$b-1$ by $L/\theta$ and by taking the limit $L\rightarrow\infty$. We let the details
to the reader.
\end{proof}
Finally, we have the following corollary
\begin{corollary}
Let $a,b,\theta>0$ and $\Re(a-s/2)>0$. Then
\begin{multline*}
\int_0^1 du_1\ldots\int_0^1 du_N
V_N^{(s/2)}(\mathbf{u})V_N^{(0)}(\mathbf{u})
\prod_{n=1}^N u_n^{a-1} (1-u_n)^{b-1}\\=
\frac{2N^{s/2}S_N(a,b,1)}{s\Gamma_{N-1}\left(\frac{s}{2}\right)}
\frac{\Gamma\left(a+b-1+N\right)}
{\Gamma\left(a-\frac{s}{2}+b-1+N\right)}
\frac{\Gamma\left(a-\frac{s}{2}\right)}
{\Gamma(a)}
\end{multline*}
and
\begin{multline*}
\int_0^\infty du_1\ldots\int_0^\infty du_N
V_N^{(s/2)}(\mathbf{u})V_N^{(0)}(\mathbf{u})
\prod_{n=1}^N u_n^{a-1} e^{-u_n/\theta}\\=
\prod_{n=0}^{N-1}\Gamma(a+n)\Gamma(2+n)
\frac{2\theta^{(a+N)N}}{s\Gamma_{N-1}\left(\frac{s}{2}\right)}
\left(\frac{N}{\theta} \right)^{s/2}
\frac{\Gamma\left(a-\frac{s}{2}\right)}
{\Gamma(a)}.
\end{multline*}
\end{corollary}
\section{Conclusion}
It is well-known, even if it is not well understood, that there is a
connection between the random matrix theory and the Zeta function.
For example, Keating and his co-authors (\cite{keating2000random})
successfully use the characteristic polynomials $Z(U, \theta)$
of matrices $U$ in the Circular Unitary Ensemble (CUE)
to study the behavior of the correctly renormalized integral
$$
\int_0^T |\zeta(1/2+it)|^{2\lambda} dt.
$$
Our work seems to be a first step in explaining this connection.
The Selberg integral plays a fundamental role in the theory of the various
$\beta$-ensembles (see \cite{forrester2010log}) and the Dixon-Anderson
probability distribution function is an intermediate step to the Selberg's
integral evaluation.
We hope that the results presented in this article will pave the way for
a deeper understanding of the links between these two fields.
\bibliographystyle{plain}
|
1,314,259,994,964 | arxiv | \section{Introduction}
\label{sec:intro}
There are various instruments in the world, among which the human voice is the most amazing and prevalent. From the objective point of view, different vocal fold conditions or shapes produce different vocal production qualities, also known as phonation modes.
According to Johan Sundberg \cite{1.1Sundberg}, there are four phonation modes in singing: breathy, neutral, flow and pressed. In breathy phonation, there is reduced vocal fold adduction and minimal vocal fold contact area, which contribute to a higher level of turbulent noise and a lower harmonic-to-noise ratio (HNR) \cite{1.1Sundberg}. In neutral phonation, the closed phase is somewhat shortened and the airflow during the opening phase is considerably increased \cite{1.1Childers}. Pressed phonation displays a long closed phase, with reduced airflow during the opening phase \cite{3.1prout}. Flow phonation is produced with a lowered larynx, where the maximal airflow is achieved while retaining a closure of the vocal folds during the closed phase \cite{1.1Sundberg}. Unlike the other phonations, it is generally considered a singing skill.
Automatic classification of phonation modes is of great significance. On the one hand, the use of different phonation modes indicates different control capabilities over the glottis and vocal folds. For example, vocal music teachers can judge students' singing level from their phonation modes, and analysis of irregular phonation modes can help to diagnose certain pathological voices. On the other hand, detection of different phonation modes can serve as the basis of other high-level music information retrieval (MIR) tasks, such as singing evaluation, music emotion recognition, musical genre classification, etc.
In this paper, we propose a novel deep learning method to automatically classify the phonation modes. A Residual Attention based convolutional neural network is built to automatically extract discriminative features from Mel-spectrogram. Gradient-weighted class activation map (Grad-CAM) is utilized for analyzing which part of the spectrogram is important for the classification.
The rest of the paper is organized as follows. Section 2 introduces the related work. Section 3 describes the methodology in detail. In Section 4, experimental results are presented and analysed, and in Section 5, we make further conclusion.
\section{Related work}
Several studies have investigated the classification of phonation modes from the perspectives of aerodynamics, voice quality and spectrum.
The aerodynamic features reflect the principles of phonation and are usually collected by an aerodynamic detector. For example, the glottal resistance is the ratio of the difference between the glottal pressure and the average glottal airflow, which can reflect the pressure under the glottis and the area of the glottis. The vocal efficiency is the ratio of the sound intensity to the average glottal airflow rate, which is determined by factors such as the vocal cord function, the amplitude of the vocal cord vibration, and the uniformity of the pressure in the larynx. Grillo and Katherine proved the effectiveness of laryngeal resistance and vocal efficiency in distinguishing different phonation modes \cite{1.1grillo}. Nonetheless, collecting aerodynamic characteristics is a complicated and expensive task.
Voice quality features are usually calculated from inverse filtering. In the speech vocalization scenario, \cite{1.1millgaard} proved that acoustic parameters such as normalized amplitude quotient (NAQ), amplitude, glottal closed entropy, energy ratio around 1000 Hz, etc. have a certain consistency with the results of expert judgment in discriminating phonation modes. \cite{1.1airas} \cite{1.1alku} showed that compared with the traditional glottal amplitude entropy feature, the normalized amplitude quotient, which characterizes the glottal excitation, can distinguish the four phonation modes more robustly. Standardized amplitude quotients achieved a 73\% consistency score with expert judgement on singing vocal pressure values \cite{1.1sundberg2}. Acoustic features such as peak slope \cite{1.1kane1}, maximum dispersion quotient (MDQ) \cite{1.1kane2}, and significant cepstral peaks can be used as remarkable features to differentiate breathy and pressed phonation \cite{1.1hille}. Because of problems with inverse filtering of the singing voice, voice quality features alone are not sufficient for classification \cite{1.1kadiri1}. In order to improve the time-frequency resolution, Sudarsana used the improved zero-frequency filtering method and the cepstrum coefficient, proposing a zero-time window method \cite{1.1kadiri2}.
In recent studies, researchers have focused more on spectral features. For example, harmonic amplitudes, formant frequencies, bandwidths and amplitudes, harmonic-to-noise ratio, etc., are combined with sound quality characteristics to classify the phonation mode \cite{3.1rouas}. The frequency domain features can perform well under certain scenarios, but due to differences in vowel, pitch and other conditions, some features may not be applicable in all situations.
\section{Methodology}
In this section, we first describe the dataset and data processing methods. Then we introduce the detailed design of network architecture.
\subsection{Dataset}
To compare with the work in \cite{3.1Yes}, we use the same four datasets and divide them into training and test sets in the same way. Each dataset contains recordings of individual sustained vowels with different pitches and phonation modes.
\textbf{Dataset-1} \cite{3.1prout}: The first available dataset (DS-1) for phonation mode is published by Proutskova in 2013. The dataset contains a total of 909 audio clips, which were sung by a professional Russian soprano singer. The recording includes 9 Russian vowels, ranging in pitch from $A3$ to $C6$.
\textbf{Dataset-2} \cite{3.1rouas}: The second phonation dataset (DS-2) is published by Rouas and Ioannidis in 2016. The dataset contains 487 recordings sung by a professional baritone singer. The pitch varies from ${\settoheight{\dimen0}{C}C\kern-.05em \resizebox{!}{\dimen0}{\raisebox{\depth}{$\sharp$}}}2$ to ${\settoheight{\dimen0}{G}G\kern-.05em \resizebox{!}{\dimen0}{\raisebox{\depth}{$\sharp$}}}4$.
\textbf{Dataset-3} \cite{3.1Yes}: The third dataset (DS-3) used for automatic phonation classification is recorded by Universitat Pompeu Fabra (UPF). It includes 515 recordings sung by a professional female soprano singer in 2018. The pitch varies from $A3$ to $C6$.
\textbf{Dataset-4} \cite{3.1Yes}: The fourth dataset (DS-4) is also published by UPF. There are 240 recordings sung by a professional female soprano singer in this dataset. The pitch varies from ${\settoheight{\dimen0}{F}F\kern-.05em \resizebox{!}{\dimen0}{\raisebox{\depth}{$\sharp$}}}3$ to $F4$.
\begin{table}[t]
\begin{center}
\caption{The number of audio clips in the four datasets. After data augmentation, the training data size has increased by about twice, while the test data size remains the same.} \label{tab:table1}
\setlength{\tabcolsep}{1.6mm}{
\begin{tabular}{|c|c|c|c|}
\hline
& Audio data type & Original data & Augmented data
\\
\hline
\multirow{2}{*}{DS-1}
& Training & 727 & 1563 \\
~ & Test & 182 & 182 \\
\hline
\multirow{2}{*}{DS-2}
& Training & 389 & 877 \\
~ & Test & 98 & 98 \\
\hline
\multirow{2}{*}{DS-3}
& Training & 412 & 925 \\
~ & Test & 103 & 103 \\
\hline
\multirow{2}{*}{DS-4}
& Training & 192 & 548 \\
~ & Test & 48 & 48 \\
\hline
\end{tabular}
}
\end{center}
\end{table}
\begin{figure}[t]
\begin{minipage}[b]{1.0\linewidth}
\centering
\centerline{\epsfig{figure=fig_origin_new.jpg,width=8.5cm}}
\vspace{-0.1cm}
\caption{The Mel-spectrograms for different phonation modes. Frequencies are shown increasing up the vertical axis, and time on the horizontal axis. The brightness increases with the magnitude.}
\label{fig:fig_mel_spectrogram}
\end{minipage}
\end{figure}
\begin{figure*}[t]
\begin{minipage}[b]{1.0\linewidth}
\centering
\centerline{\epsfig{figure=fig_network_new2.png,width=16cm}}
\vspace{0.3cm}
\caption{The illustration of the proposed Residual Attention based network with soft mask branch.}
\label{fig:fig_network}
\end{minipage}
\end{figure*}
\subsection{Data preprocessing and augmentation}
First, the original audio data is resampled to 44.1kHz and blank audio segments are removed in the preprocessing. Then, because the Mel frequency scale closely matches human auditory perception, we choose Mel-spectrograms as raw feature maps. To extract feature maps, we use 12.5\%-overlapping windows of 2048 frames, and transform each window into a 128 band Mel-scaled magnitude spectrum. The Mel-spectrograms of four phonation modes are shown in Figure \ref{fig:fig_mel_spectrogram}.
To increase the amount of data available for training, we perform data augmentation on the training sets. In \cite{3.1Yes}, only the middle 500 ms segments of audios are selected as valid data, and the rest are discarded because they hold that the phonation mode is not stable on these segments. But in fact, this approach wastes useful data to some extent. In contrast, we only remove the potentially unstable 128 ms at the head and tail of each spectrogram, and segment the spectrogram with a 500 ms window and 128 ms overlapping length. In this way, the training data is approximately doubled, as shown in Table \ref{tab:table1}. Note that none of the data contains blank segments, and there are no augmented data in the test sets.
\subsection{Frequency-biased filters}
In the implementation of convolutional neural network (CNN), squared filters are most commonly used as the convolution kernel. Squared filters are easy to understand and can be widely used in various scenarios. Research in computer vision has achieved significant results by using CNN with squared filters, but adjusting the shape of filters in different tasks is still an effective tuning method.
\cite{2.3Pons} explores the application of different filter shapes in MIR research, and points out that in the processing of audio, using \textit{1-by-n} or \textit{n-by-1} filters can sometimes achieve better results. Unlike the images in the field of computer vision, the two dimensions of audio spectrograms have different meanings, which respectively represent the changes in the frequency and time domain of the sound. \textit{1-by-n} filters, also known as temporal filters, are more conducive to learning high-level features in the time domain, while \textit{n-by-1} filters, also known as frequency filters, are more conducive to learning high-level features in the frequency domain.
In phonation mode classification, by observing the spectrogram, it can be found that different phonation modes show different energy distribution in frequency bands, and the difference in the frequency domain is more significant than in the time domain. Therefore, this experiment adopts the idea mentioned above, using \textit{n-by-m (m $<$ n)} filters, which we call frequency-biased filters, to make the model more focus on the learning of frequency-domain features.
\subsection{Residual Attention based networks}
\begin{table*}[t]
\begin{center}
\caption{Average F-measure values of the proposed methods
and other designed experiments \protect\\in comparison to the previous work, with standard deviation of 10 folds in brackets. }
\label{tab:table2}
\begin{tabular}{|c|c|c|c|c|}
\hline
& DS-1 & DS-2 & DS-3 & DS-4 \\
\hline
Yesiler \cite{3.1Yes} & 0.897 (0.036) & \textbf{0.972 (0.021)} & 0.922 (0.031) & 0.855 (0.097) \\
\hline
Simple CNN & 0.907 (0.0290) & 0.851 (0.0726) & 0.935 (0.0386) & 0.858 (0.1078) \\
\hline
Original RA based & \textbf{0.922 (0.0269)} & 0.941 (0.0152) & 0.909 (0.0274) & 0.742 (0.1036) \\
\hline
Augmented RA based & 0.914 (0.0371) & 0.855 (0.0525) & \textbf{0.945 (0.0240)} & \textbf{0.927 (0.0988)} \\
\hline
\end{tabular}
\end{center}
\end{table*}
\begin{table*}[t]
\begin{center}
\caption{Average accuracy score values of the proposed methods and other designed experiments \protect\\in comparison to the previous work, with standard deviation of 10 folds in brackets. }
\label{tab:table3}
\begin{tabular}{|c|c|c|c|c|}
\hline
& DS-1 & DS-2 & DS-3 & DS-4 \\
\hline
Yesiler \cite{3.1Yes} & 89.81\% (0.036) & \textbf{97.21\% (0.021)} & 92.29\% (0.031) & 85.83\% (0.100) \\
\hline
Simple CNN & 90.79\% (0.0289) & 86.48\% (0.0606) & 93.50\% (0.0380) & 86.36\% (0.1009) \\
\hline
Original RA based & \textbf{92.25\% (0.0264)} & 94.10\% (0.0152) & 91.01\% (0.0270) & 75.52\% (0.0850) \\
\hline
Augmented RA based & 91.52\% (0.0358) & 86.22\% (0.0498) & \textbf{94.58\% (0.0236)} & \textbf{92.92\% (0.0974)} \\
\hline
\end{tabular}
\end{center}
\end{table*}
Inspired by the application of attention mechanism in fine-grained image recognition, this paper builds a network with reference to the Residual Attention Network proposed in \cite{2.4Wang} to capture subtle differences among the four phonation modes. The attention module in the Residual Attention Network consists of two parts: a mask branch and a trunk branch. The trunk branch performs feature processing while the mask branch is used for controlling gates of the trunk branch. The input data $x$ is sent into the trunk branch and $T(x)$ is the output. After getting the weighted attention map $M(x)$, the values in which range within $[0, 1]$, an element-wise operation is performed with $T(x)$ produced by the trunk branch. The final output of the module is:
\begin{eqnarray}
H_{i,c}(x) &=& (1 + M_{i,c}(x)) * T_{i,c}(x)
\label{eq:eq1}
\end{eqnarray}
\noindent where $i$ ranges over all spatial positions and $c \in \{1, ..., C \}$ is the index of the channel.
The attention module enables the network to focus on a specific area, and can also enhance its characteristics. The bottom-up top-down feedforward structure is used to unfold the feedforward and feedback attention process into a single feedforward process. The residual mechanism helps to mitigate the gradient vanishing problem.
Based on the idea mentioned above, we build a Residual Attention based network for automatic classification of phonation modes. The structure of the network can be seen in Figure \ref{fig:fig_network}.
We first construct a convolutional neural network with frequency-biased filters. The network mainly consists of 4 convolutional layers and 2 fully connected layers. The sizes of the filters used in convolutional layers are shown in Figure \ref{fig:fig_network}. Due to the relatively small size of the spectrogram, we perform max pooling only after the first two convolutional layers to reduce the loss of valid data. Then, for the third and fourth convolutional layers, we add a soft mask branch similar to that in the attention modules of the Residual Attention Network.
Different from the networks for image classification, the proposed network is relatively shallow because of the limited training data. Moreover, for this task, the input data is a spectrogram, each part of which makes sense because there is no noise or blank segment. Therefore, it is not necessary to stack attention modules to extract too many levels of attention information, and only one bottom-up top-down feedforward structure is adopted in the proposed network.
\section{Experiments}
Three experiments are designed to verify the effects of the designed network and data augmentation, using the network without soft mask branch trained with augmented data (referred to as "Simple CNN"), and the Residual Attention based network trained with original data (referred to as "Original RA based") and augmented data (referred to as "Augmented RA based") respectively. The details of these experiments are described below, and the results are shown in Table \ref{tab:table2} and Table \ref{tab:table3}.
In this section, we first introduce the experimental setup, and then describe the three tasks and discuss experimental results.
\subsection{Experimental setup}
The framework for the training process was developed in Python using PyTorch. Training data is divided as 64 samples in each mini-batch, and is trained with GPU Nvidia GTX1070. In order to make the training process more robust, an Adam optimizer is applied as an adaptive optimizer for better performance with weight decay rate of 0.0001. Cross entropy is used as the loss function. The learning rate is 0.001, and its annealing rate is set to 0.5 per 20 epochs.
To compare with the work in \cite{3.1Yes}, ten-fold cross-validation is utilized to make the results more reliable. The training set is divided into ten subsets and in each fold, nine out of ten subsets are used for training, and the remaining subset is used for validation. After training, we use the test set to evaluate each model, and take the average value of the ten folds as the experimental result.
\subsection{Experimental results and analysis}
\subsubsection{Validation of data augmentation}
By the data augmentation in Section 3.2, the training set has been approximately doubled. We verified the effect of data augmentation by training the designed network on original data (referred to as "Original RA based") and augmented data (referred to as "Augmented RA based") respectively.
The experimental results show that the data augmentation is effective on relatively small datasets. For DS-4, the classification accuracy of the trained model improves by more than 15\%. But on larger datasets, such as DS-1 and DS-3, there is no obvious effect. We infer that the augmented data does not bring in more useful information for these datasets. Besides, the accuracy is reduced on DS-2 by 8.61\%. Upon further exploration, we found that the stable audio segments in DS-2 are shorter than those in the other three datasets. After the augmentation, some unstable and ambiguous samples are even introduced, which leads to the worse results on DS-2.
\begin{figure}[t]
\begin{minipage}[b]{1.0\linewidth}
\centering
\centerline{\epsfig{figure=fig_normal_new.jpg,width=8.5cm}}
\vspace{-0.15cm}
\caption{Generated activation maps of flow mode after each convolutional layer in the network without soft mask branch.}
\label{fig:fig_simplecnn}
\end{minipage}
\end{figure}
\begin{figure}[t]
\begin{minipage}[b]{1.0\linewidth}
\centering
\centerline{\epsfig{figure=fig_special_new.jpg,width=8.5cm}}
\vspace{-0.15cm}
\caption{Generated activation maps of flow mode after each convolutional layer in the Residual Attention based network.}
\label{fig:fig_rabased}
\end{minipage}
\end{figure}
\subsubsection{Verification of the effect of the soft mask branch}
To validate the effect of the proposed attention mechanism on the models, we designed an experiment to compare the designed network with a similar network without relevant modules (referred to as "Simple CNN"). Because these modules are designed as a branch, namely soft mask branch, it is easy to remove them. For the remaining trunk network, the same super parameters are used to train models with the augmented datasets.
The experimental results show that the accuracy of the models on DS-1, DS-3 and DS-4 improves to some extent by adopting the idea. Especially on DS-4, the F-measure of "Augmented RA based" is 0.069 higher than that of "Simple CNN". Even on DS-2, where the accuracy score is slightly worse, the F-measure achieves 0.855, 0.004 higher than that of "Simple CNN".
To further explain the use of the mask, in Section 4.3, we use Grad-CAM to analyze the gradient, visualizing the network attention of different layers.
\subsubsection{Comparison with previous works}
We compare the results of our work with previous studies. Yesiler \cite{3.1Yes} trained a simple neural network model with hand-crafted features, and achieved relatively good results. Different from his work, the proposed method automatically learns the discriminating features from the time-frequency representation of the signal.
The experimental results show that the trained models in three experiments surpass the baseline in most cases. It proves that the approach of extracting discriminative features through CNN automatically is effective.
Among the results, there are 0.025, 0.023 and 0.072 F-measure improvements on DS-1, DS-3 and DS-4 respectively. For DS-2, the best result of the three experiments is "Original RA based", achieving 0.941 F-measure and 94.10\% accuracy, which is slightly worse than the previous work. For the most commonly used dataset DS-1, "Original RA based" achieves state-of-the-art results (0.922 F-measure and 92.25\% accuracy score) compared with previous attempts (0.897 F-measure and 89.81\% accuracy by Yesiler \cite{3.1Yes}, 75\% accuracy by Proutskova \cite{3.1prout}, 0.841 F-measure by Ioannidis and Rouas \cite{1.1rouas}, and 0.868 F-measure by Stoller and Dixon \cite{3.3stoller}).
\subsection{Grad-CAM analysis}
Grad-CAM is a visualization technique for interpretability of deep convolutional networks proposed in 2016 \cite{3.4gradcam}. CNN are trained by back-propagation mechanism. Grad-CAM obtains the gradient value of the convolution kernel during the back-propagation, then multiplies the processed gradient value with the original feature map to get class activation map.
Taking flow mode as an example, we visualize the activation map during training with the Grad-CAM method. As shown in Figure \ref{fig:fig_simplecnn}, for the model without soft mask branch, high attention is mainly focused on a few frequency bands with significant fluctuations. On the contrary, the Residual Attention based model does decently well on Mel-spectrograms. From Figure \ref{fig:fig_rabased}, it can be seen that earlier gradient attention focuses on the fundamental frequency and the harmonic parts of the mid and high frequency bands, while the latter focuses on the areas that are more pertinent to characterize flow mode. The phenomenon is consistent with the previous assumption, indicating that the regular fluctuations in mid and higher bands are dominant for discriminating the flow mode from other types.
\begin{figure}[h]
\begin{minipage}[b]{1.0\linewidth}
\centering
\centerline{\epsfig{figure=fig_masked_new.jpg,width=8.5cm}}
\vspace{-0.12cm}
\caption{Generated activation maps of four phonation modes with the Residual Attention based network.}
\label{fig:fig_masked}
\end{minipage}
\end{figure}
Figure \ref{fig:fig_masked} shows the gradient attention maps of four phonation modes. (a) The attention of breathy mode mainly focuses on the irregular and foggy parts in high frequency. (b) The attention of flow mode mainly concentrates on the fundamental frequency and the harmonic parts with obvious regular vibration. (c) The attention of neutral mode evenly focuses on low, medium and high frequency, while (d) the attention of pressed mode mainly concentrates on the middle frequency areas where the energy is relatively concentrated.
\section{Conclusion}
In this study, we proposed a Residual Attention based network for automatic classification of phonation modes. The network consists of a simple convolutional network performing feature processing and a soft mask branch enabling the network to focus on a specific area. Comparison experiments show the effectiveness of the proposed network, and the data augmentation is proved to be effective in some scenarios. Furthermore, visualization of the class activation map using the Grad-CAM method demonstrates the enhancement behavior for dominant classification features in the task.
\bibliographystyle{IEEEbib}
|
1,314,259,994,965 | arxiv | \section{Introduction}
In a geometric theorem, basically we are given a set of hypotheses which we have either to prove or disprove. Depending on these hypotheses, we figure out the whole geometric system. A list of fundamental postulates and previously proven theorems, are known. They are used to infer the related geometric facts from the given hypotheses. These derived geometric facts which have been discovered so far are used further to derive more geometric facts until the conclusion is reached about the claim of the given theorem. Alternatively, it is possible to figure out the geometric facts which must be true if the claim is to be true. To do so, one needs to use the fundamental geometric postulates and apply the process of logical inference. Consequently, the theorem-prover infers what other geometric facts are required to be true if the previously derived geometric facts are to remain satisfied. The process is carried on until the theorem-prover discovers that the required facts for the validity of the final claim are given as the hypotheses of the theorem. An `intelligent thinker' thinks in both ways to generate a particular algorithm to prove a theorem.
\textit{Automated Theorem Proving (ATP)} is enabling a machine (computer) to figure out an algorithm to prove a given theorem by the mechanization of the above mentioned process.
ATP has been established as a branch of Artificial Intelligence for several decades. In 1954 Martin Davis, an American Mathematician programmed Presburger’s algorithm \cite{davis1957computer}. Later Allen Newell, Herbert A. Simon and J. C. Shaw developed Logic Theory Machine around 1955-56 \cite{newell1956logic}. In 1959 they created General Problem Solver (G.P.S.) \cite{newell1963guide} which was able to solve any symbolic problem. Gelernter, J. R. Hanson and D.W. Loveland worked on geometric theorem proving implementing traditional proof method \cite{gelernter1960empirical}. However, their method suffers difficulties of the explosion of the search space. Later Wen-Tsun Wu developed an algebraic method \cite{elias2006automated} which could prove geometric theorems more efficiently, but this method involves lots of calculations with polynomials which make the proof hardly readable. Chou, Gao and Zhang \cite{chou1994machine} developed `area method' which is able to produce short and readable proofs of geometric theorems. In his paper, David A. Plaisted \cite{plaisted2014automated} reviewed different techniques of ATP. Among these techniques are: propositional proof procedures \cite{malik2009boolean,moskewicz2001chaff}, first order logic \cite{fitting2014possible}, clause linking \cite{lee1992eliminating}, instance-based procedures \cite{plaisted2000ordered}, model evolution \cite{baumgartner2014model}, modulo theories \cite{de2011satisfiability}, unification and resolution \cite{lassez1991computational} and combined systems \cite{bridge2013case,armando2009new}. In another paper, Joran Elias \cite{elias2006automated} discussed Wu’s method on geometric theorem proving.
There are two broad categories of techniques to prove a geometric theorem. They are: Euclidean Logical Inference methods \cite{fu2014geometry} and Cartesian Algebraic methods \cite{franova2014cartesian}. The former method uses logical inference to reach a conclusion from a set of premises. On the other hand, the latter method converts a given set of premises into a set of algebraic equations and then solves those equations for unknown parameters. In this paper, we propose GraATP, an ATP combining both the algebraic method (Cartesian analytical geometry) and the logical inference method (Euclidean geometry) to prove geometric theorems. Our proposed method translates the geometric entities into nodes of a graph and the relations between them as edges of that graph. The automated system searches for different ways to reach the conclusion for a claim via graph traversal by which the validity of the geometric theorem is examined.
Rest of the paper is organized as follows: first we discuss the preliminaries required to figure out a geometric structure in Section~\ref{preli}. We describe Cartesian analytical geometry and traditional Euclidean proof using logical inference method in Section~\ref{sec21} and Section~\ref{sec22} respectively. In Section~\ref{secMet}, we propose our method combining these two methods to prove geometric theorems. Finally, we conclude the paper with an outline of the future work in Section~\ref{secCon}.
\section{Preliminaries\label{preli}}
To define a geometric system, we use four elementary concepts of geometry: point, straight line, angle and circular arc. Usually, we choose a point and a line passing through the point as an initial reference. Position of a point is specified by a distance from another previously defined point along a particular straight line. Orientation of a line is specified by the angle made by it with another previously specified line and the point of intersections between the lines. A circular arc is specified by the position of its central point and it radius. For example, following steps are required to derive a parallelogram in \figurename~\ref{f1}:
\begin{figure}
\begin{center}
\includegraphics[scale=0.5]{./fig1}
\end{center}
\caption{A geometric system of lines and points.}
\label{f1}
\end{figure}
\begin{enumerate}
\item $A$ is a reference point
\item $EF$ passing through $A$ is a reference line
\item Line $GH$ passes through $A$, angle $\angle FAH = x$
\item $C$ is a point on $GH$ where $AC = b$
\item Line $KL$ passes through $C$, angle $\angle KCG = x$
\item $B$ is a point on line $EF$ (its position on $EF$ fixes the side $AB$ of the parallelogram)
\item Line $IJ$ passes through $B$, angle $\angle FBJ = x$
\item $D$ is the intersection of the line $KL$ and $IJ$ is determinable since $KL$ and $IJ$ are specified
\item \{$AC, CD, DB, B$A\} is the parallelogram
\end{enumerate}
Once we are able to figure out a complete geometric structure, we can explore different dimensions (lengths of the lines, angles between lines, etc) of the structure. Hence, we can test whether a certain claim is true or false knowing these dimensions.
\subsection{Cartesian Method\label{sec21}}
In Cartesian method, geometry is combined with algebra. Two axes, perpendicular to each other and their point of intersection, i.e. origin, are specified. A point on a plane is specified by pair of coordinates which are the distances of the point from the origin along the axes. Curves and straight lines are specified by algebraic equations. Solving these equations unknown dimensions are worked out. Finally, facts to be proven are verified.
\begin{figure}[h]
\begin{center}
\includegraphics[scale=0.5]{./fig2}
\end{center}
\caption{A geometric system of a parallelogram.}
\label{f2}
\end{figure}
Let’s consider the following example from \cite{elias2006automated}. We have to prove that diagonals of a parallelogram bisect each other. Please see \figurename~\ref{f2}. Here, the hypotheses are - i) $OACB$ is a parallelogram $\implies OB||AC, OA||BC$, $OC$ and $AB$ are diagonals, ii) $D$ is the point of intersection of $AB$ and $OC$. First, we have to decompose these statements into a couple of equations.
As mentioned earlier, we have to specify the points of our interest- $O, A, C, B$ and $D$ each with two coordinates. Let $O, A$ and $B$ be denoted by $(0, 0), (x, 0)$ and $(y, z)$ respectively. Here, $x, y$ and $z$ are arbitrary parameters that we have chosen. Once we choose $x, y$ and $z,$ the coordinates of $C$ and $D$ become fixed depending on $(x, y, z)$ according to the hypotheses. Let the coordinates of $C$ and $D$ be $(u, v)$ and $(p, q)$.
Since $OB$ and $AC$ are parallel to each other, their slopes are equal too. Hence we get,
\begin{equation}
uz-zx=v y
\label{eq1}
\end{equation}
On the other hand, since $OA$ and $BC$ are parallel to each other, their slopes are also equal. Hence we get,
\begin{equation}
v=z
\label{eq2}
\end{equation}
We can work out $u$ and $v$ in terms of $x, y$ and $z$ by solving Equation~\ref{eq1} and Equation~\ref{eq2}. Finally, we find out the lengths of $OD, DC, BD$ and $AD$ by using the Pythagorean theorem. If we can show that $OD=DC$ and $BD=AD$, then the theorem is proved.
\subsection{Euclidean Logical Inference Method \label{sec22}}
In logical inference method, a set of axioms, previously proved theorems and hypotheses are used to discover the relationship among different entities (lengths of line segments or arcs, positions of points, amount of angles and equalities or similarities of finite regions like triangles) of a geometric structure. These relationships are used to proceed further to infer relationship among different other entities from the previously derived relationships. This process continues until the relationship between two particular entities of interest is discovered. Let's think about the previous example: diagonals of a parallelogram bisect each other.
We have to discover the relationship between the entities (here length of two line segments): $OD$ and $CD$ as well as $BD$ and $AD$. First of all, we will find out relations exploiting the hypotheses. Since $OACB$ is a parallelogram, $(OB, AC)$ and $(OA, BC)$ are opposite sides, they are parallel and equal to each other. $BA$ is the common sector of $OB$ and $AC$. Hence the $\angle OBA$ is equal to the $\angle BAC$. Here, we used a previously discovered theorem: if a line intersects two parallel lines then the alternate angles created in the points of intersection are equal. Similarly, we find out the relationship between $\angle BOC$ and $\angle OCA$. Since $D$ is a point on $AB$, angle $\angle OBA = \angle OBD$. Similarly, $\angle BAC = \angle DAC$. Again $D$ is a point on $OC$. Hence, $\angle BOC = \angle BOD$ and $\angle OCA = \angle DCA$. Now in $\triangle BOD$ and $\triangle ACD, OB = AC, \angle OBD = \angle DAC$ and $\angle BOD = \angle ACD$. Therefore, $\triangle BOD$ and $\triangle ACD$ are equal. Here, we used another previously discovered theorem: if two triangles have a side of equal length and two adjacent angles of equal amount each, then the triangles are equal. $OD$ is the opposite side of the $\angle OBD$ and $CD$ is the opposite side of the $\angle CAD$. Since $\triangle BOD$ and $\triangle ACD$ are equal and $\angle OBD= \angle CAD \implies OD = CD$. Similarly, $BD = AD$. This is the desired relationship to prove the theorem. Our process of searching information on how different entities are related with each other throughout the geometric structure stops here.
\section{GraATP: Our proposed ATP Framework\label{secMet}}
In the previous section, we discussed two manual approaches for geometric theorem proving. If we compare the two methods, at first glance, the Cartesian algebraic method seems more complicated than the logical inference method. The algebraic method is mechanical; all we have to do is to fix the position coordinates of some particular points, discover equations of straight lines or curves appearing in the geometric structure and find out the coordinates of other points as functions of the coordinates of the previously fixed points. When we know all dimensions of the structure we test whether the final claim is true or false. On the other hand, the Euclidean logical inference method requires more heuristic knowledge, i.e. more `intelligence' to discover the hidden relationship among different entities of the structure. The prover's skill to observe the geometric structure, and retrieve the previously discovered theorems, related to the problem, from the memory, plays an important role here. Moreover, whether the searching process (the process of discovering relationships among the entities) approaches the goal (testing the relationship which is supposed to be proven) depends on the prover's intuition. By comparing the two methods, we can conclude that the automation of the Cartesian method is easier than that of the logical inference method.
Here, we propose a primitive approach of finding out an algorithm to prove a geometric theorem in an automated way. There are several previously proposed ways: Wu’s method \cite{elias2006automated}, Area method \cite{chou1994machine}, etc. Our goal is to build up a framework of finding an algorithm that resembles the way in which we the human or intelligent theorem prover thinks to prove a theorem. Let’s discuss the previous example again in a different way. Consider the geometric system in \figurename~\ref{f3}.
\begin{figure}[h]
\begin{center}
\includegraphics[scale=0.5]{./fig3}
\end{center}
\caption{A geometric system of a parallelogram.\label{f3}}
\end{figure}
Our hypotheses are as follows:
\begin{enumerate}
\item $OA = x$
\item $E$ lies on $OA$
\item $BE$ is perpendicular to $OA$
\item $OE$ = $y$
\item $EB$ = $z$
\item $OB||AC$
\item $OA||BC$
\item $D$ lies on $AB$
\item $D$ lies on $OC$
\item $DF$ is perpendicular to $OF$
\item $A$ lies on $OG$
\item $CG$ is perpendicular to $OG$
\end{enumerate}
We have to show that $OD=CD$ and $BD=DA$. Here, we get a unique geometric structure for a unique set of the parameters $(x, y, z)$. Our next goal is to explore the geometric structure to express all of the dimensions (length of the segments of lines) as functions of these three parameters $x, y$ and $z$. When $OD, CD, BD$ and $DA$ can be expressed as functions of $x, y$ and $z$, then the process of exploration stops. If $OD=CD$ and $BD=DA$, then the claim is proved.
A possible sequence to work out different dimensions is as follows:
\begin{enumerate}
\item Find $CG$. $CG = BE=z$ (exploiting the fact that $BC||OA$ and $G$ lies on the extension of $OA$)
\item Find $\frac{CG}{AG} (= \frac{BE}{OE} =\frac{z}{y})$ (exploiting the fact that $\triangle OBE$ is similar to the $\triangle ACG$)
\item Find $AG$, since we know $CG$ and the ratio $\frac{CG}{AG}$.
\item Find $OG$. $OG = OA + AG$
\item Find $\frac{DF}{OF} (=\frac{CG}{OG})$ exploiting the fact that $\triangle DFO$ and $\triangle CGO$ are similar.
\item Find $AE$. $AE = OA-OE$
\item Find $\frac{AF}{DF}$ which equals to $\frac{AE}{BE}$ ( $\triangle ADF$ and $\triangle ABE$ are similar)
\item Express $AF = OA-OF$
\item Find $DF$ and $OF$ using the ratio $\frac{DF}{OF}$and $\frac{OA-OF}{DF}$
\item Find $OD, OD =\sqrt{OF^2+DF^2}$
\item Find $CD: CD =\sqrt{(OG-OF)^2+(CG-DF)^2}$
\item Check whether $OD = CD$
\end{enumerate}
Here, if $OD$ and $CD$ are equal then the theorem is proved. In the same way we can check whether $AD$ and $BD$ are equal or not.
Now, we present another example, more complicated than the previous one. Please see \figurename~\ref{f4}. Let $\triangle ABC$ be a triangle with $\angle BCA = 90^\circ$ and let $D$ be the foot of the altitude from $C$. Let $X$ be a point in the interior of the segment $CD$. Let $K$ be the point on the segment $AX$, such that $BK=BC$. Similarly, let $L$ be the point on the segment $BX$ such that $AL = AC$. Let $M$ be the point of intersection of $AL$ and $BK$. We have to show that $MK = ML$\footnote{This problem is taken from the International Mathematics Olympiad 2012 http://www.imo-official.org/problems/IMO2012SL.pdf}.
\begin{figure}[h]
\begin{center}
\includegraphics[scale=0.5]{./fig4}
\end{center}
\caption{A geometric system of a triangle. \label{f4}}
\end{figure}
Let's rephrase the hypotheses in the following way:
\begin{enumerate}
\item $AD = a$
\item $CD\perp AB$ and $CD = h$
\item $A,C$ are joined by a line segment
\item $CB \perp AC$ at $C$
\item $B$ lies on the extension of the line $AD$
\item $X$ lies on $CD$ where $XD = q$
\item $A, X$ are joined by a line segment
\item $B, X$ are joined by a line segment
\item $K$ lies on $AX$ such that $BK = BC$
\item $L$ lies on $BX$ such that $AL = AC$
\item $M$ is the point of intersection between $BK$ and $AL$
\end{enumerate}
Here, we get a unique geometric structure for a unique set of the parameters $(a, h, q)$. The next goal is to explore the geometric structure to express all of the dimensions (length of the segments of lines) as functions of these three parameters $a, h$ and $q$. When we are able to express $LM$ and $MK$ in terms of $(a, h, q)$ then the process of exploration stops. If the two functions are equal then the claim is proved.
Our proposed method GraATP will find out a sequence of the dimensions (which need to be worked out in terms of $(a, h, q)$ of this geometric structure starting from $(AD=a, CD=h, XD=q)$ to $(LM, KM)$. To locate the points $K, M$ and $L$ we draw $KN, MR$ and $LS$ perpendicular to $AB$. A possible sequence of working out the dimensions is:
\begin{enumerate}
\item Find $AC: AC =\sqrt{a^2+h^2}$
\item Find $BD$ (exploiting the similarity between $\triangle ABC$ and $\triangle ADC$)
\item Find $BC$ (exploiting the similarity between $\triangle ABC$ and $\triangle ADC$)
\item Find $AX: AX =\sqrt{a^2+q^2}$
\item Find $BX: BX =\sqrt{BD^2+q^2}$
\item Find $KN$ and $AN$ (exploiting the similarity between $\triangle AKN$ and $\triangle AXD$, and applying Pythagoras theorem in $\triangle BKN$)
\item Find $LS$ and $BS$ (exploiting the similarity between $\triangle BXD$ and $\triangle BLS$, and applying Pythagoras theorem in $\triangle ALS$)
\item Find $BN = AB-AN$
\item Find $AS = AB-BS$
\item Find $MR$ and $AR$ (exploiting the similarity between triangles $(\triangle BMR, \triangle BKN)$ and ($\triangle AMR, \triangle ALS$)
\item Find $KM$ ($KM^2 = (AR-AN)^2 + (KN-MR)^2$)
\item Find $ML(ML^2=(AS-AR)^2+(LS-MR)^2$
\item Check whether $KM = ML$
\end{enumerate}
By observing the commonalities between the two above mentioned techniques we can formulate a general way to find a theorem proving algorithm as follows:
\begin{enumerate}
\item Specify a set of parameters by means of which the geometric structure can uniquely be constructed
\item Find out different dimensions of the structure by means of the predefined parameters [to do so we use basically similarity between triangles and Pythagoras theorem]
\item Continue step 2 until the dimensions of a set of particular elements are found
\item Check whether the claim is true
\end{enumerate}
The whole process can be represented as the formation of a graph and traversing through the graph. We can represent different dimensions (length of line segment, angle and circular arc-length) and the functions of dimensions (for example, ratio of two line segments) as nodes of the graph. Using the hypotheses of the theorem, we discover the relationships among the dimensions. If we can work out the node $A$ from node $B$ then we draw a directed edge from $B$ to $A$. In the evolutionary process of the formation of the graph, we put the nodes showing the dimensions which we choose as parameters. In the parallelogram example, these dimensions are $OA(=x), OE(=y)$ and $BE(=z)$.
\begin{figure}[t]
\begin{tabular}{cc}
\includegraphics[width=.2\textwidth]{./fig5a}&\includegraphics[width=.2\textwidth]{./fig5bn}\\
(a)&(b)\\
\includegraphics[width=.2\textwidth]{./fig5cn}&\includegraphics[width=.2\textwidth]{./fig5dn}\\
(c)&(d)\\
\includegraphics[width=.2\textwidth]{./fig5en}&\includegraphics[width=.2\textwidth]{./fig5fn}\\
(e)&(f)\\
\end{tabular}
\caption{Steps of the evolutionary process for the formation of the graph. \label{f5}}
\end{figure}
\figurename~\ref{f5} (a) shows the initial step. The color gray denotes the nodes that are chosen as parameters; no other dimensions need to be known to find out their values. Hence, edges from other nodes will not be incident on them. Now, using the hypotheses we will see which dimensions are closely connected to these three dimensions and include them in the graph. Since $E$ lies on $OA$, $AE = OA-OE$. We can find out $AE$ from $OA$ and $OE$. In the second step, we include another node $AE$ (shown in \figurename~\ref{f5} (b)). Also we include two edges, one from $OA$ to $AE$ and another from $OE$ to $AE$; and we draw them with the same color (red) and label them with the number 1 to indicate that the set of dimensions $\{OA, OE\}$ is required to be known to find out $AE$. A same node can be found out by knowing different sets of dimensions. In that case, we would choose different colors and labels.
In the next step, we exploit the similarity between $\triangle OBE$ and $\triangle ACG$ to discover more relations: $\frac{CG}{AG}=\frac{BE}{OE}$. Therefore, we can include another node, this time a ratio of dimensions, $\frac{CG}{AG}$ (\figurename~\ref{f5} (c)). Blue edges labeled with number 2 come out from the nodes $OE$ and $BE$ and they are incident on the node $\frac{CG}{AG}$. Next, $A$ lies on $OG$. Hence $OG = OA+AG$. We include nodes $AG$ and $OG$. We draw two edges, one from $OA$ and another one from $AG$ to $OG$. They are labeled with number 3. The dimension $AG$ is not a parameter and still no edges are incident on it from any other node which can be represented as a function of the parameters $\{OA, OE, BE\}$. That's why we have made it lime colored (\figurename~\ref{f5} (d)) and put an asterisk mark on it. It means that we have to discover more node(s) from which edge(s) will come out to meet $AG$ and connect $AG$ with the nodes which have already been discovered. In the next step, we use the fact that $BC||OG$ to decide that $BE = CG$. Therefore, we add another node $CG$ and draw an edge from $BE$ to $CG$ (\figurename~\ref{f5} (e)). Now, we can find out $AG$ from $CG$ and the ratio $\frac{CG}{AG}$. So we draw two edges: one from $\frac{CG}{AG}$ to $AG$ and another from $CG$ to $AG$ (\figurename~\ref{f5} (f)). The node $AG$ is connected with the discovered nodes, so its color becomes white now and the asterisk mark is dropped.
The process continues until:
\begin{enumerate}
\item A connected graph is formed containing the parameter-nodes ($OA, OE,BE$) and the destination-nodes ($CD, OD$),
\item There exists no node having no incoming edges except for the parameter-nodes. As for example in step 4 the node $AG$ was included. There was no edge which is directed from other node to $AG$. Also $AG$ is not one of the parameter-nodes like $OA$, $OE$ and $BE$. Therefore the process of forming the graph continues.
\end{enumerate}
The algorithm is given in Algorithm~\ref{alg1}.
\begin{algorithm}
\DontPrintSemicolon
$H:$ set of hypotheses\;
$R:$ set of conclusions\;
$D:$ set of dimensions \;
$P=$ create a set of unique parameters\;
$E\leftarrow\phi$\;
$V\leftarrow\phi$\;
$G=\langle V,E \rangle$\;
\For{each $p\in P$}
{
create a node $u$\;
$V=V\cup {u}$\;
}
\For{each $r\in R$}
{
create a node $u$\;
$V=V\cup {u}$\;
}
\While{$D \neq \phi$}
{
create node $u$ for the next close dimension $d\in D$\;
\For{each $v \in V$ that is related to $u$}
{
add a directed edge $(u,v)$ or $(v,u)$\;
}
remove $d$ from $D$\;
}
\If{$G$ is not connected}
{
\Return null\;
}
\Else
{
\Return $G$\;
}
\caption{GraATP (H,R)}
\label{alg1}
\end{algorithm}
\begin{figure}[h]
\begin{tabular}{cc}
\includegraphics[width=.25\textwidth]{./fig6an}&\includegraphics[width=.2\textwidth]{./fig6bn}\\
\end{tabular}
\caption{The complete graph of proving the theorem on parallelogram. How the edge-relations between nodes are discovered, are also mentioned.\label{f6}}
\end{figure}
\figurename~\ref{f6} shows the complete graph to reach $OD$ and $CD$ from $\{OA, OE, BE\}$. Now we will apply the standard topological ordering algorithm to find out the sequence of steps of the theorem proving algorithm. First, we will enlist the nodes having no incoming edges. They are the parameter nodes: $OA, OE$ and $BE$. Next, we delete these enlisted nodes and the edges adjacent to them as shown in \figurename~\ref{f7}.
\begin{figure}[h]
\begin{center}
\includegraphics[width=.3\textwidth]{./fig7n}
\end{center}
\caption{First step of the topological ordering algorithm.\label{f7}}
\end{figure}
After that, we look for the nodes having no incoming edges in the new graph. They are $CG/AG, CG$ and $AE$. We delete them and their adjacent edges from the graph. We proceed in this way until we reach the destination vertices $CD$ and $OD$. Therefore, the topological order of the nodes is: $OA, OE, BE, CG/AG, CG, AE, AF/DF, AG, OG, (OA-OF)/DF, DF/OF, DF, OF, CD, OD$. The topological sorting algorithm is given in Algorithm~\ref{alg3}.
\begin{algorithm}
\DontPrintSemicolon
$A=\phi$\;
$L\leftarrow$ set of all nodes with indegree = 0\;
\While {$L\neq \phi$}
{
$u\leftarrow L.\textsf{extractNode()}$\;
$A.\textsf{addToLast}(u)$\;
\For{each $v\in Adj[u]$}
{
$E= E-\{(u,v)\}$\;
\If{indegree of $v$ becomes $0$}
{
$L= L\cup\{v\}$\;
}
}
}
\If {$E\neq \phi$}
{
\Return null
}
\Else
{
\Return $A$
}
\caption{Topological Ordering ($G=\langle V,E\rangle$)\label{alg3}}
\end{algorithm}
\section{Conclusion\label{secCon}}
So far we have discussed how to translate a geometric structure, which is uniquely configured by setting a set of parameters, to a graph and how to traverse through the graph to find out a sequence of steps by performing which the theorem can be proven. There are several mechanical methods of proving geometric theorems which have already been proposed, e.g. Wu’s method \cite{elias2006automated}, Area method \cite{chou1994machine}, and so on. The purpose of this work is to resemble the way in which a human thinks, perhaps when it is in the most naive way, to prove a theorem. It can be thought of as a primitive step towards creating an artificial thought processor. Any particular system can be thought of as a geometric structure. Data which we sense by means of our sensory organs are the different `dimensions'. When we think, we find out the relationship among different dimensions.
\begin{figure}[t]
\begin{center}
\includegraphics[width=.35\textwidth]{./fig8}
\end{center}
\caption{A geometric system of a parallelogram.\label{f8}}
\end{figure}
However, there are couples of challenges which we need to face while accomplishing an automated theorem prover in above mentioned method. They are listed below:
\begin{enumerate}
\item How the automated system would recognize which particular dimensions are required to be worked out to reach the goal. There are lots of possible dimensions, which we have ignored. For example, we have completely ignored the point of intersection between $OD$ and $BE$, say it is $G$ (\figurename~\ref{f8}). More dimensions like $OG, GD, BG$ and $GE$ are included. Unless we fix some heuristic constraints, the search space may become enormously large.
\item How the theorem prover would extract relationships among different dimensions extracting from the hypotheses. There should be a complete mechanism to do it.
\end{enumerate}
In this paper, we have discussed the overview of an automated theorem proving algorithm. While proving a theorem in the Euclidean logical inference method, the theorem prover should be skilled enough to inspect different portions of the geometric structure and to correlate them with the previously proven theorem(s), to infer useful decisions about different dimensions. It requires a higher level of intelligence. At the very early stage, this is hard to accomplish. On the other hand, in the Cartesian method lines and curves are represented by means of algebraic equations. It is done by following a limited number of rules, hence it is more naive than the Euclidean method, resulting in complicated calculations to solve the equations for some unknown variables. This method reduces the readability of the proof by increasing the complexity of calculations. Our proposed method assumes that the automated prover can 1) apply Pythagoras theorem and 2) apply the ratio-of-sides rule for similar triangles, and can detect the situations where to apply them---this is an aspect of the Euclidean logical inference method. A set of parameters will be defined by an expert and all other dimensions will be represented as functions of them, similar to the Cartesian method. This primitive theorem prover shares aspects of both methods.
More research work is required to be performed to meet the requirements mentioned above to accomplish an automated geometric theorem prover resembling the human thought process.
\bibliographystyle{IEEEtran}
\section{Introduction}
One way in which candidate quantum theories of gravity can differ
from one another is the spectrum of bosons they predict at very
low energies. Such fields, if they exist and are sufficiently
light, can mediate long-range forces that can observably compete
with gravity.
Scalar-tensor theories of gravity are among those that can arise
in this way, with light scalar fields in addition to the usual
metric tensor \cite{chibarev, damourrev, fujiimaeda, faraoni, singhrai, brans}. A variety of scalars commonly arise in fundamental theories, although it is very unusual for them to be light enough to mediate forces over macroscopic distances. They are rarely this light because quantum corrections
famously tend to give scalars masses, even if they would have been
massless at the purely classical level. But in some circumstances
symmetries can protect against masses, such as if the scalar is a
pseudo-Goldstone boson \cite{pGB} for a spontaneously broken approximate
symmetry, or part of the low-energy limit of an extra-dimensional model
\cite{ubernat}.
A good deal of effort has been invested in comparing the
predictions of scalar-tensor theories with observations in various
astrophysical systems \cite{will, efrev, binpult, binpulandgravwav, binpulnew, psaltis, whitedwarf, stellarosc, psaltisbinpul}, in order to improve the
constraints on their existence (or to discover their presence).
Binary pulsars are particularly useful for this purpose, since the
precision of their timing allows accurate measurements
of the relativistic gravitational effects that are generated by
the strong gravitational fields present.
Central to these tests is an understanding of how the stellar
properties depend on the microscopic couplings of the scalar field
to matter. Yet these stellar properties can sometimes be
remarkable in scalar-tensor theories in the presence of
relativistic sources. In particular a phenomenon called
spontaneous scalarization can occur, in which the star above a
critical baryonic mass can locally support a scalar field even if
the scalar-matter coupling vanishes asymptotically far from the
star \cite{damourspsc}. This allows scalar-tensor gravity to
deviate strongly from General Relativity (GR) near the star while
still passing weak-field solar system tests.
Since different fundamental theories predict scalars with
different microscopic couplings, it is useful to be able to survey
how stellar properties depend on these couplings. For this reason
in this paper we re-examine the equilibrium conditions in a star
in scalar-tensor models as a function of scalar couplings. In particular
we do so working as far as possible within analytic approximation
schemes, since these more easily allow the results to be varied
for different kinds of scalar properties. We find semi-analytic
progress is possible using a weak-coupling approximation for the
scalar field near the stellar center. By comparing with numerical
integrations of the equations of stellar structure we find the
domain of validity of these approximations, which allow us to
understand stellar properties fairly well.
As a preliminary to studying the relativistic limit in more
detail, we apply our analysis to the special case of an
incompressible stellar fluid, for which the energy density is
approximately constant. (This can be done consistent with
conservation laws only if the pressure is not also regarded as
being a function of the energy density.) To leading order in the
weak-coupling expansion, the profiles of physical variables of
constant-density stars can be described in closed form in terms of
elementary functions, dilogarithms, and Heun functions.
Although not physically realistic, solutions with this equation of
state can give insight into general features of relativistic
stars, in particular near the maximum mass that can be supported
by gravitational forces. (For GR the maximum mass found under the
assumption of constant density gives an upper bound on the maximum
mass that would be found with more realistic equations of state
\cite{buchdahl}.) We compare the predictions of constant density
in scalar-tensor theory with those of several representative
equations of states for neutron stars.
The rest of the paper is organized as follows. In \S2\ we define
the scalar-tensor theories of interest, and in particular the
parameters describing the couplings of the light scalar to
ordinary matter. Following much of the literature we specialize to
the case where the scalar-matter couplings are only weakly
dependent on the scalar fields themselves --- what we call
quasi-Brans/Dicke (qBD) models --- as well as the constraints on
their couplings that are inferred from solar system tests of
gravity. \S3\ then derives the equations describing hydrostatic
equilibrium for static and spherically-symmetric stars in
scalar-tensor gravity, as well as the matching formulae that
relate the interior and exterior geometries. Some of those general
properties that can be extracted without solving them explicitly
are also discussed, such as whether the pressure need be
monotonic; the kinds of relations they imply amongst quantities
visible to exterior observers; and the non-relativistic limit.
Next, \S4\ provides a discussion of the perturbative solutions to
these equations in the weak-coupling limit, in both the
relativistic and non-relativistic cases. The important distinction
between perturbations in the strength of the coupling measured at
the stellar center, vs its strength at infinity, first arises in
this section. Finally, in section \S5\ the perturbed equations are
solved explicitly to leading nontrivial order for the special case
of incompressible stars, with the results compared to more
realistic equations of state for neutron star models.
\section{Single-field scalar-tensor models}
We start by defining the field equations of the scalar-tensor
systems of interest.
\subsection{Action and field equations}\label{sec:action}
We consider a single light scalar field, and choose its action to
be given by\footnote{Conventions: we use metric signature $(-+++)$
and Weinberg's curvature conventions \cite{Wbg} (differing from
MTW \cite{MTW} only by an overall sign for the Riemann tensor). Units are chosen with $\hbar=c=1$.}
\begin{equation}\label{action}
S = - \frac{1}{16 \pi G} \int {\hbox{d}}^{4}x \sqrt{-g} \; g^{\mu\nu}
\bigl( \mathcal{R}_{\mu\nu} + 2 \,\partial_{\mu} \phi\partial_{\nu} \phi
\bigr) + S_{\rm{m}}[\psi, \tilde{g}_{\mu \nu}] \,,
\end{equation}
where a Weyl re-scaling is performed to go to the Einstein frame
(which eliminates any $\phi$-dependence from the Einstein-Hilbert
term) and the scalar field is redefined to ensure its kinetic term
is minimal (with a conventional factor of 2). Here $G$ is the
Einstein-frame gravitational constant, $g_{\mu \nu}$, is the
Einstein-frame metric whose Ricci tensor is $\mathcal{R}_{\mu\nu}$, and
$S_{\rm m}$ denotes the `matter' action, involving all other
observed fields (collectively denoted here by $\psi$).
There are two physical choices made in writing this action, beyond
the choice of using only a single scalar field.
\begin{itemize}
\item We assume the absence of a scalar potential, which we assume
is small enough to be negligible for the astrophysical
applications of interest. This would necessarily be true if the
scalar is relevant to cosmology, but is also the feature that
quantum contributions make most difficult to achieve in realistic
models (unless there is an approximate symmetry, like shifts $\phi
\to \phi + c$, for constant $c$).
\item We assume the matter action, $S_{\rm m}$, only depends on
$\phi$ and $g_{\mu\nu}$ through the one `Jordan-frame' combination
$\tilde{g}_{\mu \nu} = A^{2}(\phi)g_{\mu \nu}$, where the
conformal factor $A(\phi)$ is a function whose form would be
specified within any particular fundamental theory.
This kind of coupling actually arises in specific models (such as if $\phi$
arises as the breathing mode for the geometry of extra dimensions), and
has the attractive feature that it naturally evades many of the strongest
observational constraints on violations of the equivalence
principle.
\end{itemize}
The field equations obtained by varying (\ref{action}) are
\begin{eqnarray}
\label{fieldeq1}
\mathcal{R}_{\mu \nu} + 2\partial_{\mu} \phi \partial_{\nu} \phi +
8 \pi G \left(T_{\mu \nu} - \frac{1}{2} T g_{\mu \nu} \right)
&=& 0 \\
\label{fieldeq2}
\Box \phi + 4 \pi G a(\phi)T &=& 0 \,,
\end{eqnarray}
where $\Box = g^{\mu\nu} \nabla_\mu \nabla_\nu$ is the
d'Alembertian operator built using the Levi-Civita connection of
$g_{\mu \nu}$, while
\begin{equation}
T^{\mu \nu} = -\frac{2}{\sqrt{-g}}
\frac{\delta S_{\rm m}}{\delta g_{\mu \nu}}
\end{equation}
is the Einstein-frame energy-momentum tensor, $T=g^{\mu \nu}T_{\mu
\nu}$ is its trace, and $a(\phi) = A'(\phi)/A(\phi)$ defines the
scalar-matter coupling function in terms of the function
$A(\phi)$.
\subsection{Observational constraints}
\label{sec:sctens_constraints}
Because it is $\tilde g_{\mu\nu}$ that appears in the matter
action, it is the geodesic of this Jordan-frame metric along which
the trajectories of matter particles tend to move (in the absence
of other forces). Upon taking the post-Newtonian limit of the
Jordan-frame metric $\tilde{g}_{\mu \nu}$, one finds that the
effective Jordan-frame gravitational constant, measured in asymptotic
Einstein-frame units,\footnote{These are units in which the Einstein-frame
metric is asymptotically Minkowski: diag$(-1,1,1,1)$. If one instead uses units in which the Jordan-frame metric is asymptotically Minkowski, then $\widetilde G = G A^2(\phi_\infty) [1 + a^2(\phi_\infty)] $.} is
\begin{equation}\label{Geff}
\widetilde{G} = G \Bigl[ 1+a^{2}(\phi_{\infty}) \Bigr] \,,
\end{equation}
and that the Jordan-frame parameterized post-Newtonian
quantities whose values differ from those of GR are
\begin{equation}\label{ppn}
\tilde{\beta} = 1+ \frac{a^{2}(\phi_{\infty})b(\phi_{\infty})}
{2 [ 1+a^{2}(\phi_{\infty}) ]^{2}} \,, \qquad \tilde{\gamma} =1 -
\frac{2a^{2}(\phi_{\infty})}{1+a^{2}(\phi_{\infty})} \,,
\end{equation}
where $\phi_{\infty}$ is the asymptotic value of the scalar field
far from the source and $b(\phi) = {\hbox{d}} a(\phi)/{\hbox{d}}\phi$
\cite{damourrev}.
Constraints on these PPN parameters from solar system observations
provide the best bounds on the model, with $|\tilde{\gamma}-1|<
2.3 \times 10^{-5}$ inferred from Cassini tracking, and
$|\tilde{\beta}-1| < 2.3 \times 10^{-4}$ from lunar laser ranging
\cite{will}. This corresponds to the coupling bounds
\begin{equation}\label{ss_bounds}
a^{2}(\phi_{\infty}^{SS}) < 1.2 \cdot 10^{-5} \,
\quad \hbox{and} \quad
a^{2}(\phi_{\infty}^{SS})|b(\phi_{\infty}^{SS})| < 4.6 \cdot
10^{-4} \,,
\end{equation}
where $\phi_\infty^{SS}$ denotes the asymptotic value of $\phi$ as
one leaves the solar system.
\subsection{(Quasi) Brans/Dicke scalars}
\label{sec:qbd models}
The simplest scalar-tensor theory is Brans-Dicke theory \cite{BD},
for which $a(\phi)=a_{s}$ is constant. In this case -- see
eq.~(\ref{ss_bounds}) -- solar system tests constrain $a_{s} < 3.5
\cdot 10^{-3}$, and so all of the predictions of Brans-Dicke
theory are very close to those of GR.
The next-simplest theory, which we call quasi-Brans/Dicke (qBD)
theory, allows $a(\phi)$ to vary slowly with $\phi$ \cite{qBD}:
\begin{equation}\label{quadmod}
A(\phi) = \exp(a_{s} \phi + \textstyle{
\frac{1}{2}} \, b_{s} \phi^{2})\,, \qquad
a(\phi) = a_{s} + b_{s} \phi\,.
\end{equation}
This introduces a variety of new phenomena because it makes the
strength of the scalar-matter couplings depend on $\phi$, and so
allows them to vary with position and time \cite{PD1, walkingphi}. This
means that couplings in exotic environments (like stellar
interiors) could be stronger than na\"ively expected without
running into conflict with the strong solar-system bounds
mentioned above (this is similar in spirit to, but
different in detail from, evading these bounds through
matter-dependent scalar self-couplings
\cite{chameleon2,chameleon, environscalar}).
Assuming $\phi$ is defined such that $\phi \to 0$
asymptotically far from the Sun, eqs.~\pref{ss_bounds} show that
the strong bound on $a_s$ implies that solar system bounds do not
strongly constrain $b_s$.
The constraints on $b_s$ are comparatively weak, and the best come
from studies of binary pulsars \cite{binarypulsarobs3,
binarypulsarobs1, binarypulsarobs2}. The precise timing
measurements that are possible for binary pulsars allow their
orbits to be accurately measured over long periods of time, and
comparing measurements with the predictions of the qBD model leads
to the constraint $b_{s} \gtrsim -5$
\cite{efrev,binpult,binpulandgravwav,binpulnew}.
Measurements of the redshift of
spectral lines from neutron stars leads to a weaker constraint
$b_{s} \gtrsim -9$ \cite{psaltis}. The main
uncertainties in these bounds come from the poor understanding of
the nuclear equation of state appropriate for neutron star
interiors.
Observations disfavor negative $b_s$ because for $b_{s} \roughly< -4$,
compact objects like neutron stars exhibit a phenomenon called
spontaneous scalarization \cite{damourspsc}.
For sufficiently dense objects, whose
precise threshold density depends on $b_{s}$ and the nuclear
equation of state, it is possible for the star to support a
nonzero scalar field even though the scalar coupling vanishes
asymptotically far from the star: $a(\phi_{\infty}) = 0$.
Furthermore it is known that when scalarization takes place it is
the stable solution to the field equations \cite{stab3,chibarev}.
Scalarized neutron stars tend to be disfavored by binary pulsar
observations because scalarization significantly changes the
dynamics and radiation generated by a neutron star.
Collapse processes involving scalarized neutron stars have been
investigated by a number of authors \cite{collapse},
who found that the waveform of the emitted
gravitational radiation depends strongly on $b_{s}$. Moreover,
scalar-tensor gravity allows monopole and dipole radiation, which
are forbidden in GR. Thus, future measurements of gravitational
waves may lead to improved constraints on $b_{s}$ \cite{binpulandgravwav}.
\section{Stellar structure}
One of the potential uncertainties when trying to constrain
scalar-tensor models using astrophysical tests of gravity is the
strength of the scalar field that is supported by objects like the
Sun or a neutron star. For macroscopic astrophysical objects made
up of weakly coupled constituents one's intuition is that the
scalar coupling to the macroscopic object should be proportional
to the scalar coupling to each of its constituents, and we shall
see in this section that this intuition is generally borne out for
weakly coupled non-relativistic systems. We shall also see that it
can fail for relativistic systems, even in the limit of weak
scalar coupling.
In order to do so, we next summarize how the scalar field alters
the physics of stellar interiors, since it is the matching to this
that dictates the properties of the external field configurations
to which external observers have observational access.
\subsection{Equations of hydrostatic equilibrium}\label{sec:st_str}
The equations of hydrostatic equilibrium in scalar-tensor gravity
were first studied in \cite{salmona}, and subsequently in
\cite{nutku,saakmnats,matsuda,yokoi,heintzhill,hillheintz,reizlin,saenz,
bruckman,banerjee,avakyan,zaglauer,damourspsc,bruckman2,salgsud,whinnett,
kozyrev,whintor,salgsud2,yazadjiev,kazanas}.
Following \cite{damourspsc} we model the stellar
interior as a static, spherically-symmetric and perfect fluid in
local thermal equilibrium, locally characterized (in the Jordan
frame) by its pressure, $P$, and mass-energy density, $\rho$.
Time-independence and spherical symmetry allow the use of
Schwarzschild-like coordinates for the Einstein-frame metric
interior to the star:
\begin{equation}\label{sscoords}
g_{\alpha \beta} \, {\hbox{d}} x^{\alpha} {\hbox{d}} x^{\beta} =
-e^{\nu(r)} {\hbox{d}} t^{2} + \frac{{\hbox{d}} r^{2}}{1-2\mu(r)}
+ r^{2} {\hbox{d}}\Omega^{2}\,,
\end{equation}
where ${\hbox{d}}\Omega^2 = {\hbox{d}} \theta^2 + \sin^2\theta \, {\hbox{d}} \phi^2$
denotes the usual round angular metric on the 2-sphere and
$\nu(r)$ and $\mu(r)$ are to-be-determined functions that depend
only on the radial coordinate $r$.
The Jordan-frame energy-momentum tensor for matter is defined by
\begin{equation}
\tilde{T}^{\alpha \beta} = -\frac{2}{\sqrt{-\tilde{g}}}
\frac{\delta S_{\rm m}}{\delta \tilde{g}_{\alpha \beta}}\,,
\end{equation}
and is related to the Einstein-frame energy-momentum tensor by
\begin{equation}
T_{\alpha \beta} = A^{2}(\phi) \, \tilde{T}_{\alpha \beta}\,,
\qquad
T_{\alpha}^{\phantom{\alpha} \beta} =
A^{4}(\phi) \, \tilde{T}_{\alpha}^{\phantom{\alpha} \beta}
\quad \hbox{and} \quad
T^{\alpha \beta} = A^{6}(\phi) \, \tilde{T}^{\alpha \beta}\,,
\end{equation}
where indices on $\tilde{T}_{\alpha\beta}$ are raised and lowered
using the Jordan-frame metric, $\tilde{g}_{\alpha\beta}$.
Being a Jordan-frame perfect fluid, the energy-momentum tensor has
the form
\begin{equation}\label{perf_fluid}
\tilde{T}_{\alpha \beta} = (\rho + P)\tilde{u}_{\alpha}
\tilde{u}_{\beta} + P \tilde{g}_{\alpha \beta}\,,
\end{equation}
where $P = P(r)$ and $\rho = \rho(r)$, and $\tilde{u}_{\alpha}$ is
the Jordan-frame 4-velocity of the perfect fluid, given in
co-moving coordinates by
\begin{equation}
\tilde{u}_{\alpha} = e^{\nu/2} \, A(\phi) \, \delta^t_{\alpha}\,,
\end{equation}
so that $\tilde g^{\alpha \beta} \tilde u_\alpha \tilde u_\beta = -1$.
When writing the field equations it is convenient to scale out a
dimensional factor of the central density, $\rho_0 := \rho(0)$,
from the density and pressure,
\begin{equation}
p(r) := \frac{P (r)}{\rho_{0}}
\quad \hbox{and} \quad
\varrho(r) := \frac{\rho(r)}{\rho_{0}} \,,
\end{equation}
in terms of which the equation of state is specified by writing
$\varrho(r) = \varrho[p (r); p_0]$. Here
\begin{equation}
p_0 := \frac{P(0)}{\rho_0} := \frac{P_0}{\rho_0} \,,
\end{equation}
labels the star's central pressure in the same units, and the
$p_0$-dependence of $\varrho[p ; p_0]$ is meant to
emphasize that the functional form of the equation of state,
$P(\rho)$, in general changes as one changes the central density.
Notice that our notation implies the identity
$\varrho(p_0;p_0)=1$.
With these choices the Einstein field equation,
eq.~(\ref{fieldeq1}), in this geometry boils down to the following
three conditions:
\begin{eqnarray}
\label{eqss1}
r \mu' + \mu &=& 4 \pi G \rho_{0} r^{2} A^{4}(\phi)
\, \varrho(p) + \frac{r^{2}}{2}(1-2\mu)\phi'^{2}\,, \\
\label{eqss2}
p ' &=& - [\varrho(p) + p] \left[
\frac{ 4 \pi G \rho_{0} r^{2} A^{4}(\phi) \, p + \mu}{r(1-2\mu)} +
\frac{r}{2} \, \phi'^{2} + a(\phi) \phi' \right]\,, \\
\label{eqss4}
\nu' &=& \frac{8 \pi G \rho_{0} r^{2} A^{4}(\phi)
\, p + 2\mu}{r(1-2\mu)} + r\phi'^{2} \,,
\end{eqnarray}
where primes denote derivatives with respect to $r$. The scalar
wave equation, eq.~(\ref{fieldeq2}), similarly becomes
\begin{equation}
\label{eqss3}
\phi'' = \frac{4 \pi G \rho_{0} A^{4}(\phi)}{1-2\mu}
\Bigl[ a(\phi) [\varrho(p) - 3p ]
+ r \phi' [\varrho(p) - p ] \Bigr]
- \frac{2(1-\mu)}{r(1-2\mu)} \phi'\,.
\end{equation}
These equations are to be integrated subject to the following
initial conditions at the centre of the star:
\begin{equation}
\label{ics_1}
\mu(0)=0\,,\qquad
p (0)=p_0 \,,\qquad
\phi(0)=\phi_{0}\,,\qquad
\phi'(0)=0\,.
\end{equation}
Writing eqs.~(\ref{eqss2}) and (\ref{eqss4}) as
\begin{equation}
2 p' + (p + \varrho) \Bigl[ \nu' + 2a(\phi) \phi'
\Bigr] = 0 \,,
\end{equation}
and integrating once allows $\nu$ to be written in terms of
$p $ and $\phi$:
\begin{equation}
\nu = -2f(p ;p_0) - 2\ln A(\phi) + {\rm const} \,,
\end{equation}
where
\begin{equation}\label{f_defn}
f(p ;p_0) = \int_{p_0}^{p }
\frac{{\hbox{d}}\hatp}{\hatp + \varrho(\hatp)} \,.
\end{equation}
Regarding eq.~(\ref{eqss1}) as a linear, first-order differential
equation for the product $r \mu$, allows it to be solved to obtain
\begin{equation}
\mu = \frac{1}{r} e^{-\int_{0}^{r} \tilde r \phi'^{2} d \tilde r}
\int_{0}^{r} \hat r^2\left[ \frac{\phi'^{2}}{2} +
4 \pi G \rho_{0}\, A^{4}(\phi) \, \varrho(p) \right]
e^{\int_{0}^{\hat r} \tilde r \phi'^{2} d \tilde r} {\hbox{d}} \hat r \,,
\end{equation}
where the integration constant is chosen such that (\ref{ics_1})
holds. This expression shows that $\mu$ is always non-negative. It
also shows that $\mu(r)$ should not be interpreted as the
mass-energy inside the ball of radius $r$, unlike in GR.
In principle, eqs.~(\ref{eqss1}) through (\ref{eqss3}) can be
integrated numerically, starting at $r={0}$ and continuing out to
larger $r$. In practice, this system of equations is singular at
$r=0$, so numerical integration must be started at some small
positive $r=r_{0}$. The initial conditions at $r_{0}$ can be
obtained from the power series expansions that are dictated by the
equations of motion themselves:
\begin{eqnarray}
\mu(r) &=& \frac{4\pi G \rho_{0} A^{4}_{0}}{3} \, r^{2}
+ \mathcal{O}(r^{4}) \,, \nonumber \\
p (r) &=& p_0 + \frac{2\pi G \rho_{0}\,
A^{4}_{0}}{3} (p_0 + 1) \bigl[ a^{2}_{0} (3p_0-1)
-(3p_0+1) \bigr] r^{2} + \mathcal{O}(r^{4}) \,, \nonumber\\
\phi(r) &=& \phi_{0} - \frac{2\pi G \rho_{0}\, A^{4}_{0}}{3}
\, a_{0} (3p_0-1)r^{2}+ \mathcal{O}(r^{4}) \,,
\end{eqnarray}
where $A_0 = A(\phi_0)$ and $a_0 = a(\phi_0)$.
\subsection{Matching to exterior observables}\label{sec:matching}
The integration within the stellar interior continues until
eventually the pressure $p $ vanishes. The value $r = R$
where this happens defines the (Schwarzschild coordinate) radius
of the star, beyond which the appropriate solution instead solves
the `matter-vacuum' field equations with $\rho = P = 0$. Of
course, for generic scalar-tensor theories it might happen that
$p$ never actually vanishes, since unlike for GR $p $
need not be a monotonically decreasing function. We comment where we
can on the stability of these configurations below, although as we shall
see they are unlikely to happen sufficiently close to the
Brans-Dicke limit and for non-relativistic equations of state.
\subsubsection*{Exterior solutions}
For $r > R$, an exterior solution is required to satisfy the vacuum field
equations,
\begin{equation}
\mathcal{R}_{\alpha\beta} + 2\,\partial_\alpha \phi \,\partial_\beta \phi = 0
\quad \hbox{and} \quad
\Box \phi = 0 \,,
\end{equation}
for which a closed-form static and spherically symmetric solution
may be found by taking $\phi = \phi(r)$ and using the metric
\begin{equation} \label{sphericalmetric}
{\hbox{d}} s^2 = - e^{2 u} \, {\hbox{d}} t^2 + e^{-2 u} \, {\hbox{d}} r^2 + e^{2v}
\, \Bigl[ {\hbox{d}} \theta^2 + \sin^2 \theta \, {\hbox{d}} \varphi^2
\Bigr] \,,
\end{equation}
where $u = u(r)$ and $v = v(r)$. The field equations for $\phi$,
$u$ and $v$ have the following solutions \cite{extsol}
\begin{equation} \label{BackgroundSolutions}
e^{2u} = \left( 1 - \frac{\ell}{r} \right)^x \,, \quad
e^{2v} = r^2 \left( 1 - \frac{\ell}{r} \right)^{1-x}
\quad \hbox{and} \quad
e^{2\phi} = e^{2\phi_\infty}
\left( 1 - \frac{\ell}{r} \right)^q \,,
\end{equation}
where the real constants $x$ and $q$ satisfy $x^2 + q^2 = 1$ and
are otherwise arbitrary, leaving three free integration constants:
$x$, $\ell$ and $\phi_\infty$.
These three constants are more conveniently rewritten in terms of
the large-$r$ limit of the solution when expressed in the original
Schwarzschild coordinates,
\begin{eqnarray}
{\hbox{d}} s^2 &=& \left( -1 + \frac{2\,GM}{r} + \dots \right) {\hbox{d}} t^2
+ \cdots \nonumber\\
e^\phi &=& e^{\phi_\infty} \left( 1 - \frac{GQ}{r}
+ \dots \right) \,,
\end{eqnarray}
where $M$ is the system's ADM mass \cite{ADM} in the Einstein
frame and $Q$ defines its `scalar charge'. For systems where the
scalar coupling, $a(\phi)$, is field-dependent, the asymptotic
value of the scalar field, $\phi_\infty$, may usefully be traded
for the asymptotic value of the scalar coupling strength,
$a_\infty = a(\phi_\infty)$.
Together with the star's radius, we see that from the point of
view of an external observer there are four independent bulk
parameters that characterize any such star in scalar-tensor
theory: $M$, $Q$, $R$ and $\phi_\infty$. These can be calculated
in principle as functions of the stellar equation of state by
matching the exterior solution to that of the interior at $r = R$,
implying that they are functions of the two parameters, $\phi_0$
and $p_0$, that define the initial conditions of integration
at the stellar center, eq.~\pref{ics_1}. One of our main goals is
to identify the two relations that must hold amongst the four
external parameters (for any given equation of state),
\begin{equation}
\xi_1(R,M,Q,\phi_\infty) = \xi_2(R,M,Q,\phi_\infty) = 0 \,,
\end{equation}
generalizing the familiar mass-radius relation, $R = R(M)$, that
expresses the content of stellar structure within GR. The explicit
form of these constraints for scalar-tensor models is discussed in
section \ref{sec:pertexp} below.
Physically, we expect the value of $\phi_{\infty}$ to depend on
physics external to the star or stellar system of interest,
governed by the local properties of the galaxy in which the stars
are located. $\phi_{\infty}$ could also depend on cosmological
time if $\phi$ is light enough to be evolving over cosmological
time intervals. Thus, only one combination of the four parameters
$M$, $Q$, $R$ and $\phi_\infty$ can vary in practice from star to
star within a specific galactic neighborhood at a given
cosmological epoch. In particular, this means that the scalar
charge $Q$ and mass $M$ of a star are not independent parameters
for any stars within a local neighborhood at a given epoch.
This result greatly simplifies the phenomenological analysis of
binary pulsars. Instead of having two independent parameters
describing each of the two stars in the binary system, there is in
practice only one and so it suffices to describe the observational
constraints as a function of the masses of the two stars, just as
in GR. In principle the dependence on $\phi_\infty$ could
complicate the combining of the implications of many pulsars that
are located far from one another, but a simple estimate shows that
$\phi$ does not vary strongly across the galaxy, so in practice
$\phi_{\infty}$ can be taken to have a common value for all stars in the
galaxy. This approximation is implicitly used in
refs.~\cite{efrev,binpult,binpulandgravwav}, where combined
data from several binary pulsars are plotted on one theory-space
exclusion plot in terms only of the masses of the two components.
To estimate the variance of $\phi(r)$ across the galaxy, we may
use the non-relativistic limit for which $g^{\alpha \beta}
T_{\alpha \beta} = A^4(\phi) \tilde g^{\alpha \beta} \tilde
T_{\alpha \beta} = A^4(\phi) (3p - \rho) \simeq - A^4(\phi) \,
\rho$ and so
\begin{equation} \label{phieqgal}
\Box \phi = \frac{e^{-\nu/2} \sqrt{1 -2 \mu}}{r^2 }
\Bigl( e^{\nu/2} \, r^2 \sqrt{1 - 2 \mu} \; \phi'
\Bigr)' \simeq \frac{1}{r^2} \Bigl( r^2 \, \phi' \Bigr)'
\simeq 4 \pi G a(\phi) A^4(\phi) \, \rho \,.
\end{equation}
If, on the right-hand-side, we assume $\phi \simeq \phi_g$ to be
approximately constant, then $a(\phi) A^4(\phi) \simeq a_g A_g^4$
is also constant. Then the only $r$-dependence on this side comes
from $\rho$, and we may estimate how large a deviation from
constant $\phi$ is implied by eq.~\pref{phieqgal}. For $r$ large
enough that $\rho$ is dominated by Dark Matter, the density
profile is $\rho \simeq \rho_g \, (\ell/r)^2$ so that orbital
velocities are $r$-independent: $v_g^2 \simeq G M(r)/r \simeq 4
\pi G \rho_g \, \ell^2$. Then eq.~\pref{phieqgal} implies
\begin{equation}
\phi(r) \simeq \phi(r') + 4\pi G a_g A_g^4 \rho_g \ell^2 \ln \left(
\frac{r}{r'} \right) \simeq a_g A_g^4 v_g^2 \ln
\left( \frac{r}{r'} \right) \,.
\end{equation}
This is clearly small because it depends only logarithmically on
$r$, and both $a_g$ and $v_g$ are small. See Appendix B of \cite{dam2pn} for a related discussion.
\subsubsection*{Matching at $r = R$}
Requiring continuity of the exterior and interior profiles across
$r = R$ implies the three external parameters $M$, $Q$, and
$\phi_{\infty}$ must satisfy \cite{damourspsc}
\begin{eqnarray}
\label{match1}
s := \frac{GM}{R} &=& \frac{\mathcal{K}}{2\sqrt{1-2\mu_\star}}
\exp \biggl[- \frac{\mathcal{K}}{\mathcal{L}} \;
{\rm arctanh} \left( \frac{\mathcal{L}}
{\mathcal{J}} \right)\biggr]\,, \\
\label{match2}
a_{{\scriptscriptstyle A}} := \frac{Q}{M} &=& \frac{2R \, \phi_\star'
(1-2\mu_\star)}{\mathcal{K}}\,,\\
\label{match3}
\phi_{\infty} &=& \phi_\star + \frac{2 R \,\phi_\star'
(1-2\mu_\star)}{\mathcal{L}} \;
{\rm arctanh} \left( \frac{\mathcal{L}}
{\mathcal{J}}\right)\,,
\end{eqnarray}
where
\begin{eqnarray}
\label{matchj}
\mathcal{J} &:=& 2(1-\mu_\star) + R^{2} \phi_\star'^{2}
(1-2\mu_\star) \,, \\
\label{matchk}
\mathcal{K} &:=& 2\mu_\star + R^{2}\phi_\star'^{2}
(1-2\mu_\star) \,, \\
\label{matchl}
\mathcal{L} &:=& \sqrt{4\mu_\star^{2} +
4R^{2}\phi_\star'^{2}(1-\mu_\star)(1-2\mu_\star)
+ R^{4} \phi_\star'^{4}(1-2\mu_\star)^{2}} \,,
\end{eqnarray}
and $\mu_\star := \mu(R)$, $\phi_\star := \phi(R)$, and
$\phi_\star' := \phi'(R)$.
It is sometimes useful to decompose the ADM mass as $M = M_{\scriptscriptstyle B} +
\Delta M$, where $\Delta M$ is the gravitational binding energy
and $M_{\scriptscriptstyle B}$ is the baryonic mass, defined by
\begin{equation}\label{barmass}
M_{\scriptscriptstyle B} := 4 \pi m_{0} \int_{0}^{R} {\hbox{d}} r \;
\frac{n A^{3}(\phi) \, r^{2}}{\sqrt{1-2\mu}}\,,
\end{equation}
where $n(r)$ is the local baryon-number density, and $m_{0}$ is the
average mass of an individual baryon. This baryonic mass can be
related to the stellar pressure and density if the star has
constant entropy per nucleon, since in this case it follows from
energy conservation that
\begin{equation}
\left( \frac{\rho}{n} \right)'
+ P \left(\frac{1}{n}\right)' =0 \,.
\end{equation}
This equation can be used to write $n$ in terms of $p $,
\begin{equation}\label{n_eqn}
n=n_{0} \left[ \frac{p
+ \varrho(p)}{p_0 + 1} \right] e^{-f(p ;p_0)} \,,
\end{equation}
where $n_{0}=n(0)$ and $f(p; p_0)$ is defined in equation
(\ref{f_defn}). The baryonic mass then becomes
\begin{equation}\label{barmass2}
M_{\scriptscriptstyle B} = 4 \pi m_{0}n_{0} \int_{0}^{R}
e^{-f(p)} \left[ \frac{p + \varrho(p)}{p_0 + 1}
\right] \frac{A^{3}(\phi) r^{2} {\hbox{d}} r}{\sqrt{1-2\mu}} \,.
\end{equation}
The quantities $s$ and $a_{\scriptscriptstyle A}$ given by the matching equations,
(\ref{match1}) and (\ref{match2}),
are in themselves useful because each has a
physical interpretation. $s$ is called the self-gravity\footnote{Note that
Will \cite{will} uses $s$ to denote the sensitivity of the mass to the
gravitational constant, which he defines as
$-(\partial \log M)/(\partial \log G)$. This definition of $s$ differs from
our definition, but has the same
order of magnitude.}, or
compactness, of the star and it provides a dimensionless measure
of how relativistic its gravitational field is at the stellar
surface, $r = R$. For non-relativistic stars $s \ll 1$, and in
general relativity $s \leq \frac49$ for any star within which the
mass-energy density, $\rho$, is a non-increasing function of $r$,
a result known as Buchdahl's theorem \cite{buchdahl}.\footnote{The
potential generalization of this result to scalar-tensor gravity
is investigated in \cite{buchdahlsctens}.}
Similarly, the quantity $a_{{\scriptscriptstyle A}}$ can often be interpreted as the
effective scalar-matter coupling of the star as seen by the
observer at infinity. This can be understood by considering the
lowest-order non-relativistic interaction energy between two
widely separated stars, A and B, \cite{damourrev}:
\begin{equation}
U_{{\scriptscriptstyle AB}} = -\frac{GM_{{\scriptscriptstyle A}}M_{{\scriptscriptstyle B}}}{r_{{\scriptscriptstyle AB}}}
-\frac{GQ_{{\scriptscriptstyle A}}Q_{{\scriptscriptstyle B}}}{r_{{\scriptscriptstyle AB}}}
\equiv
- \frac{\widetilde{G}_{{\scriptscriptstyle AB}}M_{{\scriptscriptstyle A}} M_{{\scriptscriptstyle B}}}{r_{{\scriptscriptstyle AB}}} \,,
\end{equation}
where
\begin{equation}\label{geff}
\widetilde{G}_{{\scriptscriptstyle AB}} = G (1 + a_{{\scriptscriptstyle A}} a_{{\scriptscriptstyle B}})
\end{equation}
is the effective Jordan-frame gravitational constant between the
two stars, and $r_{{\scriptscriptstyle AB}}$ is the distance between them.
All quantities are measured in units in which
the Einstein-frame metric is asymptotically Minkowski\footnote{As mentioned earlier, to convert to units with an asymptotically Minkowski Jordan-frame metric
multiply $\widetilde{G}_{{\scriptscriptstyle AB}}$ by $A^{2}(\phi_{\infty})$, multiply $r_{{\scriptscriptstyle AB}}$ by
$A(\phi_{\infty})$, and divide $M_{\scriptscriptstyle A}$, $M_{\scriptscriptstyle B}$ and $U_{{\scriptscriptstyle AB}}$ by
$A(\phi_{\infty})$.}.
The connection between $a_{\scriptscriptstyle A}$ and the scalar coupling can also
be seen in another way. Since in the non-relativistic limit we
have $M \simeq M_{\scriptscriptstyle B}$, and since each individual baryon if
separated to infinity would microscopically satisfy $q_b/m_b =
a_\infty = a(\phi_\infty)$, we expect that for macroscopic
non-relativistic systems $Q/M = (N q_b)/(N m_b) = a_\infty$ as
well, so
\begin{equation}\label{limits1}
\lim_{s \to 0} a_{{\scriptscriptstyle A}} = a(\phi_{\infty})
\quad \hbox{and} \quad
\lim_{s \to 0} \widetilde{G}_{{\scriptscriptstyle AB}} = \widetilde{G} \,,
\end{equation}
where $\tilde{G}$ is the Jordan-frame gravitational constant
defined in equation (\ref{Geff}) \cite{damourrev}. We shall see
below that the prediction $Q/M = a(\phi_\infty)$ is given by a
more detailed matching calculation, where it can be seen to hold
independent of the stellar equation of state in the limit of both
non-relativistic sources and weak scalar coupling.
\subsubsection*{The black hole limit}
For sufficiently massive stars the only stable configuration in GR
is a black hole, for which there is only the exterior solution,
eq.~\pref{BackgroundSolutions} with $x=1$ and $q=0$ \cite{Wbg,MTW}.
In this case the role of matching
to an interior geometry across $r = R$ becomes replaced by the
boundary condition that the geometry not have a physical
singularity at the event horizon, $r = R_{bh}$, making this the
natural equivalent of the stellar radius, $R$, for a black hole.
Since for spherically symmetric stars in GR we have $R_{bh} = 2\,
GM$, we see that for black holes the mass-radius condition
predicted is particularly simple: $s = GM/R_{bh} = \frac12 >
\frac49$.
For a black hole coupled to scalar fields the exterior solution,
\pref{BackgroundSolutions}, always has a curvature singularity at
$r = \ell$ whenever $q \ne 0$ \cite{damourrev}.
This shows that only $q = 0$
describes a legitimate black hole, for which $\phi = \phi_\infty$
is constant and arbitrary (this is unchanged if $a_\infty =
a(\phi_\infty) \ne 0$, because there is no matter exterior to the
black hole with which to couple). The metric is then given by the
Schwarzschild geometry, just as for GR, implying $s = \frac12$ and
$Q = 0$ (for all $\phi_\infty$) even when scalars are present.
This is a special case of the no-hair theorems, originally formulated for
gravity in \cite{nohairorig} and extended to scalar fields in
\cite{nohairold}.
\subsubsection*{Stability}
\label{sec:stability}
\FIGURE[r]{\epsfig{file=stab-example.eps,angle=270,width=7.5cm}
\label{fig:stab_example}
\caption{$M_{{\scriptscriptstyle B}}$ versus $M$ for constant-density stars in
Brans-Dicke theory, with $\phi_{\infty}=0$ and $a_{s}^{2}=0.2$.}
}
Once a solution of equations (\ref{eqss1})-(\ref{eqss3}) is obtained,
and the external parameters (\ref{match1})-(\ref{match3}) are calculated,
it must be checked whether the solution is stable against perturbations.
To do this properly requires working with the time-dependent equations
of stellar structure. We here instead follow \cite{damourspsc,stab3,chibarev}
and perform a simplified analysis.
The main idea builds on the observation that (for general relativity)
the equations of hydrostatic equilibrium are equivalent to
the problem of extremizing $M$, while keeping $M_{{\scriptscriptstyle B}}$ fixed
\cite{Wbg}. The idea used in \cite{damourspsc,stab3,chibarev} is to assume
the same is true for scalar-tensor theories (with $\phi_{\infty}$
fixed as well as $M_{\scriptscriptstyle B}$) although we have been unable to
prove this so far in scalar-tensor gravity.
For given values of $M_{{\scriptscriptstyle B}}$ and $\phi_{\infty}$, one generically
finds that there can exist multiple stellar configurations with
different values of $M$. This is illustrated by the plot of
$M_{{\scriptscriptstyle B}}$ versus $M$ given in figure \ref{fig:stab_example}
for a one-dimensional family of stellar configurations
that share a common value for $\phi_{\infty}$. Whenever two
or more values of $M$ are obtained for a given $\phi_\infty$
we take the configuration with the lowest value of $M$ to be stable,
and the others to be unstable.
\subsection{Stellar structure for quasi-Brans/Dicke scalars}
\label{sec:st_str_quadmod}
Of course all of the predictions for quantities like $Q/M$ depend
in principle on the details of the coupling function, $a(\phi)$,
in addition to depending on the stellar equation of state. In this
section we specialize the equations of the previous section to the
quasi-Brans/Dicke (qBD) model of eq.~(\ref{quadmod}), since this
boils the dependence on $a(\phi) = a_s + b_s \phi$ down to the
dependence on the two parameters $a_s$ and $b_{s}$.
Because the gravity-scalar part of the theory is invariant under
constant shifts, $\phi \to \phi + c$, this transformation can be
used to set $a_{s}$ to zero with no loss of generality, provided
that $b_{s} \neq 0$. With this choice, the matter coupling can be
seen to have a reflection symmetry $\phi \to -\phi$. We henceforth
choose this convention for $a_s$, making it sufficient to follow
the dependence of observables on $b_s$.
Consider now integrating the field equations starting from $r =
0$. The information that used to reside in $a_s$ now resides in
the value of the field (or coupling) at the stellar origin, $a_0
:= a(\phi_{0}) = b_s \phi_0$. Suppose first that the initial
value, $\phi_0$, chosen for $\phi$ at the stellar center is such
that the coupling vanishes there, $a(\phi_{0}) = 0$. In this case
the reflection symmetry implies $\phi(r) \equiv 0$ for all $r$ is
a solution to the field equations, and so $a(\phi) = a_0 = 0$,
everywhere within the stellar interior. (That this is a solution
can be seen by direct inspection of eq.~\pref{eqss3}.) In this
case the equations of stellar structure, (\ref{eqss1}) through
(\ref{eqss3}), uniquely reduce to those of GR.
If instead $a_0 = a(\phi_{0}) = b_s \phi_0 \neq 0$, then, after
the change of variables
\begin{equation}
\label{chvar1}
\varphi = (\phi - \phi_{0})/a_{0} \,, \qquad
u = 8 \pi G \rho_{0} A^{4}_{0} \, r^{2} \,,
\end{equation}
\noindent
the equations of stellar structure simplify to
\begin{eqnarray}\label{eqssnew1}
\dot{\mu} &=& -\frac{\mu}{2u}
+ \frac{\varrho(p) \, e^{4a_0^2 \varphi \, (1
+b_{s} \varphi / 2)}}{4} + a_0^2 u(1-2\mu)\dot{\varphi}^{2}\,, \\
\label{eqssnew2}
\dot{p } &=& - [\varrho(p) + p\,]
\left[ \frac{\mu}{2u(1-2\mu)} +
\frac{p e^{4a_0^2 \varphi(1+b_{s} \varphi / 2)}}{4(1-2\mu)}
+a_0^2 \dot{\varphi}(1+u\dot{\varphi}+b_{s}\varphi) \right]\,, \\
\label{eqssnew3}
\ddot{\varphi} &=& -\frac{(3-4\mu)\dot{\varphi}}{2u(1-2\mu)}
+ \frac{e^{4a_0^2\varphi(1+b_{s}\varphi / 2)}}{8u(1-2\mu)}
\Bigl[ (1+b_{s}\varphi)[\varrho(p)-3p\,]
+ 2[\varrho(p)-p \,] u\dot{\varphi} \Bigr]\,,
\end{eqnarray}
where dots denote derivatives with respect to $u$. These equations
show that $a_0 = a(\phi_0) = a(r = 0)$ only enters physical
observables through its square: $a^2_{0}$. In terms of these
variables the initial conditions are
\begin{equation}
\label{quadmod_ics}
\mu(0)=0 \,, \qquad
p (0)=p_0 \,, \qquad
\varphi(0)=0 \,, \qquad
\dot{\varphi}(0)=\frac{1}{12} \, (1-3p_0) \,.
\end{equation}
The profiles $\mu(u)$, $p (u)$ and $\varphi(u)$ obtained by
integrating these equations depend only on the three parameters
$a_0^2$, $b_{s}$, $p_0$, as well as on the choice of equation
of state $\varrho(p)$.
The baryonic mass, eq.~(\ref{barmass2}), then becomes
\begin{eqnarray}
M_{\scriptscriptstyle B} &=& \frac{2 \pi m_{0} n_{0}}{(8 \pi G
\rho_{0})^{3/2}A^{3}_{0}} \int_{0}^{U} {\hbox{d}} u \;
\frac{p + \varrho(p)}{p_0 + 1}
\, e^{-f(p ;p_0)} e^{3 a_0^2 \varphi (1+b_{s}\varphi/2)}
\sqrt{\frac{u}{1-2\mu}} \,,
\nonumber \\
\label{barmass_quadmod} & := & \frac{2 \pi m_{0} n_{0}}{(8 \pi
G \rho_{0})^{3/2}A^{3}_{0}} \;
\mathcal{M}(a_0^2,b_{s},p_0) \,,
\end{eqnarray}
where
\begin{equation}\label{U_defn}
U(a_0^2,b_{s},p_0) = 8 \pi G \rho_{0} A^{4}_{0} \, R^{2}
\end{equation}
is the value $U = u(R)$ corresponding to the boundary of the star.
For a given equation of state, $U$ depends only on $a_0^2$,
$b_{s}$ and $p_0$, because it can be computed by finding the
point at which $p (u)$ vanishes.
Note that the quantities $\mathcal{J}$, $\mathcal{K}$, and
$\mathcal{L}$, defined in equations (\ref{matchj}) through
(\ref{matchl}), depend only on $\mu_\star = \mu(U)$ and
$R^{2}\phi'^{2}(R)=4 a_0^2 U^{2}\dot{\varphi}^{2}(U)$. Therefore,
the compactness $s$ --- given by equation (\ref{match1}) --- also
depends only on $a_0^2$, $b_{s}$ and $p_0$. The matching
conditions at the stellar surface, eqs.~(\ref{match2}) --
(\ref{match3}), can be re-written using these variables as
\begin{equation}\label{f}
\frac{\phi_{\infty}-\phi_{0}}{a_{0}} = \varphi(U) + \frac{4U
\dot{\varphi}(U) (1-2\mu_\star)}{\mathcal{L}}
\, {\rm arctanh} \left(
\frac{\mathcal{L}}{\mathcal{J}}\right) :=
\mathcal{F}(a_0^2,b_{s},p_0) \,,
\end{equation}
and
\begin{equation}\label{aa}
a_{{\scriptscriptstyle A}} = \frac{Q}{M} = \frac{4a_{0} \, U \dot{\varphi}(U)
(1-2\mu_\star)}{\mathcal{K}} := a_{0}
\mathcal{A}(a_0^2,b_{s},p_0) \,,
\end{equation}
which define the quantities $\mathcal{F}$ and $\mathcal{A}$.
\subsubsection*{Properties of the pressure profile}
\label{sec:phys_real}
This section briefly pauses to investigate whether the pressure,
$p$, decreases monotonically, and whether the equation
$p (u) = 0$ must have a solution.
It is clear that (unlike for GR) ${\hbox{d}} P/{\hbox{d}} r$ can be positive
for sufficiently large couplings and pressures. This follows from
evaluating equations (\ref{eqssnew2}) and (\ref{quadmod_ics}) at
the stellar center,
\begin{equation}
\dot{p }(0) = - \frac{1+p_0}{12} \Bigl[ 1+3p_0 +
a_0^2(1-3p_0) \Bigr] \,,
\end{equation}
which (for $p_0 \ge 0$) is strictly non-positive unless
$p_0 = P_0/\rho_0 > \frac13$ and
\begin{equation}\label{monpresineq1}
a_0^2 > {a^2_0}_{\rm crit} :=
\frac{3p_0+1}{3p_0-1} \,.
\end{equation}
In general, to determine whether the pressure profile is monotonically
decreasing everywhere,
it is not sufficient to look only at $\dot{p}(0)$. In the qBD theory
with $b_{s}>0$, it can happen that $\dot{p}(0) >0$,
$\dot{p}(u_{\star})=0$ for some $u_{\star} > 0$, and
$p(U) = 0$ for some $U>u_{\star}$. Such a star has its maximum pressure
in between the centre and the surface.
Similarly, if $b_{s}<0$, it can happen that $\dot{p}(0)<0$,
$\dot{p}(u_{\star})=0$ for some $u_{\star}>0$,
$p(u) > 0$ for all $u \geq 0$, and $p(u) \to \infty$ as
$u \to \infty$. Such a solution describes an object of infinite extent,
and seems unphysical.
By contrast, for Brans-Dicke theory our numerical calculations
suggest that this kind of complicated behaviour does not take
place, and the pressure profile
$p(u)$ cannot have any local extrema. We have been unable to
prove this analytically, but we have shown that if
$a^2_0={a^2_0}_{\rm crit}$, then $p(u)=p_0$ for all $u$,
for all equations of state. This constant-pressure
solution appears to be the boundary
between the solutions with $\dot{p} > 0$, and those with
$\dot{p} < 0$. For Brans-Dicke theory Salmona
\cite{salmona} has shown that if
$p / \varrho < 1/3$ everywhere, then $p$ decreases
monotonically everywhere.
If we assume that this simple behaviour of the pressure profile in
Brans-Dicke theory is correct, then
imposing certain requirements on the properties of the solutions of
the equations of hydrostatic equilibrium leads to constraints on
$a_{0}^{2}$. If we require that the pressure is
decreasing for all $p_0$, then it follows that $a_{0}^{2} < 1$.
A more conservative constraint on $a_0^2$ can be obtained by using
some information about the equation of state. For these purposes
it suffices to consider only neutron stars, because only these are
relativistic enough to have $p_0 > 1/3$. But neutron-star
interiors can be modelled as relativistic polytropes
\cite{astrophys}, with
\begin{equation}
\frac{\rho}{m_{b}} = n + \frac{\kappa \, \hat{n}}{\gamma-1}
\left( \frac{n}{\hat{n}} \right)^{\gamma}
\quad \hbox{and} \quad
\frac{P}{m_{b}} = \kappa \, \hat{n} \left( \frac{n}{\hat{n}}
\right)^{\gamma}\,,
\end{equation}
where $n$ is the baryon number density, $\hat{n}=0.1 \; {\rm
fm}^{-3}$ is a typical nuclear density, $m_{b} = 1.66 \cdot
10^{-24} \, {\rm g}$ is the mass of an average baryon, and
$\kappa$ and $\gamma$ are constants ($\gamma$ is called the
polytropic index). Notice that these choices imply the central
density and pressure are related by $\rho_0/m_b = n_0 + \kappa \,
\hat n (n_0/\hat n)^\gamma/(\gamma - 1)$, and so
\begin{equation}
1 = \frac{m_b n_0}{\rho_0} + \frac{p_0}{\gamma - 1} \,,
\end{equation}
implying the maximum value of $p_0$ that is possible is
$p_0^{\rm max} = \gamma - 1$. The functions $\varrho(p)$
and $f(p)$ are similarly given by
\begin{eqnarray}
\varrho(p ;p_0) &=&
\frac{p }{\gamma-1} + \left( \frac{p }{
p_0}\right)^{1/\gamma} \left(1-\frac{
p_0}{\gamma-1} \right) \,, \\
f(p ;p_0) &=& \ln \left[
\frac{\gamma - 1 - p_0 + \gamma p
(p_0/p )^{1/\gamma}}
{(p_0+1)(\gamma - 1)} \right] \,.
\end{eqnarray}
Requiring that the pressure is decreasing for all relativistic polytropes
yields the constraint
\begin{equation}\label{monpresineq2}
a_0^2 < \frac{3\gamma -2}{3\gamma - 4} \,.
\end{equation}
For the neutron-star equations of state EOS II and EOS A of
ref.~\cite{damourspsc}, these bounds evaluate to $|a_{0}| < 1.29$
and $|a_{0}| < 1.26$, respectively. Although nowhere near as good
as the solar system bounds of eqs.~(\ref{ss_bounds}), they are
complementary because they apply to the coupling deep within a
neutron star interior, and rely only on general considerations,
rather than precise observations.
\subsection{Non-relativistic limit}
\label{sec:nonrellim}
In most stars, relativistic effects are negligible, allowing us to
take $p = P/\rho_0 \ll 1$ and $\mu \ll 1$. In this case, the
equations of stellar structure, eqs.~(\ref{eqss1}) through
(\ref{eqss3}), simplify to
\begin{eqnarray} \label{newt1}
r\mu' + \mu &\simeq& 4 \pi G r^{2}A^{4}(\phi)\rho +
\frac{r^{2} \phi'^{2}}{2} \ , \\
\label{newt2}
P' &\simeq& -\rho \left[ \frac{\mu}{r} + \frac{r\phi'^{2}}{2} +
a(\phi)\phi' \right] \,, \\
\label{newt3}
\phi'' &\simeq& 4 \pi G A^{4}(\phi) \rho
\Bigl[ a(\phi) + r\phi' \Bigr] - \frac{2\phi'}{r}\,, \\
\label{newt4}
\nu' &\simeq& \frac{2\mu}{r} + r\phi'^{2}\,,
\end{eqnarray}
where the energy density, $\rho$, is equivalent to the mass
density in the non-relativistic limit.
The matching conditions, eqs.~(\ref{match1}) through
(\ref{match3}), similarly simplify to
\begin{eqnarray} \label{newt_match1}
\frac{GM}{R} &=& \mu(R) + \frac{[R \, \phi'(R)]^{2}}{2}\,, \\
\label{newt_match2}
\frac{GQ}{R} &=& R \, \phi'(R)\,, \\
\label{newt_match3}
\phi_{\infty} &=& \phi(R) + R \, \phi'(R)\,.
\end{eqnarray}
Notice that these last two matching conditions quite generally
imply
\begin{equation}
\phi(R) = \phi_{\infty} - \frac{GQ}{R} \,,
\end{equation}
as is appropriate for the non-relativistic limit of the known
external solutions.
\subsubsection*{Newtonian polytropes}
The equation of state that is of most interest for Newtonian
systems is that of a polytrope,
\begin{equation}\label{eq:polytrope}
P = K \rho^{1+1/\chi}\ ,
\end{equation}
where $\chi$ is the polytropic index (a constant that need not be
an integer), and $K$ is a constant. We briefly specialize to this
equation of state here for later use in subsequent sections.
Specializing eqs.~(\ref{newt1}) -- (\ref{newt4}) to this equation
of state, (\ref{eq:polytrope}), the above equations simplify after
changing to dimensionless variables
\begin{equation}\label{newtpoly_chvar}
r = r_s w \,,\qquad \rho=\rho_{0} \,\theta^{\chi} \,,
\end{equation}
where $\rho_{0} = \rho(0)$, and $r_s$ is a length scale that is to
be specified later. Then equations (\ref{newt1}) -- (\ref{newt4})
become
\begin{eqnarray}
\label{eq:poly1}
\ddot{\phi} &=& -\frac{2 \dot{\phi}}{w} +
C A^{4}(\phi) \theta^{\chi}
\Bigl[ a(\phi) + w \dot{\phi} \Bigr]\ , \\
\label{eq:poly2}
\frac{{\hbox{d}}}{{\hbox{d}} w} \left[ \zeta w^{2} \dot{\theta}
+ \frac{1}{2}w^{3}
\dot{\phi}^{2} + w^{2} a(\phi) \dot{\phi} \right]
&=& -C A^{4}(\phi) w^{2} \, \theta^{\chi}
- \frac{w^{2}}{2} \, \dot{\phi}^{2}\ ,
\\
\label{eq:poly3}
\mu &=& - w\left[ \zeta \dot{\theta}
+ \frac{w}{2} \, \dot{\phi}^{2} +
a(\phi) \dot{\phi} \right]\ ,
\end{eqnarray}
where dots now denote ${\hbox{d}}/{\hbox{d}} w$, and the dimensionful
parameters are all rolled into the new dimensionless constants,
\begin{equation}\label{eq:a_polytrope}
\zeta := K(\chi+1) \rho_{0}^{1/\chi}\ ,\ \
C := 4 \pi G r_s^{2} \rho_{0}\ .
\end{equation}
In these variables the initial conditions are
\begin{eqnarray}\label{eq:newtpoly_ic1}
&&\theta(0)=1 \,, \qquad \dot{\theta}(0)=0\ , \\
\label{eq:newtpoly_ic2}
\hbox{and} \quad
&&\phi(0)=\phi_{0} \,, \quad\;
\dot{\phi}(0) = 0\ .
\end{eqnarray}
We now specialize to the quasi-Brans/Dicke models of
eq.~(\ref{quadmod}), for which we had
\begin{equation} \label{chvar2}
a_0^2 = a^{2}(\phi_{0})=(a_{s} + b_{s} \phi_{0})^{2}
\quad \hbox{and} \quad
\varphi = \frac{\phi - \phi_{0}}{a_{0}} \ .
\end{equation}
If we choose
\begin{equation}\label{newtpoly_s}
r_s = \frac{1}{A^{2}_{0}} \sqrt\frac{\zeta}{4 \pi G \rho_{0}}
= \frac{1}{A^{2}_{0}} \sqrt{\frac{K(\chi+1)}{4\pi G}}
\rho_{0}^{(1-\chi)/2\chi} \,,
\end{equation}
so that
\begin{equation}
\zeta=C A^{4}_{0} \,,
\end{equation}
then equations (\ref{eq:poly1}) -- (\ref{eq:poly3}) become
\begin{eqnarray} \label{eq:poly_mod1_1}
- \frac{{\hbox{d}}}{{\hbox{d}} w} (w^{2} \dot{\theta}) - \frac{a_0^2
\, b_{s}}{\zeta} \, w^{2} \dot{\varphi}^{2} &=&
w^{2} e^{4 a_0^2 \varphi (1 + b_{s} \varphi / 2)}
\theta^{\chi} \Bigl[ 1 + a_0^2 \left( 1 + w\dot{\varphi}
+ b_{s} \varphi \right)^{2} \Bigr] \\
\label{eq:poly_mod1_2}
\frac{{\hbox{d}}}{{\hbox{d}} w}(w^{2} \dot{\varphi}) &=& \zeta
w^{2} e^{4 a_0^2 \varphi (1 + b_{s} \varphi / 2)}
\theta^{\chi} \left( 1 + w \dot{\varphi}
+ b_{s} \varphi \right) \\
\label{eq:poly_mod1_3}
\hbox{and} \quad
\mu &=& -w \left(\zeta \dot{\theta} +
\frac{1}{2}a_0^2 w \dot{\varphi}^{2} + a_0^2(1+b_{s}
\varphi)\dot{\varphi}\right)\ ,
\end{eqnarray}
and the initial conditions of eq.~\pref{eq:newtpoly_ic1} for
$\varphi$ now are:
\begin{equation}
\varphi(0)=0\ ,\ \ \ \dot{\varphi}(0)=0\ .
\end{equation}
This system implies the solutions have the following power-series
expansions near $w = {0}$:
\begin{eqnarray}
\theta(w) &=& 1 - \frac{1}{6}(1+a_0^2)w^{2} + \mathcal{O}(w^{4})\,\\
\varphi(w) &=& \frac{\zeta}{6} \, w^{2} + \mathcal{O}(w^{4})\ .
\end{eqnarray}
When written in terms of the variables $\theta$, $\varphi$, and
$w$, the matching equations, (\ref{newt_match1}) --
(\ref{newt_match3}), become
\begin{eqnarray}\label{poly_match1}
\frac{GM}{R} &=& - W \Bigl(\zeta \dot{\theta}(W) +
a_0^2 [1+b_{s}\varphi(W)] \dot{\varphi}(W) \Bigr) \\
\label{poly_match2}
\frac{GQ}{R} &=& a_{0} W \dot{\varphi}(W) \\
\label{poly_match3}
\hbox{and} \quad
\phi_{\infty} &=& \phi_{0} + a_{0} [ \varphi(W)
+ W\dot{\varphi}(W) ] \,,
\end{eqnarray}
where $W = w(R) = R/r_s$ denotes the stellar boundary.
\section{Solutions for weak central coupling}
Most of what is known about the solutions to the equilibrium
equations derived above is based on integrating them numerically,
revealing several surprising features such as the phenomenon of
spontaneous scalarization \cite{damourspsc}. But the regime of
most practical interest is weak coupling, $a_0^2 \ll 1$, and since
interesting phenomena like scalarization are already present in
this limit, it is worth exploiting the simplicity of the
weak-coupling regime at the outset, both to simplify the numerics
required and (in some cases, see below) to allow analytic
solutions to be obtained.
Our goal in this section is to systematically expand in powers of
the scalar coupling at the stellar centre, $a_0^2 = a^2(\phi_0) =
a^2(r = 0) \ll 1$. By comparing these perturbative results with
direct numerical integrations, we show that for small $a_0^2$
their domain of validity typically covers the entire stellar
interior.
Our motivation for pursuing the simplifications introduced by this
expansion is the ease of generalizing to different kinds of scalar
couplings and to different equations of state. However in this
paper we confine our attention to the well-studied qBD case,
$a(\phi) = a_s + b_s \phi$, in order to better compare with known
results.
\subsection{The weak-central-coupling expansion}
The weak central-coupling expansion is clearest in the case of qBD
models, for which the entire coupling function, $a(\phi)$, is
determined by the two parameters $a_0 = a(\phi_0)$ and $b_s$. In
this case the weak central-coupling solutions are obtained by
expanding the differential equations in powers of $a_0^2$. For
Brans-Dicke theory ($b_s = 0$) the coupling is constant and known
to be small, $a_0^2 = a_s^2 \roughly< 1.2 \times 10^{-5}$.
For qBD theories the lowest-order expansion of the equilibrium
equations, \pref{eqssnew1} through \pref{eqssnew3} gives
\begin{eqnarray}\label{eqssnew1p}
\dot{\mu} + \frac{\mu}{2u} - \frac{\varrho(p)}{4} &\simeq&
a_0^2 \left[ \varrho(p) \varphi \, \left( 1
+ \frac{b_{s} \varphi }{ 2} \right) +
u(1-2\mu)\dot{\varphi}^{2} \right] + \mathcal{O}(a_0^4) \\
\label{eqssnew2p}
\dot{p } + \frac{[\varrho(p) + p\,]
(2\mu + u p)}{4u(1-2\mu)} &\simeq& - a_0^2
[\varrho(p) + p\,]
\left[ \frac{p \varphi(1+b_{s} \varphi / 2)}{1-2\mu}
+ \dot{\varphi}(1+u\dot{\varphi}+b_{s}\varphi) \right]
+ \mathcal{O}(a_0^4) \\
\label{eqssnew3p}
\ddot{\varphi} + \frac{(3-4\mu)\dot{\varphi}}{2u(1-2\mu)} &-&
\frac{1}{8u(1-2\mu)}
\Bigl[ (1+b_{s}\varphi)[\varrho(p)-3p\,]
+ 2[\varrho(p)-p \,] u\dot{\varphi} \Bigr] \\
&\simeq& \frac{a_0^2\varphi(1+b_{s}\varphi / 2)}{2u(1-2\mu)}
\Bigl[ (1+b_{s}\varphi)[\varrho(p)-3p\,]
+ 2[\varrho(p)-p \,] u\dot{\varphi} \Bigr]
+ \mathcal{O}(a_0^4) \,,\nonumber
\end{eqnarray}
where dots denote derivatives with respect to $u = 8 \pi G
\rho_{0} A^{4}_{0} \, r^{2}$. Notice that the leading contribution
to the $\varphi$ equation depends on the self-coupling $b_s$, even
if $a_0^2 \to 0$. The boundary conditions are as before: $\mu(0)=
\varphi_0 = 0$, $p (0) = p_0$ and $\dot{\varphi}(0) =
\frac{1}{12} \, (1-3p_0)$ (and so $\dot \varphi(0) > 0$
provided $p_0 < \frac13$).
We seek interior profiles $\mu(u)$, $p (u)$ and $\varphi(u)$
obtained by integrating these equations subject to the series {\em
ans\"atze},
\begin{equation}
\mu(u) = \sum_{i=0}^\infty \mu_{(i)}(u) \, a_0^{2i} \,, \quad
p(u) = \sum_{i=0}^\infty p_{(i)}(u) \, a_0^{2i}
\quad \hbox{and} \quad
\varphi(u) = \sum_{i=0}^\infty \varphi_{(i)}(u) \, a_0^{2i} \,,
\end{equation}
with the leading expressions for $p_{(0)}(u)$ and $\mu_{(0)}(u)$
agreeing with the results from GR. In particular, because $a_0^2$
is small, the pressure profile decreases monotonically, ensuring
the existence of a solution for $R$ of $p(R) = 0$. Because of the
explicit factor of $a_0$ appearing in the definition $\varphi :=
(\phi - \phi_0)/a_0$, given a solution for $\mu$, $p$ and
$\varphi$ correct to order $a_0^{2k}$, we have a solution for
$\phi$ that is valid to order $a^{2k+1}_{0}$.
Such solutions are obtained explicitly for $k=0$ and $k=1$ for incompressible
stars in sections \ref{sec:fo_sol} and \ref{sec:chi_corrections} below.
\subsection{Perturbative relations amongst observables}
\label{sec:pertexp}
This same $a_0^2$ expansion is inherited by the expressions
relating the external physical parameters, $M$, $R$, $Q$ and
$\phi_\infty$, by virtue of the matching conditions at $r = R$.
Although it is difficult to characterize these constraints
analytically in the general case, an expansion in powers of $a_0^2
= a^{2}(\phi_{0})$ allows some progress to be made. To this end
write
\begin{eqnarray} \label{Fnexpns}
U(a_0,b_s,p_0) &\simeq& U_{(0)}(p_0) + U_{(1)}
(b_s, p_0) \, a_0^2 + \mathcal{O}(a_0^4) \nonumber\\
\mathcal{F}(a_0, b_s, p_0) &\simeq& \mathcal{F}_{(0)}(b_s,
p_0)
+ \mathcal{F}_{(1)}(b_s,p_0) \, a_0^2 + \mathcal{O}(a_0^4) \nonumber\\
\mathcal{A}(a_0, b_s, p_0) &\simeq& \mathcal{A}_{(0)}(b_s, p_0)
+ \mathcal{A}_{(1)}(b_s,p_0) \, a_0^2 + \mathcal{O}(a_0^4) \\
s(a_0, b_s, p_0) &\simeq& s_{(0)}(p_0) + s_{(1)}(b_s,p_0) \, a_0^2 + \mathcal{O}(a_0^4) \nonumber\\
\mathcal{M}(a_0, b_s, p_0) &\simeq& \mathcal{M}_{(0)}(p_0)
+ \mathcal{M}_{(1)}(b_s, p_0) \, a_0^2 + \mathcal{O}(a_0^4) \,.\nonumber
\end{eqnarray}
where $s = GM/R$, $\mathcal{F} = (\phi_\infty - \phi_0)/a_0$, $\mathcal{A} = Q/M a_0 = a_{\scriptscriptstyle A}/a_0$ and so on.
We seek two constraints among the four quantities $M$, $Q$, $R$
and $\phi_\infty$, and it is convenient to write the first of
these as a relationship between $M$, $R$ and $\phi_{\infty}$, and
the second as a relationship between $a_{{\scriptscriptstyle A}} = Q/M$, $s = GM/R$
and $a_\infty = a(\phi_{\infty})$. The convenience of this choice
comes from the GR limit, for which the first constraint becomes
the usual $M$-$R$ relation, and the second constraint degenerates
into something vacuous: $0 = 0$.
An important point about these constraints is that the dependence
of observables like $M$, $Q$ and $R$ (or $U$) on $\phi_\infty$ ---
or $a_\infty = a(\phi_\infty)$ --- arises completely through their
dependence on $a_0 = a(\phi_0)$. Obtaining this dependence
therefore requires a relation between the scalar field at the
origin and infinity: $\phi_{0}(\phi_{\infty})$. This is
accomplished by using the function $\mathcal{F}$, whose definition
-- see eq.~(\ref{f}) -- states $\phi_\infty = \phi_0 + a_0 \, \mathcal{F}$,
and so
\begin{eqnarray} \label{infty_0_rel_A}
A(\phi_{\infty}) &=& A(\phi_{0}) \exp \left[ a_0^2
\left( 1 + \frac{b_{s} \mathcal{F}}{2} \right)
\mathcal{F} \right] \,, \\
\label{infty_0_rel_alpha}
a(\phi_{\infty}) &=& a(\phi_{0})
(1+b_{s} \mathcal{F}) \,.
\end{eqnarray}
It is tempting to ask at this point whether we are working too
hard. In particular, since it is $a_\infty$ and not $a_0$ that
directly controls the strengths of interactions that we see, being
asymptotic observers, perhaps we could avoid the exercise of
trading $a_0$ for $a_\infty$ by directly expanding the field
equations in powers of $a_\infty$ rather than $a_0$. The reason we
do not do so --- and indeed the point of expanding in powers of
$a_0$ --- is that the mapping defined by
eq.~\pref{infty_0_rel_alpha} between $a_0$ and $a_\infty$ is in
general not one-to-one. This is the lesson of scalarization, which
relies on $a_\infty = 0$ corresponding to {\em several} choices:
$a_0 = 0$ and $a_0 \ne 0$. It is the option of having a second
choice that allows the star to support a scalar field ($Q \ne 0$)
despite the vanishing of $a_\infty$. It is the fact that
integration of the field equations makes stellar properties
single-valued in $a_0$ that makes this the natural expansion
parameter. The existence of several branches to the function
$a_0(a_\infty)$ means that stellar properties need not also be
analytic in $a_\infty$. We describe the relevance of this to
scalarization in more detail below.
\subsubsection*{The generalized mass-radius relation}
To obtain the leading form of the constraint generalizing the
$M(R)$ relation of GR, set $a_0^2=0$ in equations (\ref{eqssnew1})
-- (\ref{eqssnew2}). The result implies that $\mu_{(0)}$ and
$p_{(0)}$ do not depend on $b_{s}$. Consequently
$U_{(0)}(p_0)$, which is defined by $p_{(0)}(U_{(0)},
p_0) = 0$, also cannot depend on $b_{s}$ --- a fact already
indicated in eqs.~\pref{Fnexpns}. The same is then true for the
compactness,
\begin{equation} \label{GRcompcond}
s(U,\phi_\infty;b_s) = \frac{GM}{R} \simeq s_{(0)}[
U_{(0)}, p_0(U_{(0)})] + \mathcal{O}(a_0^2) \,,
\end{equation}
implying this constraint goes over to the GR limit to leading
order in $a_0^2$, even though the profile for $\varphi(u)$ need
not be trivial (for nonzero $b_s$). Thus is reproduced the usual
$M$ vs $R$ (or $U$) relation of GR.
\FIGURE[ht]{ \epsfig{file=bd.mr.eps,angle=270,width=0.8\hsize}
\\
\epsfig{file=bd.mr.eos.eps,angle=270,width=0.8\hsize}
\caption{$M$ vs $R$ in Brans-Dicke theory for various equations of state
and values of $a_{s}$. The central value, $p_0 =
P_0/\rho_0$ varies along each curve. The starting point
of each curve ($p_0 \to 0$) is at $M=R=0$, and the endpoint of each
curve corresponds to the ultra-relativistic limit where $p_0
\to p_0^{\rm max}$. The values of $\phi_{0}$ are chosen
such that $\phi_{\infty}$ is constant along each curve. The stellar
configurations become unstable after the first turning point where
$dM/dR=0$.
Top: Incompressible stars, for which $p_0^{\rm max}=\infty$.
Notice that the curves with non-zero
scalar coupling have smaller maximum radii and masses.
Bottom: Relativistic polytrope models of neutron stars, as defined
in \cite{damourspsc}, for which $p_0^{\rm max}$ is finite (colour online).
} \label{Fig1} }
Figure \ref{Fig1} illustrates how the generalized mass-radius
relation depends on $a_{s}$ in Brans-Dicke theory ({\em i.e.} $b_s
= 0$) for incompressible stars (discussed in
more detail in the next section) and relativistic polytrope models
of neutron stars.
Each curve traces the
relationship between $M$ and $R$ as $p_0$ is varied,
beginning at $M=R=0$ where $p_0 \to 0$, and terminating at the point where $p_0 \to p_0^{\rm max}$.
For incompressible stars,
$p_0^{\rm max}$ is infinite. In general relativity, $M \propto R^{3}$,
and the maximum $M$ and $R$ that can be supported against gravitational
collapse are attained in the ultra-relativistic limit. However, once the
scalar-matter coupling is turned on, the maximum values of $M$ and
$R$ are attained at a finite value of $p_0$.
As $a_{s}$ increases, the maximum values of $M$ and $R$
decrease.
The equations of state EOS II and EOS A are defined in reference
\cite{damourspsc}. They are relativistic polytropes, with a maximum
central pressure of $p_0^{\rm max}=\gamma-1$, where $\gamma$ is the
polytropic index. Their $M-R$ curves are more complicated than
those of incompressible stars, and turning on a weak scalar-matter coupling
slightly shifts these curves.
Numerically carrying out the stability
analysis described in section \ref{sec:matching}
shows that in all cases, the stellar configurations
become unstable after the first turning point where
$dM/dR=0$. Thus, the scalar field destabilizes ultra-relativistic
incompressible stars.
For non-relativistic systems the matching condition simplifies to
eq.~\pref{newt_match1},
\begin{equation}
s_{(0)} = \mu_{(0)}(U_{(0)};p_0) \ll 1\,,
\end{equation}
and although numerical methods are usually required to follow the
dependence on $p_0(R)$, more explicit statements about the
scalar corrections to this relation are possible for specific
choices of equation of state. The examples of Newtonian polytropes
and incompressible stars are considered more explicitly below.
\subsubsection*{Scalar-coupling constraint}
For a given equation of state the second observable constraint,
eq.~(\ref{aa}), gives an expression for $a_{{\scriptscriptstyle A}} = Q/M$ in terms
of $a_{0}$, $b_{s}$ and $p_0$. Equation
(\ref{infty_0_rel_alpha}) can be used to relate $a(\phi_{\infty})$
with $a(\phi_{0})$, and on expansion yields
\begin{equation}\label{spsc_eqn}
a(\phi_{\infty}) \simeq (1+b_{s}\mathcal{F}_{(0)}) a_{0} + b_{s}
\mathcal{F}_{(1)} a^{3}_{0} + \mathcal{O}(a^{5}_{0}) \,.
\end{equation}
Therefore,
\begin{equation}\label{constraint2_fo}
a_{{\scriptscriptstyle A}} = \frac{Q}{M} = a_0 \, \mathcal{A}(a_0^2, b_s, p_0)
\simeq \frac{a(\phi_{\infty}) \mathcal{A}_{(0)}}
{1+b_{s}\mathcal{F}_{(0)}} + \mathcal{O}(a^{3}_{0}) \,.
\end{equation}
This expression diverges when $1 + b_{s}\mathcal{F}_{(0)} \to 0$. In
this limit, one must include the $\mathcal{O}(a_{0}^{3})$ terms
in order to obtain a meaningful result. This will be described in the
section below.
\FIGURE[ht]{ \epsfig{file=bd.as.eps,angle=270,width=0.9\hsize}
\\
\epsfig{file=bd.constraint.eps,angle=270,width=0.9\hsize}
\caption{Top: A comparison of $\mathcal{A} = a_{{\scriptscriptstyle A}}/a_{s} = Q/(Ma_{s})$
vs $s=GM/R$ for incompressible stars in Brans-Dicke theory, for
various values of $a_{s}$. The curves start at $s=0$, $\mathcal{A}=1$ where
$p_{0} \to 0$, and terminate where $p_0 \to
\infty$. Bottom: The same quantity comparing incompressible stars
with two kinds of neutron-star equations of state (relativistic
polytropes) given in \cite{damourspsc}, using Brans-Dicke theory
to leading order in $a_{s}$.} \label{Fig2} \label{Fig3}}
The dependence of $a_{\scriptscriptstyle A}/a_0$ on $s$ is shown in Figure
\ref{Fig2} for several choices of $a_0 = a_s$ in the special case
of Brans-Dicke theory ($b_s = 0$), using for illustration an
incompressible star equation of state (see \S5, below).
Each curve can be parametrized by $p_0$. The starting point is
at $s=0$, $\mathcal{A}=1$ when $p_0 \to 0$, and the endpoint
is reached in the limit $p_0 \to p_0^{\rm max}$.
These curves show that the small-$a_0$ limit works well in this
case even out to very relativistic stars. Notice that $a_{\scriptscriptstyle A}$ is
in general smaller than $a_0$, with the suppression increasing for
more relativistic stars.
Fig.~\pref{Fig3} also compares the amount of this suppression for
several other choices for the equation of state, indicating that
the suppression for relativistic polytropes (more about which
later) is under-estimated for incompressible stars, although they
all agree in the non-relativistic limit (for which $s_{(0)} \ll
1$), in agreement with intuition.
For incompressible stars $a_{\scriptscriptstyle A}$ eventually becomes negative.
As seen in the insert in Fig.~\pref{Fig3}, this change in sign
takes place before the onset of instability,
so there exist stable incompressible stars with $a_{\scriptscriptstyle A} < 0$.
The scalar interaction between two stars $A$ and $B$ is attractive if
$a_{\scriptscriptstyle A} a_{\scriptscriptstyle B} >0$, and repulsive if $a_{\scriptscriptstyle A} a_{\scriptscriptstyle B} < 0$. If at least one of
$A$, $B$
is an incompressible star, then both cases are possible.
However, negative values of $a_{\scriptscriptstyle A}$ are not seen for the more
realistic equations of state, although these do approach $a_{\scriptscriptstyle A} =
0$ when extremely relativistic. This should be compared with the
corresponding universal result, $Q = 0$, found above for a static,
spherically symmetric black hole.
\subsubsection*{Spontaneous Scalarization}
In the above section, the scalar-coupling constraint was expanded in powers of $a_{0}$, and it was found that at leading order, $a_{{\scriptscriptstyle A}}$ is a single-valued function of $a_{\infty}$. In this section, we take the expansion to next order, and demonstrate that $a_{{\scriptscriptstyle A}}$ becomes a multi-valued function of $a_{\infty}$. As a consequence, $a_0$ and $a_{\scriptscriptstyle A}$ can both be nonzero even when $a_\infty$ vanishes. The phenomenon where $Q/M$ is nonzero even though $a_\infty = 0$ is called spontaneous scalarization. To simplify notation, write
\begin{eqnarray}
\label{ainfty_exp}
a_{\infty} = d_{1}a_{0} + d_{2}a_{0}^{3} + \mathcal{O}(a_{0}^{5}) \,,
\\
\label{aa_exp}
a_{{\scriptscriptstyle A}} = e_{1}a_{0} + e_{2}a_{0}^{3} + \mathcal{O}(a_{0}^{5})\,.
\end{eqnarray}
Dropping terms of order $a_{0}^{5}$ and inverting equation (\ref{ainfty_exp}) yields
\begin{equation}
a_{0} = \omega C_{+} + \bar{\omega} C_{-} \,,
\end{equation}
where
\begin{equation}
C_{\pm} = \sqrt[3]
{
\frac{a_{\infty}}{2d_{2}}
\pm
\sqrt{D}
} \,,
\qquad
D=\left(\frac{a_{\infty}}{2d_{2}}\right)^{2}
+\left(\frac{d_{1}}
{3d_{2}}\right)^{3} \,,
\end{equation}
and $\omega = 1, -e^{-i\pi/3}, -e^{i\pi/3}$. Thus,
\begin{equation}
a_{{\scriptscriptstyle A}} = (\omega C_{+} + \bar{\omega}C_{-})e_{1} +
(\omega C_{+} + \bar{\omega}C_{-})^{3}e_{2}
+ \mathcal{O}(a_{0}^{5}) \,.
\end{equation}
If $D>0$, then the $\omega=1$ solution is real, and the other two solutions are complex. Thus, specification of $a_{\infty}$ determines a unique stellar configuration.
If $D<0$, then all solutions are real. Thus, specification of $a_{\infty}$ determines three stellar configurations.
In the limit $a_{\infty} \to 0$, the $\omega=1$ solution vanishes,
and the other two solutions become
\begin{equation}
\label{sc_cond1}
a_{0} = \pm \sqrt{-\frac{d_{1}}{d_{2}}} \,.
\end{equation}
Therefore, whenever the quantity inside the square root is positive, there exist stellar configurations with $a_{\infty}=0$ and
\begin{equation}
a_{{\scriptscriptstyle A}} =
\pm \left\{ \left(-\frac{d_{1}}
{d_{2}}\right)^{1/2} e_{1}
+ \left(-\frac{d_{1}}
{d_{2}}\right)^{3/2} e_{2} \right\}
+ \mathcal{O}(a_{0}^{5})
\,.
\end{equation}
This is precisely the phenomenon of spontaneous scalarization.
The coefficients $d_{i}$ and $e_{i}$ are functions of $b_{s}$ and $p_0$, and they also depend on the equation of state. The solution obtained in section \ref{sec:fo_sol} for incompressible stars can be used to calculate
\begin{eqnarray}
\label{d1_eqn}
d_{1} &=& {\rm HeunG}(\tilde{a},\tilde{q};\tilde{\alpha},\tilde{\beta},
\tilde{\gamma},\tilde{\delta} ; Z)
- \frac{1+p_0}{1+3p_0}\log \left( \frac{1+p_0}{1+3p_0} \right)
{\rm HeunG'}(\tilde{a},\tilde{q};\tilde{\alpha},\tilde{\beta},
\tilde{\gamma},\tilde{\delta} ; Z) \,,
\\
\label{e1_eqn}
e_{1} &=& \frac{1+p_0}{b_{s}(1+3p_0)}
{\rm HeunG'}(\tilde{a},\tilde{q};\tilde{\alpha},\tilde{\beta},\tilde{\gamma},
\tilde{\delta} ; Z) \,,
\end{eqnarray}
where the arguments inside the Heun functions are given in equations (\ref{heunpars1})-(\ref{heunpars2}), and $Z=p_0/(3p_0+1)$. Note that in the limit $p_0 \to 0$, we have $d_{1} \to 1$ and $e_{1} \to 1$. In principle, the next coefficients $d_{2}$ and $e_{2}$ can be calculated using the solution found in section \ref{sec:chi_corrections}. However, the resulting expressions are very complicated.
It follows from equation (\ref{sc_cond1}) that the onset of scalarization implies $d_{1}=0$. Thus, all the points $(b_{s},p_0)$ at which scalarization starts can be found by doing a root search of equation (\ref{d1_eqn}).
In figure (\ref{fig:d1_bneg}),
equation (\ref{d1_eqn}) is plotted versus
$p_0$, for various negative values of
$b_{s}$. In all cases, $d_{1}$ is a convex
function of $p_0$, with one global minimum.
If $b_{s} \in (-4.329,0)$, then $d_{1}$ never crosses
zero, and there is no scalarization.
If $b_{s} \in (-\infty,-4.329)$, then $d_{1}$ has two crossings of zero,
which correspond to the scalarization which has been extensively studied
in the literature.
In figure (\ref{fig:d1_bpos}), equation (\ref{d1_eqn}) is plotted
versus $p_0$, for various positive values of $b_{s}$. In all cases,
$d_{1}$ is an oscillatory function of $p_0$, and there are
multiple regions of scalarization. We have verified numerically that
scalarization does actually occur when $d_{1}<0$. However, application
of the stability criterion of \S\ref{sec:stability} shows that these
scalarized stars are unstable whenever $b_s > 0$.
\FIGURE[r]{\epsfig{file=d1_bneg.eps,angle=270,width=0.8\hsize}
\label{fig:d1_bneg}
\caption{The coefficient $d_{1}$ plotted versus $p_0$, for constant-density stars, for several choices of $b_{s}<0$. Scalarization becomes possible for
$b_{s} < -4.329$.}
}
\FIGURE[r]{\epsfig{file=d1_bpos.eps,angle=270,width=0.8\hsize}
\label{fig:d1_bpos}
\caption{The coefficient $d_{1}$ plotted versus $p_0$, for constant-density stars, for several choices of $b_{s}>0$. There are multiple regions of
scalarization.}
}
\clearpage
\subsection{Non-relativistic polytropes}
As an example for which the scalar-field dependence of the above
constraints can be more explicitly explored, consider the case of
Newtonian polytropes discussed in \S\ref{sec:nonrellim} with equation
of state $P = K \rho^{1+1/\chi}$. In this case for weak central
coupling the scalar field, $\varphi$, and the dimensionless
density, $\theta = (\rho/\rho_0)^{1/\chi}$, can be expanded in
$a_0^2$, $b_s$, as well as the parameter $\zeta = (1 + \chi) K
\rho_0^{1/\chi} = (1 + \chi) P_0/\rho_0 = (1 + \chi) p_0$:
\begin{eqnarray}
\theta &=& \sum_{i} \theta_{(i)} a_0^{2i} = \sum_{i,j}
\theta_{(i,j)} \, a_0^{2i} \, b_{s}^{j} = \sum_{i,j,k}
\theta_{(i,j,k)} \, a_0^{2i} \, b_{s}^{j} \, \zeta^{k}\ , \\
\varphi &=& \sum_{i} \varphi_{(i)} a_0^{2i} = \sum_{i,j}
\varphi_{(i,j)} \, a_0^{2i} \, b_{s}^{j} = \sum_{i,j,k}
\varphi_{(i,j,k)} \, a_0^{2i} \, b_{s}^{j} \, \zeta^{k}\ .
\end{eqnarray}
We show below that $\zeta \ll 1$ for the polytropes of practical
interest, such as white dwarfs and main-sequence stars.
In the limit $a_0^2=0$, eqs.~(\ref{eq:poly_mod1_1}) --
(\ref{eq:poly_mod1_2}) become
\begin{eqnarray} \label{eq:le0_1}
{\theta}'' &=& - \frac{2 {\theta}'}{w} - \theta^{\chi}\ ,
\\ \label{eq:le0_2}
{\varphi}'' &=& - \frac{2 {\varphi}'}{w} + \zeta
\theta^{\chi} [1 + w {\varphi}' + b_{s} \varphi ]\ ,
\end{eqnarray}
where for later notational convenience we use primes in this
section to denote differentiation with respect to $w$. Equation
(\ref{eq:le0_1}) is called the Lane-Emden equation, and its
solutions are well-studied because of its important role in the
theory of stellar structure. It can be solved analytically when
$\chi=0,1,5$ \cite{chandbook}. The solutions of the Lane-Emden
equation with initial conditions (\ref{eq:newtpoly_ic1}) are
called Lane-Emden functions, and are denoted by
$\Theta_{\chi}(w)$. Thus,
\begin{equation}
\theta_{(0)}(w) = \Theta_{\chi}(w)\ .
\end{equation}
If we now take $b_{s}=0$ then equation (\ref{eq:le0_2}) can be
solved for $\varphi$ in terms of $\theta$, giving
\begin{eqnarray}
{\varphi'_{(0,0)}} &=& - \frac{\zeta}{w^{2}}
e^{-\zeta (w\Theta_{\chi})'}
\int^w (\hat w^{2}{\Theta}_{\chi}')'
e^{\zeta(\hat w \Theta_{\chi})'}
{\hbox{d}} \hat w\ \\
&=&
-\frac{1}{w} + \frac{1}{w^{2}} e^{-\zeta (w \Theta_{\chi})'}
\int^w e^{\zeta (\hat w \Theta_{\chi})'} {\hbox{d}} \hat w \,.
\end{eqnarray}
Expanding this in powers of $\zeta$ then yields
\begin{equation}
\varphi_{(0,0)} \simeq \zeta(1-\Theta_{\chi}) +
\mathcal{O}(\zeta^{2})\,,
\end{equation}
and so
\begin{equation}
\varphi_{(0,0,0)} = 0
\quad \hbox{and} \quad
\varphi_{(0,0,1)} = 1 - \Theta_{\chi}\ .
\end{equation}
Expanding the matching equations, eqs.~(\ref{poly_match1}) --
(\ref{poly_match3}) in powers of $a_0^2$, $b_{s}$ and $\zeta$
similarly yields
\begin{eqnarray}
\label{poly_match1_pert} \frac{GM}{R} &\simeq& -\zeta W
{\Theta'}_{\chi}(W) + \mathcal{O}(a_0^2) \,,
\\ \label{poly_match2_pert}
\frac{Q}{Ma_{0}} &\simeq& 1 + \mathcal{O}(a_0^2)
+ \mathcal{O}(b_{s}) + \mathcal{O}(\zeta) \,,
\\ \label{poly_match3_pert}
\frac{\phi_{\infty}-\phi_{0}}{a_{0}} &\simeq& \zeta
\Bigl[ 1-\Theta_{\chi}(W) - W {\Theta'}_{\chi}(W) \Bigr]
+ \mathcal{O}(a_0^2) + \mathcal{O}(b_{s})
+ \mathcal{O}(\zeta^{2}) \,.
\end{eqnarray}
Notice in particular that eq.~(\ref{poly_match2_pert}) implies
that
\begin{equation}
a_{{\scriptscriptstyle A}} \simeq a_{0} \Bigl[ 1 + \mathcal{O}(a^{2}_{0})+
\mathcal{O}(b_{s}) + \mathcal{O}(\zeta) \Bigr] \,,
\end{equation}
which is consistent with the limit $a_{{\scriptscriptstyle A}} \to a(\phi_{\infty})$
for weakly self-gravitating stars. Similarly, equation
(\ref{poly_match1_pert}) can be re-written as
\begin{eqnarray} \label{poly_mass}
GM &=& -\zeta r_s W^2 \Theta'_\chi + \mathcal{O}(a_0^2) \nonumber\\
&=& - W^{2} {\Theta}'_{\chi}(W) \; \left[
\frac{\rho_{0}^{(3-\chi)/2\chi}}{A^{2}_{0}}
\sqrt{\frac{K^{3}(\chi+1)^{3}}{4 \pi G}} \right]
+ \mathcal{O}(a_0^2) \,,
\end{eqnarray}
which is independent of $\rho_{0}$, as advertised, when $\chi = 3$.
To get an idea about the validity of the $\zeta$ expansion, we
close this section by estimating its size for the examples of
white dwarfs and main-sequence polytropes.
\subsubsection*{White dwarfs}
A white dwarf can be modeled by a degenerate fermion gas which
satisfies in the ultra-relativistic limit the polytropic equation
of state, eq.~(\ref{eq:polytrope}), with
\begin{equation}
K = \frac{3^{1/3}\pi^{2/3}}{4} \left( \frac{Y_{e}}{m_{b}}
\right)^{4/3}
\quad \hbox{and} \quad
\chi=3 \,.
\end{equation}
Here $Y_{e}$ is the number of electrons per nucleon and $m_{b}$ is
the average nucleon mass \cite{astrophys}. Although the fermion
gas is highly relativistic, the gravitational field generated by
it is not strong, allowing use of the non-relativistic expressions
developed above.
The typical central densities of white dwarfs are $\rho_{0} \sim
(10^{7} \ldots 10^{14} ) \; {\rm kg}\, {\rm m}^{-3}$
\cite{astrophys}, corresponding to $10^{-5} \roughly< \zeta \roughly<
10^{-2}$ for $Y_{e} = \frac12$. This shows that a perturbative
expansion in $\zeta$ is likely a good approximation for white
dwarfs.
Since the ultra-relativistic limit of the fermion gas is used, the
mass calculated below using the matching conditions --- {\em i.e.}
equation (\ref{poly_mass}) --- is greater than the actual mass of
the white dwarf. It is instead to be regarded as the Chandrasekhar
limit: an upper bound on the mass of white dwarfs. Because $\chi =
3$ its value turns out to be independent of $\rho_{0}$, and for
$Y_{e} = \frac{1}{2}$, its value is approximately $1.4
M_{\odot}$ \cite{chandmass}.
\subsubsection*{Main-sequence models}
In the Eddington stellar model, a star is regarded as an ideal gas
whose energy is transported by radiation, and it is assumed that
the gas makes up a fixed fraction of the total pressure, $\beta :=
{P_{\rm gas}}/{P} = {\rm const}$. Here $P_{\rm gas}$ is the
pressure of the ideal gas, and the total pressure is $P = P_{\rm
gas} + P_{\rm rad}$, where $P_{\rm rad}$ is the radiation
pressure. The Eddington model leads to a polytropic equation of
state with
\begin{equation}
K = \left[ \frac{45}{\pi^2 k_{\scriptscriptstyle B}^4} \left(\frac{R_g}{\mu}\right)^{4}
\frac{1-\beta}{\beta^{4}}\right]^{1/3} \quad \hbox{and}
\quad \chi = 3 \,,
\end{equation}
where $R_g$ is the universal gas constant, $\mu$ is the molar mass
of the ideal gas and $k_{\scriptscriptstyle B}$ is Boltzmann's constant
\cite{astrophys2}. Main-sequence stars can be approximately
described by the Eddington standard model, even though convection
also plays a role in heat transfer for more realistic models.
For a more accurate single-polytrope model of the Sun, $\chi=3.35$
and $\zeta \sim 10^{-5}$ \cite{solarmodel}, so the perturbative
expansion in $\zeta$ remains a good approximation.
\section{Incompressible stars}
We now specialize the discussions of the previous sections to the
special case of an incompressible star, for which the stellar
density, $\rho$, is constant. Since constant density can only be
consistent with the pressure gradients required for hydrostatic
equilibrium if $p \ne p(\rho)$, we no longer impose this
kind of equation of state. (It is not necessary in any case, since
the closure of the field equations is now accomplished by the
incompressibility condition, $\rho(r) \equiv \rho_0$.)
The purpose of this exercise is to have a toy example for which
all of the above manipulations can be simply carried through
explicitly in closed form. Performing the same exercise for GR
provides an interesting example that displays the main features of
relativistic structure, including the existence of a maximum
compactness for a star, $s_{(0)} \le \frac49$, that can be
supported against gravitational collapse. The maximum that is
found for incompressible stars turns out to provide an upper bound
to the maximum compactness that can be achieved with other
equations of state.
Several earlier works have numerically investigated
incompressible stars in scalar-tensor gravity
\cite{saakmnats,hillheintz,avakyan},
but our quasi-analytical treatment of these stars is new.
\subsection{Incompressible stars with quasi-Brans/Dicke scalars}
\label{sec:constrho}
We cut right to the chase and specialize directly to qBD scalars,
for which $a(\phi) = a_s + b_s \phi$, since this case is broad
enough to be of wide interest, but restricted enough to be
explored in detail.
\subsubsection*{Field equations}
Taking the equation of state to be $\rho = \rho_0$, or
$\varrho(p ) = 1$, equations (\ref{eqssnew1}) --
(\ref{eqssnew3}) become
\begin{eqnarray}\label{constrho1}
\dot{\mu} &=& -\frac{\mu}{2u} + \frac14 \, e^{4a_0^2 \varphi(1+b_{s}
\varphi / 2)} + a_0^2 \, u(1-2\mu)\dot{\varphi}^{2}
\\ \label{constrho2}
\dot{p } &=& - (1+p )\left[
\frac{\mu}{2u(1-2\mu)} + \frac{p }{4(1-2\mu)} \,
e^{4a_0^2 \varphi(1+b_{s} \varphi / 2)} +a_0^2 \,
\dot{\varphi}(1+u\dot{\varphi}+b_{s}\varphi) \right]
\\ \label{constrho3}
\ddot{\varphi} &=& -\frac{(3-4\mu)\dot{\varphi}}{2u(1-2\mu)} +
\frac{e^{4a_0^2\varphi(1+b_{s}\varphi / 2)}} {8u(1-2\mu)} \Bigl[
(1+b_{s}\varphi)(1-3p ) + 2(1-p )u\dot{\varphi}\Bigr]\,.
\end{eqnarray}
Similarly, the function $f(p )$ defined in eq.~(\ref{f_defn})
becomes
\begin{equation}
f(p ) = \ln \left( \frac{1+p }{1+p_0}\right) \,,
\end{equation}
and so the baryon number density, $n(r)$, computed from
eq.~(\ref{n_eqn}) is also constant, $n=n_{0}$. The function
$\mathcal{M}$ defined in eq.~\pref{barmass_quadmod} similarly
becomes
\begin{eqnarray}
\mathcal{M} &=& \int_{0}^{U} {\hbox{d}} u \sqrt{\frac{u}{1-2\mu}}
\exp\left[ 3a_0^2 \varphi \left(1+ \frac{b_{s}\varphi}{2}
\right) \right] \,.
\end{eqnarray}
\subsection{Perturbative solutions: leading order}\label{sec:fo_sol}
Because the GR problem can be explicitly integrated for
incompressible stars, equations (\ref{constrho1}) --
(\ref{constrho3}) can be solved analytically when $a_0^2 = 0$. The
zeroth-order profiles, $\mu_{(0)}$ and $p_{(0)}$, are given
by \cite{GRincomp, carroll}
\begin{eqnarray}
\mu_{(0)}(u) &=& \frac u6 \,, \\
p_{(0)}(u) &=& \frac{(1+3p_0)\sqrt{1-u/3}-(1+p_0)}
{3(1+p_0) - (1+3p_0)\sqrt{1-u/3}} \,,
\end{eqnarray}
as a function of the central pressure $p_0$. We return to
computing the profile, $\varphi(u)$, below.
At leading order the stellar radius is determined as the zero of
$p_{(0)}$, which vanishes at $u = U_{(0)}$ where
\begin{equation}
U_{(0)}= \frac{12p_0(1+2p_0)}{(1+3p_0)^{2}} \,,
\end{equation}
corresponding to the radius $r = R_{(0)}$ with
\begin{equation}\label{radius}
R_{(0)} = \frac{2}{A^{2}_{0}(1+3p_0)}
\sqrt{\frac{3p_0(1+2p_0)}{8\pi G \rho_{0}}} \,.
\end{equation}
The leading components of the functions relevant to matching to
the exterior solutions --- {\em i.e.} $\mathcal{F}$, $\mathcal{A}$, $s$ and
$\mathcal{M}$ --- are
\begin{eqnarray} \label{match0_constrho}
\mathcal{F}_{(0)} &=& \varphi_{\star(0)}
-12\left(\frac{1+p_0}{1+3p_0}\right)^{2}
\ln \left( \frac{1+p_0}{1+3p_0} \right)
\dot{\varphi}_{\star(0)} \,, \\ \label{match1_constrho}
\mathcal{A}_{(0)} &=& 12\left( \frac{1+p_0}{1+3p_0}\right)^{2}
\dot{\varphi}_{\star(0)}\,,
\\ \label{match2_constrho}
s_{(0)} &=& \frac{2p_0(1+2p_0)}{(1+3p_0)^{2}}\,, \\
\mathcal{M}_{(0)} &=& 3\sqrt{3}
\left\{ \arccos \left( \frac{1+p_0}{1+3p_0}\right)
- 2 \frac{(1+p_0) \sqrt{p_0(1+2p_0)}}{(1+3p_0)^{2}} \right\} \,,
\end{eqnarray}
where, as before, the subscript `$\star$' denotes evaluation at $u
= U$, so $\varphi_{\star(0)} := \varphi_{(0)}(U_{(0)})$.
Equation (\ref{match2_constrho}) implies that the compactness is
an increasing function of $p_0$, which vanishes when
$p_0=0$ and asymptotes to $\frac 49$ as $p_0 \to \infty$.
Thus we reproduce the GR result $0 \leq s_{(0)} \leq \frac49$ for
constant-density stars. This prediction gets modified once
$O(a_0^2)$ corrections are included, however, as is discussed in
detail in the next section.
The mass-radius constraint, eq.~(\ref{GRcompcond}), in this case
becomes
\begin{equation}
s = \frac{GM}{R} = \frac{4\pi G\rho_{0}}{3} A^{4}_{\infty} R^{2}
+ \mathcal{O}(a^{2}_{0}) \,,
\end{equation}
which states $M \propto R^3$, as might be expected for constant
density. This behaviour is seen explicitly in the $a_s = 0$ curve
in Fig.~\pref{Fig1}.
The scalar-coupling constraint, eq.~(\ref{constraint2_fo}),
similarly becomes
\begin{eqnarray} \label{qm_constraint_2_alt}
a_{{\scriptscriptstyle A}} &=&
\frac{12 a_{\infty}(1-2s)\dot{\varphi}_{\star(0)}}
{1+b_{s} \varphi_{\star(0)} -
6b_{s}(1-2s) \ln(1-2s) \dot{\varphi}_{\star(0)}} +
\mathcal{O}(a^{3}_{0})\,,
\end{eqnarray}
where
\begin{equation}
p_0 = \frac{1-3s - \sqrt{1-2s}}{9s-4}
+ \mathcal{O}(a^{2}_{0})
\end{equation}
should be substituted into $\varphi_{\star{(0)}}$ and $\dot
\varphi_{\star{(0)}}$ on the right-hand side. Since further
progress requires knowing the scalar profile, we next turn to
solving its field equation.
\subsubsection*{Scalar profile in Brans-Dicke theory
(when $b_{s}=0$)}\label{sec:solb0}
If $b_{s}=0$, then the model reduces to Brans-Dicke theory, and
$a(\phi) \equiv a_{0} = a_s$. Equation (\ref{constrho3}) with
$a_0^2 = 0$ becomes a first-order linear differential equation for
$\dot{\varphi}$, which can be solved analytically:
\begin{equation}\label{phidot_beta0}
\dot{\varphi}_{(0)}(u) = \frac{ 27(1+p_0)
\left(\frac{\arcsin\sqrt{u/3}}{\sqrt{u/3}} - \sqrt{1-u/3}\right)
-4(1+3p_0)u}
{12u\sqrt{1- u/3}(3(1+p_0) - (1+3p_0)\sqrt{1-u/3})}\,.
\end{equation}
Integrating this expression once more with respect to $u$ then
gives
\begin{eqnarray} \label{phi_beta0}
\varphi_{(0)} &=& \frac{1}{8(2+3p_0)}\biggl[
(9p_0+5)(3p_0-1) \ln
\left( 3(1+p_0)-(1+3p_0)\sqrt{1-u/3}\right)
\nonumber \\
&&\hphantom{\frac{1}{8(2+3p_0)}\biggl[}
-9(1+p_0)\left( 1+3p_0 + 3(1+p_0)\sqrt{1-u/3}\right)
\frac{\arcsin{\sqrt{u/3}}}{\sqrt{u/3}} \biggr]
\nonumber \\
&& + \frac{9(1+3p_0)^{2}(1+p_0)}{16(2
+3p_0)^{3/2}} \biggl[
{\rm Li}_{2} (\lambda_{-}) - {\rm Li}_{2} (\lambda_{+})
+ i\ln \left( \frac{1-\lambda_{-}}{1-\lambda_{+}}\right)
\arcsin{\sqrt{u/3}}\biggr]\,,
\end{eqnarray}
where
\begin{equation}
\lambda_{\pm} := \frac{(1+3p_0)(\sqrt{1-u/3}+i\sqrt{u/3})}
{3(1+p_0) \pm 2\sqrt{2+3p_0}}
\equiv |\lambda_{\pm}| e^{i \arcsin \sqrt{u/3}}
\,,
\end{equation}
and
\begin{equation}
{\rm Li}_{2}(z) = \sum_{k=1}^{\infty}\frac{z^{k}}{k^{2}}
\end{equation}
is the dilogarithm function.
By using the identity \cite{gradshteyn}
\begin{equation}
\sum_{k=1}^{\infty}p^{k} \sin (kx) =
\frac{p \sin x}{1 - 2p \cos x + p^{2}} \,,
\end{equation}
it can be shown that the imaginary part of (\ref{phi_beta0}) is
constant, and can thus be absorbed into an integration constant.
The final normalized and manifestly-real expression for
$\varphi_{(0)}$ is then
\begin{eqnarray}
\label{phi_beta0_final}
\varphi_{(0)}(u) &=& \frac{1}{8(2+3p_0)}\biggl[
(9p_0+5)(3p_0-1) \log
\left( \frac{3}{2}(1+p_0)-\frac{1}{2}(1+3p_0)
\sqrt{1-u/3}\right)
\nonumber \\
&&\hphantom{\frac{1}{8(2+3p_0)}\biggl[}
-9(1+p_0)\left( 1+3p_0 + 3(1+p_0)\sqrt{1-u/3}\right)
\frac{\arcsin{\sqrt{u/3}}}{\sqrt{u/3}} \biggr]
\nonumber \\
&& + \frac{9(1+3p_0)^{2}(1+p_0)}{16(2+3
p_0)^{3/2}} \biggl[
\arctan \left(
\frac{2\sqrt{(2+3p_0)u/3}}{1+3p_0
-3(1+p_0)\sqrt{1-u/3}}
\right)
\arcsin \sqrt{u/3}
\nonumber
\\
&&\hphantom{+ \frac{9(1+3p_0)^{2}(1+
p_0)}{16(2+3p_0)^{3/2}} \biggl[}
+\Re \left[ {\rm Li}_{2} (\lambda_{-}) -
{\rm Li}_{2} (|\lambda_{-}|)
- {\rm Li}_{2} (\lambda_{+}) + {\rm Li}_{2} (|\lambda_{+}|) \right]
\biggr]
\nonumber
\\
&&+\frac{9}{4}(1+p_0) \,.
\end{eqnarray}
If $p_0 > \frac{1}{\sqrt{3}}$, then the inverse tangent in the
above expression changes branch at the critical value
\begin{equation}
u_{\rm crit} = \frac{4(2+3p_0)}{3(1+p_0)^{2}} < U_{(0)}\,,
\end{equation}
so that $\varphi_{(0)}$ is a continuous function of $u$.
Substituting equation (\ref{phidot_beta0}) into (\ref{match1_constrho})
yields
\begin{equation}
\mathcal{A}_{(0)} = \frac{9(1+p_0) (1+3p_0)^{2}}{16 [p_0(1+2p_0)]^{3/2}}
\arccos \left( \frac{1+p_0}{1+3p_0} \right)
-\frac{41p_0^{2} + 34p_0 + 9}{8p_0(1+2p_0)} \,,
\end{equation}
so the scalar-coupling constraint,
eq.~(\ref{qm_constraint_2_alt}), can be explicitly evaluated,
\begin{eqnarray}\label{bd_constraint2}
a_{{\scriptscriptstyle A}} &=&
a_{0} \left(
\frac{5}{2} - \frac{9}{4s}
- \frac{9\sqrt{1-2s}
[27s-14-9\sqrt{1-2s}(2-3s)]}
{8[s(5-9s+3\sqrt{1-2s})]^{3/2}} \arccos \sqrt{1-2s}
\right)
\nonumber
\\
&&
+ \mathcal{O}(a_{0}^{3})
\\
&=& a_{0}\left(1 - \frac{6}{5}s + \mathcal{O}(s^{2})\right)
+ \mathcal{O}(a_{0}^{3})
\,.
\end{eqnarray}
This confirms that although $a_{\scriptscriptstyle A} \to a_0$ in the
non-relativistic limit $s \to 0$ --- consistent with
eq.~(\ref{limits1}) --- it is in general depressed relative to
$a_0$ for relativistic systems, even when additional powers of
$a_0^2$ are neglected.
Notice that in the opposite limit we have
\begin{eqnarray}
\lim_{s \to 4/9} a_{{\scriptscriptstyle A}} &=&
a_{0} \left( \frac{81}{64}\sqrt{2} \arccos ( 1/3 ) -
\frac{41}{16} \right) + \mathcal{O}(a_{0}^{3})
\nonumber
\\ & \simeq & -0.359 a_{0} + \mathcal{O}(a_{0}^{3}) \,.
\end{eqnarray}
Equation (\ref{bd_constraint2}) is plotted in Figure \ref{Fig3},
showing that for incompressible stars $a_{{\scriptscriptstyle A}}$ passes through
zero, changing sign at $s \sim 0.398$. This is compared in the
same figure to the corresponding curves for neutron stars modeled
by relativistic polytropes, using the equations of state EOS A and
EOS II defined in \cite{damourspsc}. For small $s$ all three
curves agree reasonably well, but differ for large $s$. For large
$s$ the neutron star curves significantly deviate from the
constant-density curve, as might be expected given that
relativistic polytropes have a maximum value $p_0^{\max} =
\gamma -1$, where $\gamma$ is the polytropic index, while
constant-density stars have no such maximum value for $p_0$.
Substituting equations (\ref{phi_beta0_final}) and
(\ref{phidot_beta0}) into (\ref{match0_constrho}) yields
\begin{eqnarray}\label{f_beta0}
\mathcal{F}_{(0)} &=& \frac{9}{4}(1+p_0)
- \frac{41p_0^{2} + 34 p_0 + 9}{8 p_0
(1+2p_0)} \log(1+3p_0)
\nonumber \\
&& + \frac{9(1+p_0)(1+3p_0
+3 p_0^{2})}{4(2+3p_0)\sqrt{p_0(1+2p_0)}}
\left( \frac{1+p_0}{\sqrt{p_0(1+2p_0)}} \log(1+p_0) -
\arccos \left(\frac{1+p_0}{1+3p_0} \right)\right)
\nonumber \\
&& - \frac{9(1+3p_0)^{2}(1+p_0)}{16(2+3p_0)^{3/2}} \biggl\{
\arccos \left(\frac{1+p_0}{1+3p_0}\right) \left[
\left(\frac{2+3p_0}{p_0(1+2p_0)}\right)^{3/2}
\log \left( \frac{1+p_0}{1+3p_0} \right) \right.
\nonumber \\
&& \left.\hphantom{- \frac{9(1+3p_0)^{2}(1+p_0)}{16
(2+3p_0)^{3/2}} \biggl\{}
\hphantom{\arccos \left(\frac{1+p_0}{1+3p_0}\right) \biggl[}
+ \widetilde{\arctan} \left( \frac{2\sqrt{p_0(1+
2p_0)(2+3p_0)}}{1-3p_0^{2}}
\right)\right]
\nonumber\\
&& \hphantom{- \frac{9(1+3p_0)^{2}(1+p_0)}{16(2+
3p_0)^{3/2}} \biggl\{}
-\Re [ {\rm Li}_{2} (\Lambda_{-}) - {\rm Li}_{2} (|\lambda_{-}|)
- {\rm Li}_{2} (\Lambda_{+}) + {\rm Li}_{2} (|\lambda_{+}|) ]
\biggr\} \,,
\end{eqnarray}
where
\begin{equation}
\Lambda_{\pm} = \frac{1+p_0 + 2i \sqrt{p_0(1+2p_0)}}
{3(1+p_0) \pm 2 \sqrt{2+3p_0}}
\end{equation}
is the value of $\lambda_{\pm}$ when $u=U_{(0)}$, and
\begin{equation}
\widetilde{\arctan}\, X =
\begin{cases}
\arctan \, X - \pi & \text{if } 0 < p_0 < \frac{1}{\sqrt{3}}
\,,
\\
\arctan \, X & \text{if } p_0 > \frac{1}{\sqrt{3}} \,.
\end{cases}
\end{equation}
Equation (\ref{f_beta0}) is plotted in Figure \ref{Fig4}, and is
compared to the corresponding curves for neutron stars. Again, the
curves are close for small $s$, and diverge for large $s$.
$\mathcal{F}_{(0)}(s)$ is positive and increasing on the interval
$0 < s < 0.25$; positive and decreasing when $0.25 < s < 0.36$;
and negative and decreasing for $0.36 < s < \frac49$. The maximum
value, attained at $s \sim 0.25$, is $\mathcal{F}_{(0),{\rm max}}
\sim 0.27$. As $s \to \frac49$, $\mathcal{F}_{(0)}$ tends to
$-\infty$.
\FIGURE[ht]{ \epsfig{file=bd.f.eps,angle=270,width=0.9\hsize}
\caption{$\mathcal{F}=(\phi_{\infty}-\phi_{0})/a_{0}$ vs $s=GM/R$
for various stars in Brans-Dicke theory, in the limit $a_{0} \to
0$.} \label{Fig4} }
\subsubsection*{Solution for $b_{s} \neq 0$}\label{sec:solbn0}
Next suppose $b_{s} \neq 0$. To calculate $\varphi_{(0)}$, change
variables from $(\varphi_{(0)},u)$ to $(\psi, z)$ with
\begin{equation} \label{chvar_bn0}
\psi = 1+b_{s}\varphi_{(0)} \,,
\qquad z = \frac12 \left( 1- \sqrt{1- \frac{u}{3}} \right) \,.
\end{equation}
Initial conditions for $\psi(z)$ are then
\begin{equation}\label{heun_ic}
\psi(0)=1\,,\qquad \frac{
{\hbox{d}} \psi}{{\hbox{d}} z}\biggr|_{z=0} =
b_{s}(1-3p_0)\,.
\end{equation}
Equation (\ref{constrho3}) with $a_0^2=0$ becomes
\begin{equation}\label{heuneq}
\frac{{\hbox{d}}^{2}\psi}{{\hbox{d}} z^{2}} + \left( \frac{\tilde{\gamma}}{z} +
\frac{\tilde{\delta}}{z-1} + \frac{\tilde{\epsilon}}{z-\tilde{a}}
\right) \frac{{\hbox{d}}\psi}{{\hbox{d}} z} + \frac{\tilde{\alpha}
\tilde{\beta} \, z -\tilde{q}}{z(z-1)(z-\tilde{a})} \psi = 0\,,
\end{equation}
where
\begin{equation} \label{heunpars1}
\tilde{a} = -\frac{1}{1+3p_0}\,,\qquad
\tilde{q} = \frac{3b_{s}}{2} \left( \frac{3p_0 -1}{3p_0 +
1} \right) \,,
\qquad
\tilde{\gamma} = \tilde{\delta} =
\textstyle{\frac{3}{2}}\,,\qquad \tilde{\epsilon} = 1\,,
\end{equation}
\begin{equation} \label{heunpars2}
\tilde{\alpha} = \frac{3}{2} \left( 1- \sqrt{1-
\frac{8b_{s}}{3}} \right) \,,\qquad
\tilde{\beta} = \frac{3}{2} \left( 1+
\sqrt{1- \frac{8b_{s}}{3}} \right)\,,
\end{equation}
and
\begin{equation}
\tilde{\gamma} + \tilde{\delta} + \tilde{\epsilon}
= \tilde{\alpha} + \tilde{\beta} + 1\,.
\end{equation}
Equation (\ref{heuneq}) is called Heun's equation \cite{heun}, and
is a linear second-order differential equation with singularities
at $z=0,1,\tilde{a},\infty$. It is a natural generalization of the
hypergeometric equation to the case of four regular
singular points. The solution which satisfies initial conditions
(\ref{heun_ic}) is the local Frobenius solution about $z=0$ with
exponent $0$, and is given by the power series
\begin{equation}\label{heunseries}
\psi(z) = {\rm HeunG}(\tilde{a},\tilde{q};
\tilde{\alpha},\tilde{\beta},
\tilde{\gamma},\tilde{\delta};z) =
\sum_{r=0}^{\infty} c_{r}z^{r}\,,
\end{equation}
where the first two coefficients are given by
\begin{equation} \label{recrel1}
c_{0}=1\,,\qquad
c_{1}=\frac{\tilde{q}}{\tilde{a}\tilde{\gamma}}\,,
\end{equation}
and the higher coefficients are found by solving the three-term
recurrence relation
\begin{eqnarray} \label{recrel2}
&& (r-1+\tilde{\alpha})(r-1+\tilde{\beta})
\; c_{r-1} \nonumber\\
&& \qquad\qquad - [r(r-1+\tilde{\gamma})(1+\tilde{a})
+r(\tilde{a}\tilde{\delta}
+\tilde{\epsilon})+\tilde{q}] \; c_{r} \nonumber\\
&& \qquad\qquad\qquad\qquad
+ \tilde{a}(r+1)(r+\tilde{\gamma}) \; c_{r+1}=0\,.
\end{eqnarray}
In terms of $b_{s}$ and $p_0$, the recursion relation for the
coefficients $c_{r}$ becomes
\begin{equation}
c_{0} = 1\,,\qquad
c_{1} = b_{s}(1-3p_0)\,,
\end{equation}
\begin{eqnarray}
\label{recrel_new}
&& 2(1+3p_0)(r^{2}+r-2+6b_{s}) \; c_{r-1}
\nonumber\\
&& \qquad\qquad -\left[ r(6p_0 r + 9p_0
-1)+3b_{s}(3p_0-1)\right] \; c_{r}
\nonumber\\
&& \qquad\qquad\qquad\qquad
-(r+1)(2r+3) \; c_{r+1}=0\,.
\end{eqnarray}
This implies the coefficients $c_{r}$ of the power series
(\ref{heunseries}) can be written explicitly as a polynomial of
degree $r$ in $b_s$,
\begin{equation}
c_{r} = \sum_{i=0}^{r} a_{i}^{(r)}b_{s}^{i}\,,
\end{equation}
where $a_{i}^{(r)}$ is itself a polynomial in $p_0$ of degree
$r$.
The solutions for $a_i^{(r)}$ and $c_r$ are found explicitly in the
Appendix, where it is also shown that the coefficient of the
largest power of $b_s$ has a particularly simple form:
\begin{equation}\label{coef11}
a_{r}^{(r)} = \frac{[6(1-3p_0)]^{r}}{(2r+1)!}\,.
\end{equation}
Because $b_s$ is relatively poorly constrained, it can be larger
than unity so far as phenomenology is concerned. In this case
eq.~\pref{coef11} can be used to obtain an approximation for
$\varphi(r)$ for large $b_s$. This gives (see Appendix for
details)
\begin{eqnarray}
\label{g0} \mathcal{A}_{(0)} &=& \frac{1+p_0}{p_0} \left\{
\frac{1}{2b_{s}} \left( \cosh \sqrt{T} - \frac{\sinh
\sqrt{T}}{\sqrt{T}}\right) + \sum_{k=1}^{\infty}\sum_{j=0}^{2k-1}
\frac{P_{k,j}(p_0)f_{k,j+1}(T)}
{b_{s}^{k+1}[6(1-3p_0)]^{2k}} \right\} \,,
\\
\label{f0} \mathcal{F}_{(0)} &=& \frac{1}{b_{s}} \left( \left[ 1 +
\frac{1+p_0}{2p_0}L \right] \frac{\sinh\sqrt{T}}{\sqrt{T}}
- \frac{1+p_0}{2p_0}L \cosh \sqrt{T} - 1\right)
\nonumber\\
&& + \sum_{k=1}^{\infty}\sum_{j=0}^{2k-1} \left( f_{k,j}(T) -
\frac{1+p_0}{p_0}Lf_{k,j+1}(T)\right)
\frac{P_{k,j}(p_0)}{b_{s}^{k+1}[6(1-3p_0)]^{2k}} \,,
\end{eqnarray}
where $T=6b_{s}p_0(1-3p_0)/(1+3p_0)$ and
$L=\log(1+p_0)-\log(1+3p_0)$. If $b_{s}>0$, then $T \leq
(6-4\sqrt{2})b_{s} \sim 0.34 b_{s}$. If $b_{s}<0$, then $T \geq
-(6-4\sqrt{2})|b_{s}| \sim -0.34 |b_{s}|$. The above expressions
(\ref{g0}) and (\ref{f0}) can be used to calculate the second
constraint (\ref{constraint2_fo}).
\subsubsection*{Compactness vs central density}\label{sec:compactness}
\FIGURE[ht]{ \epsfig{file=bd.comp.eps,angle=270,width=0.9\hsize}
\caption{The compactness $s=GM/R$ plotted versus $p_0 =
P_{0}/\rho_{0}$ for constant-density stars in Brans-Dicke theory,
for various values of $a_0^2$.} \label{Fig5} }
Equation (\ref{match2_constrho}) describes how the compactness
depends on $p_0$ in GR. In order to find how scalar-matter
couplings modify this behaviour, it is necessary to solve the
equations of stellar structure to first order in $a_0^2$, and
calculate $s_{(1)}$.
Figure \ref{Fig5} plots the compactness vs $p_0$ in
Brans-Dicke theory, for various values of the Brans-Dicke coupling
$a_{0} = a_s$. Notice that the compactness eventually stops
growing with $p_0$, approaching instead an asymptotic value as
$p_0 \to \infty$. In
GR, this asymptotic value is $GM/R=4/9$, which is the maximum allowed
by Buchdahl's theorem. As $a_{0}$ increases, this asymptotic value
decreases. This is consistent
with the results of \cite{hillheintz}.
\subsection{Perturbative solutions: next-to-leading corrections}
\label{sec:chi_corrections}
In this section, $\mathcal{O}(a_0^2)$ corrections are calculated.
The defining equation for $U$ is
$p (U)=0$. Expanding it in powers of $a_0^2$ yields
\begin{equation}
U_{(1)} = \frac{12(1+p_0)^{2}}{(1+3p_0)^{2}} \;
p_{(1)}(U_{(0)}) \,.
\end{equation}
Solving equations (\ref{constrho1}) -- (\ref{constrho3})
perturbatively in $a_0^2$ yields
\begin{eqnarray}
\label{mu_corrn} \mu_{(1)}(u) &=& \frac{1}{\sqrt{u}}\int_{0}^{u}
{\hbox{d}} \hat u \sqrt{\hat u} \left[ \varphi_{(0)} \left(
1+\frac{b_{s} \varphi_{(0)}}{2} \right)
+\hat u(1- \hat u/3)(\dot{\varphi}_{(0)})^{2}\right] \,, \\
\label{pi_corrn} p_{1}(u) &=&
\frac{2(1+p_0)\varphi_{(0)}
\left( 1+ {b_{s}\varphi_{(0)}}/{2} \right)}
{(1+3p_0)\sqrt{1-u/3}-3(1+p_0)} \nonumber \\
&&+ \frac{2(1+p_0)\mu_{(1)}(u)}{\sqrt{1-u/3}} \cdot
\frac{(1+p_0)(3-2u)\sqrt{1-u/3}-(1+3p_0)}
{((1+3p_0)\sqrt{1-u/3}-3(1+p_0))^{2}}
\nonumber \\
&&+ \frac{2(1+p_0)\sqrt{1-u/3}}{6((1+3p_0)
\sqrt{1-u/3}-3(1+p_0))^{2}}
\int_{0}^{u} \frac{{\hbox{d}} \hat u}{(1- \hat u/3)^{3/2}} \;
J(\hat u) \,,
\end{eqnarray}
where the function $J(u)$ appearing in equation (\ref{pi_corrn})
is given by
\begin{eqnarray}
J(u) &=& 2 \Bigl[ 6(1+3p_0) \sqrt{1-u/3}
-(1+p_0)(2u^{2}-9u+18) \Bigr]
u(1-u/3)(\dot{\varphi}_{(0)})^{2}
\nonumber \\
&&- (1+p_0) (4u^{2}-18u+9)
\varphi_{(0)} \left( 1 + \frac{b_{s}
\varphi_{(0)}}{2} \right) \,.
\end{eqnarray}
The perturbation to the scalar profile is similarly
\begin{equation}
\varphi_{(1)} = \Phi_{1}\psi+ \Phi_{2} \tilde{\psi} \,,
\label{phi_corrn}
\end{equation}
where the functions $\psi$ and $\tilde{\psi}$ are local Frobenius
solutions of equation (\ref{heuneq}) (with parameters
(\ref{heunpars1}) -- (\ref{heunpars2})) about $z=0$ with exponents
$0$ and $-\frac{1}{2}$, respectively. They are given by
\begin{eqnarray}
\psi
&=& {\rm HeunG} \biggl( \frac{-1}{1+3p_0} , \frac{3}{2} b_{s}
\cdot \frac{3p_0-1}{3p_0+1} ; \nonumber \\
&&\hphantom{{\rm HeunG} \biggl(} \frac{3}{2}(1-\sqrt{1-8b_{s}/3}),
\frac{3}{2}(1+\sqrt{1-8b_{s}/3}), \frac{3}{2}, \frac{3}{2};
z\biggr) \,, \\
\tilde{\psi} &=& \frac{1}{\sqrt{z}} \; {\rm HeunG} \biggl(
\frac{-1}{1+3p_0},
\frac{6b_{s}(3p_0-1)+1-6p_0}{4(1+3p_0)}; \nonumber
\\
&&\hphantom{ \frac{1}{\sqrt{z}} {\rm HeunG} \biggl(}
1+\frac{3}{2}\sqrt{1-8b_{s}/3}, 1-\frac{3}{2} \sqrt{1-8b_{s}/3} ,
\frac{1}{2}, \frac{3}{2}; z \biggr) \,,
\end{eqnarray}
where $z=(1-\sqrt{1-u/3})/2$. The coefficients $\Phi_{1}$ and
$\Phi_{2}$ are given by
\begin{eqnarray}
\Phi_{1} &=& 288\int_{0}^{z}\tilde{\psi} (F p _{(1)} +
G\mu_{(1)} + H) (1-2z)^{2} [z(1-z)]^{3/2}
[1+(1+3p_0)z] \, {\hbox{d}} z \,, \\
\Phi_{2} &=& -288\int_{0}^{z} \psi (F p _{(1)} + G\mu_{(1)} +
H) (1-2z)^{2} [z(1-z)]^{3/2} [1+(1+3p_0)z] \, {\hbox{d}} z \,,
\end{eqnarray}
where
\begin{eqnarray}
F &=& -\frac {1+b_{s} \varphi_{(0)} + 8z(1-z)\dot{\varphi}_{(0)}}
{32z(1-z)(1-2z)^{2}} \,, \\
G &=& \frac{(4(1+3p_0)z+1-3p_0)(1+b_{s}\varphi_{(0)})}
{48z(1-z)(1-2z)^{4}(1+(1+3p_0)z)} \nonumber \\
&& \qquad \qquad -
\frac{[12(1+3p_0)z^{3}-6(1+7p_0)z^{2}
+(9p_0-5)z+1]\dot{\varphi}_{(0)}}
{12z(1-z)(1-2z)^{4}(1+(1+3p_0)z)} \,, \\
H &=& \frac{(4(1+3p_0)z + 1-3p_0)
\varphi_{(0)}(1+b_{s}\varphi_{(0)})(1+b_{s}\varphi_{(0)}/2)}
{24z(1-z)(1-2z)^{2}(1+(1+3p_0)z)} \nonumber \\
&& \qquad \qquad + \frac{(2(1+3p_0)z+1-p_0)
\varphi_{(0)}(1+b_{s}\varphi_{(0)}/2)\dot{\varphi}_{(0)}}
{(1-2z)^{2}(1+(1+3p_0)z)} \,.
\end{eqnarray}
\FIGURE[ht]{
\epsfig{file=c0.1.b4.e0.1.mu.eps,angle=270,width=0.4\hsize}
\epsfig{file=c0.1.b4.e1.mu.eps,angle=270,width=0.4\hsize}
\\
\epsfig{file=c0.1.b-4.e0.1.mu.eps,angle=270,width=0.4\hsize}
\epsfig{file=c0.1.b-4.e1.mu.eps,angle=270,width=0.4\hsize}
\caption{Comparison of $\mu$ vs $u$ calculated perturbatively and
numerically for an incompressible star in quasi-Brans/Dicke theory
with $a_{0}^{2}=0.1$ and $b_{s}=4$, $p_0=0.1$ (top left); $b_s
= 4$, $p_0=1$ (top right); $b_{s}=-4$, $p_0=0.1$ (bottom
left); and $b_s = -4$, $p_0=1$ (bottom right). All curves
terminate at the stellar exterior, $u=U$.} \label{Fig6}
\label{Fig7} }
The first-order corrections to the external parameters are given by
\begin{eqnarray}
\mathcal{F}_{(1)} &=& \varphi_{(1)} - 12 e^{2L} L
\dot{\varphi}_{(1)} -
\frac{3(1+p_0)^{2}}{2p_0(1+2p_0)}
L(1+b_{s}\varphi_{(0)})p _{(1)} \nonumber
\\
&&+36e^{2L} \left(
1+\frac{1+4p_0+5p_0^{2}}{2p_0(1+2p_0)}L \right)
\left( p _{(1)}\dot{\varphi}_{(0)} + 48
e^{2L}\frac{p_0(1+2p_0)}{(1+3p_0)^{2}}
(\dot{\varphi}_{(0)})^{3} \right) \nonumber
\\
&& +
12\left(1+\frac{(1+3p_0)^{2}}{2p_0(1+2p_0)}L\right)
\dot{\varphi}_{(0)}\mu_{(1)} \,,
\\
\mathcal{A}_{(1)} &=& 12e^{2L} \dot{\varphi}_{(1)} +
\frac{3(1+p_0)^{2}}{2p_0(1+2p_0)}
(1+b_{s}\varphi_{(0)})p _{(1)} - 6
\frac{(1+3p_0)^{2}}{p_0(1+2p_0)}
\dot{\varphi}_{(0)}\mu_{(1)} \nonumber
\\
&& - 1728 e^{4L}\frac{p_0(1+2p_0)}{(1+3p_0)^{2}}
(\dot{\varphi}_{(0)})^{3} - 18 e^{2L}
\frac{(5p_0^{2}+4p_0+1)}{p_0(1+2p_0)}
\dot{\varphi}_{(0)}p _{(1)} \,,
\\
s_{(1)} &=& \mu_{(1)} +2e^{2L}p _{(1)} -144 L
\frac{p_0(1+2p_0)}{(1+3p_0)^{2}}e^{4L}
(\dot{\varphi}_{(0)})^{2} \,,
\\
\mathcal{M}_{(1)} &=&
\frac{24(1+p_0)\sqrt{3p_0(1+2p_0)}} {(1+3p_0)^{2}}
p _{(1)} +
\frac{12\sqrt{3p_0(1+2p_0)}}{1+p_0}\mu_{(1)} \nonumber
\\
&& -3 \int_{0}^{U_{(0)}} du \sqrt{\frac{u}{1-u/3}}
[\varphi_{(0)}(1+b_{s}\varphi_{(0)}/2) +
2u(1-u/3)(\dot{\varphi}_{(0)})^{2}] \,,
\end{eqnarray}
where $L=\log(1+p_0)-\log(1+3p_0)$, and the profiles are
all to be evaluated at $U_{(0)}$.
If $b_{s} \neq 0$, then the relation
$a_{{\scriptscriptstyle A}} = \partial \log M / \partial \phi_{\infty}$ \cite{damourrev}
can be used to simplify $\mathcal{M}_{(1)}$:
\begin{eqnarray}
\mathcal{M}_{(1)} &=& \frac{9\sqrt{3}}{2b_{s}} \arccos \left(
\frac{1+p_0}{1+3p_0}\right)
- \frac{\sqrt{3p_0(1+2p_0)}(41p_0^{2}+34p_0+9)}
{b_{s}(1+p_0)(1+3p_0)^{2}}
\nonumber
\\
&& - \frac{96(1+p_0)\sqrt{3p_0^{3}(1+2p_0)^{3}}}
{b_{s}(1+3p_0)^{4}}\; \dot{\varphi}_{(0)}(1+b_{s}\varphi_{(0)})
+ \frac{8\sqrt{3p_0(1+2p_0)}}{1+p_0} \; \mu_{(1)}
\nonumber
\\
&& +
\frac{24(1+p_0)\sqrt{3p_0(1+2p_0)}}{(1+
3p_0)^{2}} \; p_{(1)} \,.
\end{eqnarray}
\FIGURE[ht]{
\epsfig{file=c0.1.b4.e0.1.p.eps,angle=270,width=0.4\hsize}
\epsfig{file=c0.1.b4.e1.p.eps,angle=270,width=0.4\hsize}
\epsfig{file=c0.1.b-4.e0.1.p.eps,angle=270,width=0.4\hsize}
\epsfig{file=c0.1.b-4.e1.p.eps,angle=270,width=0.4\hsize}
\caption{Comparison of $p$ vs $u$ calculated perturbatively
and numerically for an incompressible star in quasi-Brans/Dicke
theory with $a_{0}^{2}=0.1$ and $b_{s}=4$, $p_0=0.1$ (top
left); $b_s = 4$, $p_0=1$ (top right); $b_{s}=-4$,
$p_0=0.1$ (bottom left); and $b_s = -4$, $p_0=1$ (bottom
right). All curves terminate at the stellar exterior, $u=U$.}
\label{Fig8} \label{Fig9} }
\subsection{Comparing perturbative solutions with numerics}
\label{sec:num_an_cmp}
Part of the utility of analyzing the incompressible star in such
detail is that such explicit expressions for the perturbative
solutions allow a detailed comparison with direct numerical
integrations. This helps indicate the domain of validity of the
perturbative results.
First, we look at the profiles for the physical variables
$\mu(u)$, $p(u)$, and $\varphi(u)$ across the interior of the
star. The quantities $\mu$, $p$ and $\varphi$ are
respectively plotted versus $u$ in Figures \pref{Fig6},
\pref{Fig8} and \pref{Fig10}, for $a_{0}^2 = 0.1$ and several
choices for $b_{s}$ and $p_0$. The line labelled ``Zeroth
Order'' plots the zeroth-order result, {\em e.g.} $\mu_{(0)}$,
while the line labelled ``First Order'' includes also the first
correction, {\em e.g.} $\mu_{(0)} + a_{0}^{2}\mu_{(1)}$. Notice
that the curves all lie close to one another for small $u$, but
begin to separate at the stellar exterior, $u \to U$. Furthermore,
the separation is largest for the more relativistic stars, for
which $p_0$ is larger. However in all cases displayed the
perturbative results capture the full numerics quite well
throughout the entire star, with the strongest deviations
happening for $\varphi(u)$ when $b_s < 0$.
\FIGURE[ht]{
\epsfig{file=c0.1.b4.e0.1.phi.eps,angle=270,width=0.4\hsize}
\epsfig{file=c0.1.b4.e1.phi.eps,angle=270,width=0.4\hsize}
\epsfig{file=c0.1.b-4.e0.1.phi.eps,angle=270,width=0.4\hsize}
\epsfig{file=c0.1.b-4.e1.phi.eps,angle=270,width=0.4\hsize}
\caption{Comparison of $\varphi$ vs $u$ calculated perturbatively
and numerically for an incompressible star in quasi-Brans/Dicke
theory with $a_{0}^{2}=0.1$ and $b_{s}=4$, $p_0=0.1$ (top
left); $b_s = 4$, $p_0=1$ (top right); $b_{s}=-4$,
$p_0=0.1$ (bottom left); and $b_s = -4$, $p_0=1$ (bottom
right). All curves terminate at the stellar exterior, $u=U$.}
\label{Fig10} \label{Fig11}}
Of more practical interest is a similar comparison of the accuracy
of the perturbative expressions for plots that directly relate
observable quantities to one another, such as plots of $a_{\scriptscriptstyle A}$ vs
$s$. Examples of these are given in figure \pref{Fig12}, which
gives $s$, $\mathcal{A} = a_{\scriptscriptstyle A}/a_0$, $\mathcal{F} = (\phi_\infty - \phi_0)/a_0$ and
$\mathcal{M}$ as functions of the central pressure, $p_0 = P_0/\rho_0$
for the special case of Brans-Dicke theory ($b_s = 0$) with $a_0^2
= a_s^2 = 0.1$. These again show good agreement between
perturbative and numerical calculations, with the biggest
deviations arising in the most relativistic settings (largest
$p_0$).
\FIGURE[ht]{
\epsfig{file=c0.1.b0.s.cmp.eps,angle=270,width=0.4\hsize}
\epsfig{file=c0.1.b0.atilde.cmp.eps,angle=270,width=0.4\hsize}
\epsfig{file=c0.1.b0.F.cmp.eps,angle=270,width=0.4\hsize}
\epsfig{file=c0.1.b0.M.cmp.eps,angle=270,width=0.4\hsize}
\caption{Comparison of physical quantities as functions of central
pressure, $p_0 = P_0/\rho_0$, calculated perturbatively and
numerically for an incompressible star in Brans-Dicke theory ($b_s
= 0$) with $a_{s}^{2}=0.1$. The plots show compactness, $s$ (top
left); external coupling, $\mathcal{A} = a_{\scriptscriptstyle A}/a_s = Q/M a_s$, (top
right); $\mathcal{F} = (\phi_\infty - \phi_0)/a_s$ (bottom left); and $\mathcal{M}$
(bottom right). } \label{Fig12}}
\section{Conclusions}
In this paper we set up the equations of stellar
structure, with the stellar interior modeled as a
spherically symmetric, static fluid, and with gravity
described by a scalar-tensor theory with a
single light scalar coupling to matter only
through its coupling to a Jordan frame metric. For
practical reasons, and for the purposes of making
contact with earlier workers, we focus on the
special case where the scalar-matter coupling
function does not vary strongly with the
field, $a(\phi) \simeq a_s + b_s \phi$.
We seek solutions to these equations, for a variety
of equations of state, in the
special case where the scalar-matter
coupling at the stellar center is small,
$a^2_0 = a^2(\phi_0) \ll 1$. We obtain
solutions as perturbations to those
of General Relativity. By comparing these
solutions with explicit numerical
integration we verify that the perturbative
approximation works well throughout most
of the star.
These perturbative solutions
have the merit of being very simple to
integrate numerically, and of allowing
analytic solutions for some choices of
equation of state. This is very convenient
for efficiently exploring different
choices for the scalar properties, and
scalar-matter couplings.
We use these solutions to compute the
form of the observable relations that
are imposed among the external properties
of the stars by the condition that they
match continuously to the stellar interior.
There are two such relations among the
four external variables, $M$, $R$, $Q$ and
$\phi_\infty$, and our semi-analytic
approach allows a simple exposition of
how these relations depend on scalar
properties. These properties ultimately
underlie any tests of scalar-tensor
theories using astrophysical systems,
such as binary pulsars.
Finally, these methods are applied to
the illustrative case of an incompressible
star, for which the density is constant.
In this case the solutions generated by
the $a_0$ expansion may be found analytically,
making the comparisons with numerical
results particularly simple. Again we
find that the perturbative expressions
agree well with the solutions obtained
by numerical integration.
\section*{Acknowledgements}
We thank Nemanja Kaloper and Maxim Pospelov for useful
discussions. This research was supported in part by funds from the
Natural Sciences and Engineering Research Council (NSERC) of
Canada. Research at the Perimeter Institute is supported in part
by the Government of Canada through Industry Canada, and by the
Province of Ontario through the Ministry of Research and
Innovation (MRI).
% [non-LaTeX extraction artifact removed: dataset row separator at document boundary]
Coupled dynamical systems on graphs represent many diverse models
throughout the natural sciences and technology. Examples range from
regulatory and neuronal
networks in biology \cite{LaiCho01,BenHan97, MZ12, Swi80},
to Josephson junctions and coupled lasers in physics \cite{LiErn92, PhiZan93, WatStr94},
to communication, sensor, and power networks in technology \cite{DorBul12, Med12},
to name a few. Compared to partial differential equations and lattice dynamical
systems, the analysis of networks meets a new principal challenge: the rich variety
and possible complexity of the underlying graphs. The algebraic methods of graph
theory \cite{Biggs, Chung-Spectral} have been useful in understanding the contribution
of the network topology to certain aspects of networks dynamics, especially in problems
involving synchronization \cite{Med12, MZ12}. The continuum limit of nonlocally coupled
dynamical networks is one of few analytical approaches that have a potential for elucidating
dynamics of a broad class of networks \cite{KurBat02, AbrStr06, WilStr06,
GirHas12, OmeWol12,OmeRie12}.
In this limit, the solutions of the
initial value problems (IVPs) for evolution equations on large discrete
networks are approximated by those for the limiting integro-differential
equations posed on continuous spatial domains. This limiting
procedure has been used to study the mechanisms
of some very interesting effects such as chimera states
\cite{KurBat02,AbrStr06},
multistability \cite{WilStr06,GirHas12},
synchronization, and the coherence-incoherence transition \cite{OmeWol12}.
However, a rigorous justification for taking the continuum limit in
nonlocally coupled models was lacking. In this paper, we use a combination of
techniques from the theory of evolution equations \cite{EvaPDE} and the recent theory
of graph limits \cite{BorChay06, BorChay08, LovSze06, LovSze07, LovGraphLim12}
to provide such justification for a large class of dynamical
models on deterministic graphs. In fact, some of the tools that we develop
in this work come in useful in the analysis of the continuum limit of dynamical systems
on random graphs undertaken in a companion paper \cite{Med13a}.
To motivate the forthcoming analysis of the continuum limit
in the nonlocally coupled systems, we first review several representative
examples. In \cite{WilStr06}, Wiley, Strogatz, and Girvan
studied a nonlocally coupled system of phase oscillators
\begin{equation}\label{Kuramoto}
\dot\phi_i=\omega+{1\over n} \sum_{j=i-k}^{i+k} \sin\left(\phi_j-\phi_i\right),
\end{equation}
where $\phi_i:~{\mathbb R}^+\to {\mathbb S}^1:={\mathbb R}/2\pi{\mathbb Z},\; i\in [n]:=\{1,2,3,\dots,n\}$ is interpreted as the phase
of oscillator~$i$, $\omega$ is the intrinsic frequency, and the sum models the interactions between
oscillator~$i$ and $k$ of its nearest neighbors from each side
(cf. \cite{Kur84, KurBat02}). The oscillators are located on a ring and indexed
by integers from ${\mathbb Z}/n{\mathbb Z}$. By recasting (\ref{Kuramoto}) in a uniformly rotating
frame of reference, one can absorb $\omega$. Thus, below we set $\omega=0$.
\begin{figure}
\begin{center}
{\bf a}\hspace{0.1 cm}\includegraphics[height=1.8in,width=2.0in]{newf1a.pdf}\hspace{1.0cm}
{\bf b}\hspace{0.1 cm}\includegraphics[height=1.8in,width=2.0in]{newf1b.pdf}
\end{center}
\caption{The plot of the support of the function $W_{G_n}$ representing the adjacency matrix
of the $k$-nearest-neighbor graph $G_n$ ({\bf a}) and that of its limit $W_G$ ({\bf b}).
}
\label{f.1}
\end{figure}
It is instructive to view (\ref{Kuramoto}) as a system of differential
equations on graph $G_n=\langle V(G_n), E(G_n)\rangle$ with the
vertex set $V(G_n)=[n]$ and the edge set
$$
E(G_n)=\left\{ (i,j)\in [n]^2:~ 0<\mbox{dist}(i,j)\le k\right\},\;\mbox{where}\;
\mbox{dist}(i,j) =\min\{ |i-j|, n-|i-j|\}.
$$
Let $W_{G_n}:~I^2\to\{0,1\}$ such that
$$
W_{G_n}(x,y)=1\;\mbox{if}\; (i,j)\in E(G_n)\;\mbox{and}\; (x,y)\in [(i-1)n^{-1}, in^{-1})\times [(j-1)n^{-1}, jn^{-1}).
$$
Here and below, $I$ denotes $[0,1]$, the spatial domain of the
continuum limits considered in this paper.
The plot of the support of $W_{G_n}(x,y)$ in Fig.~\ref{f.1}a provides the pixel picture
of the adjacency matrix of $G_n$ \cite{Biggs}. In Fig.~\ref{f.1}a and
in similar plots throughout this paper,
we place the origin of the unit square in the top left corner of the plot to emphasize
the correspondence between $W_{G_n}$ and the adjacency matrix of $G_n$.
As $n\to \infty$, $\{W_{G_n}\}$ converges to the $\{0,1\}$-valued function $W_G(x,y)$,
whose support is shown in Fig.~\ref{f.1}b.
In \cite{WilStr06}, the analysis of the attractors of (\ref{Kuramoto}) employs the continuum limit
of (\ref{Kuramoto}).
Specifically, let $k=rn$ for some fixed $r\in (0,1]$. After interpreting the right-hand
side of (\ref{Kuramoto}) as a Riemann sum and sending $n\to\infty$,
in the uniformly rotating frame of coordinates (\ref{Kuramoto}) formally becomes
\begin{equation}\label{cont}
{\partial\over \partial t}\phi(x,t)=\int_I W_G(x,y)
\sin\left(\phi(y,t)-\phi(x,t)\right)dy,
\end{equation}
where $\phi(x,t)$ describes the evolution of the continuum of oscillators
distributed over $I$.
Equation (\ref{cont}) is called the continuum (thermodynamic) limit of
(\ref{Kuramoto}).\footnote{There is another form of the continuum limit
for the Kuramoto model \cite{StrMir91, Str2000, OttAnt08, Lai09}.
It is formulated in terms of the density characterizing the state of the
continuous system. We do not consider this limit in the present paper.}
The continuum equation (\ref{cont}) has a family of steady state
solutions
\begin{equation}\label{twist}
\theta^{(q)}(x,t)=2\pi qx+c, \; q\in {\mathbb Z}, \; c\in {\mathbb R},
\end{equation}
called $q-$twisted states. In \cite{WilStr06}, the stability analysis of
the continuous twisted states (\ref{twist}) was used to study their
discrete counterparts, which are the steady state solutions of
(\ref{Kuramoto}) ($\omega=0$) for finite $n$. The stability analysis
in \cite{WilStr06} can, in fact, be
completely translated into the discrete setting. However, suppose we replace
the family of $k$-nearest-neighbor graphs in (\ref{Kuramoto})
by a family of small-world graphs (see Fig.~\ref{ff.1}a). Then not only
does the continuum limit provide a convenient setting for the stability
analysis but also the twisted states, as the steady states
of the Kuramoto model, exist only in the limit
as the number of oscillators goes to infinity (see Fig.~\ref{ff.1}b)~\cite{Med13b}.
Therefore, in this case the continuum
limit affords the analysis of the asymptotic behavior of solutions
of the Kuramoto model for large $n$, which is not otherwise feasible in the discrete
setting. The Kuramoto-Battogtokh model generating chimera states \cite{KurBat02}
is another example, where the continuum limit seems to be critical
for understanding the nontrivial dynamics in the discrete systems.
We will return to the discussion of chimera states in Section \ref{sec.chimera}.
\begin{figure}
\begin{center}
{\bf a}\hspace{0.1 cm}\includegraphics[height=1.8in,width=2.0in]{newf2a.pdf}\hspace{1.0cm}
{\bf b}\hspace{0.1 cm}\includegraphics[height=1.8in,width=2.0in]{newf2b.pdf}
\end{center}
\caption{ {\bf a}) The pixel picture of a small-world graph obtained from that shown in
Fig.~\ref{f.1}{\bf a}
by replacing a random set of the local connections by randomly chosen long-range ones.
{\bf b}) The pixel picture for a large small-world graph.
}
\label{ff.1}
\end{figure}
These examples lead to the following questions.
\begin{description}
\item[(A)] Does the continuum model (\ref{cont}) truly approximate the dynamics of the discrete
model (\ref{Kuramoto}) for large finite $n$? If so, in what sense do the solutions of the integro-differential
equation
approximate those of (\ref{Kuramoto}) with $\omega=0$?
\item[(B)] How big is the class of network topologies for which one can use the continuum
limit? Is it restricted to the special graphs like $k-$nearest-neighbor one on a ring?
Can it be applied, for instance, to the small world networks, the original motivation for the
analysis in \cite{WilStr06}?
\end{description}
The function $W_G$ shown in Fig.~\ref{f.1}b is the limit of the functions
$\{W_{G_n}\}$ (Fig.~\ref{f.1}a) representing the adjacency matrices of the
$k$-nearest neighbor family of graphs $\{G_n\}$. The latter is an example
of a convergent graph sequence and $W_G$ is the corresponding graph
limit \cite{LovGraphLim12}. We will explain the meaning of the limit of
a graph sequence in Section~\ref{sec.limit}. Meanwhile, we refer to the
geometric interpretation of the adjacency matrix for the $k$-nearest-neighbor
graph in Fig.~\ref{f.1}a, which suggests the limiting pattern of $\{W_{G_n}\}$
as $n\to\infty$ (see Fig.~\ref{f.1}b). Likewise, the pixel picture of the large
small-world graph in Fig.~\ref{ff.1}b suggests the (piecewise constant)
limit for the small-world family of graphs, which in turn can be used in the
derivation of the continuum model like (\ref{cont}) \cite{Med13b}.
These observations hint at the possible relevance of the theory of graph limits for constructing
the continuum limits for dynamical networks. We explore this
relation for dynamical systems on convergent families of deterministic graphs
in this paper and extend this approach to random networks in \cite{Med13a}.
Interestingly, in the process of justifying the continuum limit, we discovered the
link between this problem and that of convergence of several classical
numerical methods. Specifically, we show that dynamical networks on simple
and weighted graphs analyzed in Sections~\ref{sec.simple} and \ref{sec.weight}
can be interpreted as the discretizations of the continuum evolution equation
by the collocation method and the Galerkin method respectively. Furthermore,
the analysis of the continuum limit for networks on random graphs in \cite{Med13a}
features a similar connection with the Monte-Carlo method. Therefore, in addition to the
rigorous justification of taking the continuum limit for a large class of dynamical
networks, our results characterize convergence of these numerical methods
for solving IVPs for certain nonlinear integro-differential equations.
This paper is organized as follows.
We review the necessary background on graph limits in Section~\ref{sec.limit}.
In Section~\ref{sec.formulate}, we discuss the heat equation on graphs
and graph limits. Here, we extend a classical linear heat equation
on graphs to allow nonlinear diffusion. This extension covers
many dynamical networks
arising in applications including coupled oscillator models like (\ref{Kuramoto}).
In the same section, we formally define the continuum limit for
dynamical networks of a convergent sequence of dense (weighted)
graphs. In this limit, the discrete diffusion operator becomes
an integral operator with the kernel representing the limit of
the infinite family of graphs. We show that the IVP for the
limiting equation is well-posed and admits a unique solution in
$C^1({\mathbb R};L^\infty(I))$. Further, in Theorem~\ref{thm.reg}, we specify
assumptions on the kernel and the initial conditions, which guarantee
that the solutions of the IVPs remain continuous in space over
subdomains of $I$.
This result is used to characterize the attractors of the
continuum model. In particular, we apply it to study
the regions of continuity of the chimera states and attractors
of the Kuramoto equation on certain multipartite graphs (see Section~\ref{sec.examples}).
The rest of the paper is focused on studying the relation between
the solutions of the IVPs for discrete networks and their continuum counterparts.
In Section~\ref{sec.simple}, for sequences
of simple graphs converging to $\{0,1\}-$valued graphons, we show that
the rate of convergence depends on the fractal dimension
of the boundary of the support of the graph limit.
This shows explicitly how the geometry of the graphon affects
the accuracy of the continuum limit. In Section~\ref{sec.weight},
we analyze networks on convergent
weighted graph sequences.
The results of this paper
are illustrated with the discussion of the dynamics of two concrete
models: the Kuramoto-Battogtokh nonlocal system generating
chimera states \cite{KurBat02} and the Kuramoto equation on the
half and complete bipartite graphs (cf.~Section~\ref{sec.examples}).
The final section, Section~\ref{sec.conclusion},
contains concluding remarks.
\section{Graph limits}\label{sec.limit}
\setcounter{equation}{0}
In this section, we review several definitions and results from the theory of graph
limits that we will need later. In our brief tour through graph limits, we mainly follow
\cite{BorChay11} and \cite{Pikh10}. For the full exposition of this powerful theory
with many diverse applications, we refer an interested reader to the pioneering
papers by Lov{\' a}sz and Szegedy \cite{LovSze06, LovSze07}, and Borgs, Chayes,
Lov{\' a}sz, S{\'o}s, and Vesztergombi \cite{BorChay06, BorChay08}; and to the
monograph \cite{LovGraphLim12}.
An undirected graph $G=\langle V(G), E(G)\rangle$
without loops and multiple edges is called simple. $V(G)$ stands for the
set of nodes and $E(G)\subset V(G)\times V(G)$ denotes the edge set.
\begin{figure}
\begin{center}
{\bf a}\hspace{0.1 cm}\includegraphics[height=1.8in,width=2.0in]{newf3a.pdf}\hspace{1.0cm}
{\bf b}\hspace{0.1 cm}\includegraphics[height=1.8in,width=2.0in]{newf3b.pdf}
\end{center}
\caption{ {\bf a}) The pixel picture of the Erd\H{o}s-R\'{e}nyi graph $G(40,0.5)$.
The edge between a pair of distinct nodes is inserted with probability $0.5$.
{\bf b}) The pixel picture of $G(600,0.5)$.
}
\label{ff.2}
\end{figure}
Let $G_n=\langle V(G_n), E(G_n)\rangle, n\in\mathbb{N}$ be a sequence of dense
(simple) graphs, i.e., $|E(G_n)|=\Theta(|V(G_n)|^2)$, where $\left|\cdot\right|$ denotes the cardinality of a set.
The convergence of the graph sequence $\{G_n\}$ is defined in terms of the homomorphism densities
\begin{equation}\label{hdense}
t(F,G_n)={\mbox{hom}(F,G_n)\over \left|V(G_n)\right|^{|V(F)|}}.
\end{equation}
Here, $F=\langle V(F), E(F)\rangle$ is a simple graph and $\mbox{hom}(F,G_n)$ stands for
the number of homomorphisms (i.e., adjacency preserving maps
$V(F)\to V(G_n)$).
In probabilistic terms, (\ref{hdense}) is the likelihood of a random
map $h:~V(F)\to V(G_n)$ to be a homomorphism.
\begin{df}\label{df.convergent}\cite{LovSze06, BorChay08}
The sequence of graphs $\{G_n\}$
is called convergent if $t(F,G_n)$ is convergent for every simple graph
$F$.\footnote{In the theory of graph limits, convergence in Definition~\ref{df.convergent}
is called left-convergence. Since this is the only convergence of graph sequences used
in this paper, we refer to the left-convergent sequences as convergent.}
\end{df}
It turns out that the limiting object can be represented by a measurable symmetric function
$W: I^2\to I$. We recall that $I$ stands for $[0,1]$.
Such functions are called graphons. The set of all graphons is
denoted by $\mathcal{W}_0$.
\begin{thm}\cite{LovSze06}
For every convergent sequence of simple graphs, there is $W\in\mathcal{W}_0$
such that
\begin{equation}\label{t-to-t}
t(F,G_n)\to t(F,W):=\int_{I^{|V(F)|}} \prod_{(i,j)\in E(F)} W(x_i,x_j) dx
\end{equation}
for every simple graph $F$.
Moreover, for every $W\in\mathcal{W}_0$ there is a sequence of graphs
$\{G_n\}$ satisfying
(\ref{t-to-t}).
\end{thm}
The cut-norm is important for describing the metric properties of graphons.
For any integrable function and, in particular, for any graphon $W\in\mathcal{W}_0$,
$$
\|W\|_\square =\sup_{S,T\in \mathcal{L}_I} \left| \int_{S\times T} W(x,y) dxdy\right|
$$
is called the cut-norm of $W$. Here, $\mathcal{L}_I$ stands for the set of all Lebesgue measurable
subsets of $I$. The cut-distance between two graphons $W$ and $U$
is defined by
$$
\delta_\square(U,W)=\inf_{\phi}\|U-W^\phi\|_\square,\;\;
$$
where $W^\phi(x,y):=W(\phi (x),\phi (y))$ and $\phi$ ranges over all measure-preserving
bijections of $I$. The infimum over all $\phi$ is used to make the cut-distance between graphons
invariant with respect to graph isomorphisms, as well as some other transformations that do not change
the asymptotic properties of the graph sequences (see \cite{BorChay08, LovGraphLim12} for
more details).
A graph sequence is convergent if and only if it is Cauchy in the cut-distance \cite{BorChay08}.
Graph limits are the equivalence classes of graphons
$$
[W]=\left\{U\in\mathcal{W}_0:~\delta_\square(U,W)=0\right\}.
$$
With a customary abuse of notation, we refer to both $W$ and $[W]$ as graphons.
The pseudo-metric $\delta_\square(\cdot,\cdot)$ induces the metric on
$\chi=\{[W]:~W\in\mathcal{W}_0\}$.
The metric space $(\chi,\delta_\square)$ is compact \cite{LovSze07}.
\begin{figure}
\begin{center}
{\bf a}\hspace{0.1 cm}\includegraphics[height=1.8in,width=2.0in]{newf4a.pdf}\hspace{1.0cm}
{\bf b}\hspace{0.1 cm}\includegraphics[height=1.8in,width=2.0in]{newf4b.pdf}
\end{center}
\caption{ {\bf a}) The pixel picture of the half-graph $H_{20,20}$.
{\bf b}) The limit of $\{W_{H_{n,n}}\}$.
}
\label{ff.3}
\end{figure}
We now describe a simple construction of convergent graph sequences that will be used
in the analysis of the continuum limit of dynamical networks below.
Consider a sequence of simple graphs $\{G_n\}$ on $n$ nodes. Define
\begin{equation}\label{pixel}
W_{G_n}(x,y)=\left\{ \begin{array}{ll} 1, & \mbox{if}\; (i,j)\in E(G_n)\;\mbox{and}\;
(x,y)\in \left[{i-1\over n}, {i\over n}\right)\times \left[{j-1\over n}, {j\over n}\right),\\
0,& \mbox{otherwise}.
\end{array}\right.
\end{equation}
The support of $W_{G_n}$ provides the pixel picture of the adjacency
matrix of $G_n$ (see Fig.~\ref{ff.1}a), and $[W_{G_n}]$ is the corresponding graphon.
Note that $[W_G]$ is invariant under relabeling the nodes of $G$
while $W_{G_n}$ is not.
The graph sequence $\{G_n\}$ is convergent if $W_{G_n}$ converge
with respect to the cut-norm. In particular, since for any integrable
function $W\in\mathcal{W}_0$
$$
\|W\|_\square\le \|W\|_{L^1(I^2)},
$$
convergence of $\{W_{G_n}\}$ in the $L^1$-norm implies convergence
of the graph sequence $\{G_n\}$. The deterministic networks
analyzed in this paper are actually convergent with respect to the stronger
$L^1$-norm. However, the convergence of graphons with
respect to the cut-norm does not in general imply that with respect to
$L^1$-norm. For instance, the sequence of Erd\H{o}s-R{\'e}nyi
graphs with edge density $p\in (0,1)$ is convergent to the constant
function $p$ on $I^2$, $\mbox{Const}(p)$ \cite{LovSze06, BorChay08},
while no sequence of $\{0,1\}$-valued graphons
can converge to $\mbox{Const}(p)$ with $p\in (0,1)$ in the $L^1$-norm.
In particular, $L^1$-estimates for graphons are insufficient for the
analysis of the continuum limits of
networks on random graphs \cite{Med13a}.
We conclude this section with several examples of convergent graph sequences.
\begin{ex}\label{ex.ER}\cite{LovSze06, BorChay08}
The Erd\H{o}s-R\'{e}nyi graphs.
Let $p\in (0,1)$ and consider a sequence of random graphs
$G(n,p)=\langle V(G(n,p)), E(G(n,p))\rangle$,
$V(G(n,p))=[n]$ such that the probability
${\mathbb P~}\{(i,j)\in E(G(n,p))\}=p$ for any $(i,j)\in [n]^2$ (see Fig.~\ref{ff.2}a).
Then for any simple graph $F,$ $t(F, G(n,p))$ is convergent with probability
$1$ to $p^{|E(F)|}$ as $n\to\infty$ \cite{BorChay08}.
Thus, $\{G(n,p)\}$ is a convergent sequence with the limit given by the
constant graphon $p$. The pixel picture of $W_{G(n,p)}$ in Fig.~\ref{ff.2}b
provides the intuition behind the graph limit for $\{G(n,p)\}$. Note that
for large $n$, the plot of the support of $W_{G(n,p)}$ resembles
that of the constant function if looked at from a distance.
In fact, the limiting graphon reflects the asymptotic density
of connections in $G(n,p)$ as $n\to\infty$. Using the strong law of large numbers,
one can show that $\|W_{G(n,p)} - p\|_\square\to 0$ as $n\to\infty$ with probability
$1$. Thus, $\{W_{G(n,p)}\}$ is convergent in the cut-norm but not in the $L^1$-norm.
\end{ex}
\begin{ex}\label{ex.half}\cite{LovSze06} The half-graphs. Let $H_{n,n}=\langle V(H_{n,n}), E(H_{n,n})\rangle$
be a bipartite graph on $2n$ nodes such that
$$
V(H_{n,n})=\{1,2,\dots,n,1^\prime, 2^\prime,\dots, n^\prime \},\;
E(H_{n,n})=\{( i,j^\prime) \in V(H_{n,n})\times V(H_{n,n}):~ i\le j\}
$$
(see Fig.~\ref{ff.3}a).
The sequence $\{ H_{n,n}\}$ converges to the graphon $[H]$ where
$H: I^2\to I$ is the characteristic function of the set
$\{ (x,y):~ |x-y| \ge 1/2\}$ (see Fig.~\ref{ff.3}b). In this example, $\{W_{H_{n,n}}\}$
converges to $H$ pointwise, and, by the dominated convergence theorem,
in the $L^1$-norm.
\end{ex}
\section{The formulation of the problem} \label{sec.formulate}
\setcounter{equation}{0}
\subsection{The heat equation on discrete and continuous domains}
Let $G_n=\langle V(G_n), E(G_n), W(G_n)\rangle$ be a sequence of weighted graphs,
where $V(G_n)=[n]$ and $E(G_n)$ are the sets of nodes and edges respectively;
and $W(G_n)\!:\ [n]^2\to[-1, 1]$ is a symmetric weight matrix of the form
$$
(W(G_n))_{ij}=\left\{\begin{aligned}&w_{ij}^{(n)}, && (i, j)\in E(G_n),\\ &0,
&&{\rm otherwise.}\end{aligned}\right.
$$
If $G_n$ is a simple graph, $W(G_n)$ is a $\{0,1\}$-valued matrix.
By the nonlinear heat equation on $G_n$ we mean the system of
differential equations
\begin{equation}\label{dheat}
{d\over dt} u_i^{(n)}(t)=\lambda_i^{(n)}\sum_{j=1}^n w_{ij}^{(n)}
D\left(u^{(n)}_j-u^{(n)}_i\right),\; i\in [n],
\end{equation}
where $u^{(n)}(t)=\left( u^{(n)}_1(t),u^{(n)}_2(t),\dots,u^{(n)}_n(t)\right)^\mathsf{T}$,
and $\lambda_i^{(n)}$ are scaling coefficients.
The function $D:~{\mathbb R}\to{\mathbb R}$ is Lipschitz continuous
\begin{equation}\label{Lip}
\left|D(u)-D(v)\right|\le L |u-v| \;\forall u,v\in {\mathbb R}.
\end{equation}
Throughout this paper, we will
use $\lambda_i^{(n)}=n^{-1}$. However, other scalings may also be used.
\begin{rem}\label{rem.generalize}
Our analysis applies to a more general class of equations
\begin{equation}\label{more-general}
{d\over dt} u_i^{(n)}(t)=\lambda_i^{(n)}\sum_{j=1}^n w_{ij}^{(n)}
D\left(u^{(n)}_j-u^{(n)}_i\right) + f_i(t,u^{(n)}),\; i\in [n],
\end{equation}
where functions $f_i(t,u), i\in [n],$ can be taken, for instance, to be
continuous in $t$ and Lipschitz continuous in $u$:
$$
\left|f_i(t,u)-f_i(t,v)\right|\le L |u-v| \;\forall u,v,t \in {\mathbb R}, \; i\in [n].
$$
To keep the presentation simple, we will restrict the analysis to the case of
(\ref{dheat}).
It is straightforward to extend our results to cover (\ref{more-general}).
\end{rem}
If $D(u)=u$, the coupling operator on the right-hand side
of (\ref{dheat}) is the graph Laplacian, and Equation (\ref{dheat})
becomes the linear heat equation on $G_n$. The linear heat equation
has many applications in combinatorial problems such as random walks on graphs
\cite{Chung-Spectral}, and dynamical problems, e.g., analysis of consensus
protocols \cite{Med12}. In this paper, we focus on the nonlinear heat
equation, which provides the framework for a large class of dynamical networks.
In particular, the Kuramoto equation (\ref{Kuramoto}) is of this type.
In the remainder of this paper, we will derive and justify
the continuum counterpart of (\ref{dheat})
\begin{equation}\label{nheat}
{\partial\over\partial t} u(x,t) =\int_{I} W(x,y) D\left( u(y,t)-u(x,t)\right) dy.
\end{equation}
The kernel $W$ will be specified separately for each class of problems that we consider
below.
\subsection{The well-posedness of the IVP}
Before setting out to study the relation between solutions of the discrete and continuous
heat equations (\ref{dheat}) and (\ref{nheat}), we first address the well-posedness
of the IVP for (\ref{nheat}).
It is convenient to interpret the solution of the IVP for (\ref{nheat}),
$u(x,t)$, as a vector-valued map $\mathbf{u}:[0,T]\to L^\infty(I)$. Throughout this paper,
we will use the bold font to denote the vector-valued function $\mathbf{u}(t)$
corresponding to a function of two variables $u(x,t)$.
\begin{thm}\label{thm.wellposed}
Suppose $D$ is Lipschitz continuous, $W\in L^\infty(I^2)$, and $\mathbf{g}\in L^\infty(I)$.
Then for any $T>0$, there exists a unique solution of the IVP for (\ref{nheat})
$\mathbf{u}\in C^1({\mathbb R};L^\infty(I))$ subject to the initial condition $\mathbf{u}(0)=\mathbf{g}$.
\end{thm}
{\it Proof.\;}\; The proof of Theorem~\ref{thm.wellposed} is based on the contraction mapping principle
(cf.~\cite[Theorem~1.1, Chapter VII]{DalKre70}). We include the details for completeness.
Rewrite the IVP for (\ref{nheat}) as the integral equation
\begin{equation}\label{fix}
\mathbf{u}=K\mathbf{u},
\end{equation}
where
$$
[K\mathbf{u}](x,t):=\mathbf{g} +\int_0^t \int_I W(x,y) D\left( u(y,s)-u(x,s)\right)dyds.
$$
Let $M_\mathbf{g}$ be a metric subspace of $C(0,\tau; L^\infty (I))$ (where $\tau>0$ will be
specified later) consisting of functions $\mathbf{u}$ satisfying $\mathbf{u}(0)=\mathbf{g}$.
Then (\ref{fix}) is the fixed point equation for the operator $K: M_{\mathbf{g}}\rightarrow M_{\mathbf{g}}$.
We show below that $K$ is a contraction for a small $\tau>0$.
Indeed, let
\begin{equation}\label{tau}
\tau\le (4L\|W\|_{L^\infty(I^2)})^{-1},
\end{equation}
where $L$ is the Lipschitz constant of $D(\cdot)$. For any $\mathbf{u}, \mathbf{v}\in M_\mathbf{g}$
we have
$$
\left\| K\mathbf{u} - K\mathbf{v} \right\|_{M_\mathbf{g}} = \max_{t\in [0,\tau]}
\left\| K\mathbf{u}(t) - K\mathbf{v}(t) \right\|_{L^\infty (I)}
$$
$$
\le \max_{t\in [0,\tau]} \mathop{\esssup}_{x\in I}
\int_{I\times [0,t]} \left| W(x,y)\right|
\left| D\left( u(y,s)-u(x,s)\right)- D\left( v(y,s)-v(x,s)\right)
\right| dy ds
$$
$$
\le \tau L \|W\|_{L^\infty(I^2)} \max_{t\in [0,\tau]}
\left\{ \int_I \left|u(y,t)-v(y,t)\right| dy+
\left\| \mathbf{u}(t)-\mathbf{v}(t)\right\|_{L^\infty (I)} \right\}
$$
$$
\le
2\tau L \|W\|_{L^\infty(I^2)}\max_{t\in [0,\tau]} \left\|\mathbf{u}(t)-\mathbf{v}(t)\right\|_{L^\infty (I)}.
$$
Thus, by (\ref{tau}) we have
\begin{equation}\label{contract}
\| K\mathbf{u}-K\mathbf{v} \|_{M_\mathbf{g}} \le {1\over 2}
\| \mathbf{u}-\mathbf{v} \|_{M_\mathbf{g}} .
\end{equation}
By the Banach contraction mapping principle, there exists a unique
solution of the IVP for (\ref{nheat}) $\bar{\mathbf{u}}\in M_\mathbf{g}\subset C(0,\tau; L^\infty(I))$.
Using $\bar{\mathbf{u}}(\tau)$ as the initial condition, the local solution
can be extended to $[0, 2\tau]$, and, by repeating this argument,
to $[0, T]$ for any $T>0$. In a similar fashion, we can prove the existence and
uniqueness of the solution of the IVP for (\ref{nheat}) on $[-T,0]$ for any
$T>0$.
Furthermore, since the integrand
in (\ref{fix}) is continuous as a map
$L^\infty(I)\to L^\infty(I)$, $\mathbf{u}$ is continuously differentiable.
Thus, we have a classical solution
of the IVP for (\ref{nheat}) on the whole real axis.\\
$\square$
\subsection{Spatial regularity}
The classical heat equation, as a parabolic partial differential equation, has a strong
smoothening property.
Regardless of the regularity of the initial data, the solution of the IVP
for the classical heat equation is a smooth function of the space variables
for all positive times. No such mechanism is present in the heat
equation on graph limits. Below we show that
the spatial regularity of solutions of the IVP is determined by the
regularity of graphon $W$ and initial condition $\mathbf{u}(0)$.
\begin{thm}\label{thm.reg}
Let $D:{\mathbb R}\to{\mathbb R}$ be a Lipschitz continuous function and
$J=(\alpha,\beta)\subset I$. Suppose
for all $x\in J$ and for almost all $y\in I$,
$W\in L^\infty(I^2)$ has a weak derivative ${\partial\over\partial x}W(x,y)$
and
\begin{equation}\label{weak-assumption}
\esssup_{y\in I} \left\|{\partial\over\partial x}W(\cdot,y)\right\|_{L^2(J)}\le C_1,
\end{equation}
for some $C_1>0$.
Then for any $0<T<\infty$, all $t\in [0,T],$ and
$\alpha<\alpha^\prime<\beta^\prime<\beta,$ the solution of the
IVP for (\ref{nheat}) satisfies\footnote{$H^1(J)$ stands
for the Sobolev space of all Lebesgue measurable functions
$f$ on an open interval $J\subset {\mathbb R}^1$ such that $f$ and
its distributional derivative $f_x$ are in $L^2(J)$ \cite{CheMil12}.}
$$
\mathbf{u}(t)\in H^1(J^\prime),\; J^\prime=(\alpha^\prime, \beta^\prime),
$$
provided $\mathbf{u}(0)\in L^\infty(I)\cap H^1(J)$.
\end{thm}
{\it Proof.\;}\;
Let $T>0$ be arbitrary but fixed, and
$$
h_0={1\over 2} \min\{ \alpha^\prime-\alpha, \beta-\beta^\prime\}.
$$
Then for $0<h<h_0$, the difference quotient
$$
\xi(x,t)={u(x+h,t)-u(x,t)\over h}
$$
is a well-defined function on $\Omega_T=J^\prime\times [0,T]$. Further,
for $(x,t)\in \Omega_T$, $\xi(x,t)$
satisfies the following equation
\begin{eqnarray}\nonumber
{\partial \over \partial t}\xi(x,t)&=&
\int_I W(x,y) h^{-1}\left\{ D\left(u(y,t)-u(x+h,t)\right)-
D\left(u(y,t)-u(x,t)\right)\right\}dy\\
\label{xiode}
&+&\int_I D^h_x W(x,y) D\left(u(y,t)-u(x+h,t)\right) dy,
\end{eqnarray}
where
$$
D^h_x W(x,y)={W(x+h,y) -W(x,y)\over h}.
$$
By multiplying both sides of (\ref{xiode}) by $\xi(x,t)$ and
integrating both sides of the resultant equation over $J^\prime$ with respect
to $x$, we have
\begin{eqnarray}\nonumber
{1\over 2}\int_{J^\prime} {\partial \over \partial t}\xi(x,t)^2 dx &=&\int_{J^\prime \times I}
W(x,y) h^{-1}\left\{ D\left(u(y,t)-u(x+h,t)\right)-
D\left(u(y,t)-u(x,t)\right)\right\}\xi(x,t) dxdy\\
\nonumber
&+&\int_{J^\prime\times I}
D^h_x W(x,y) D\left(u(y,t)-u(x+h,t)\right) \xi(x,t) dxdy\\
\label{re-xiode}
&=:& T_1+T_2.
\end{eqnarray}
Using $\mathbf{u}\in C(0,T;L^\infty(I))$, Lipschitz continuity of $D(\cdot)$,
and the triangle inequality, we have
\begin{equation}\label{boundD}
\max_{t\in [0,T]} \esssup_{(x,y)\in I^2} |D(u(y,t)-u(x,t))|\le 2L
\|\mathbf{u}\|_{C(0,T; L^\infty(I))}=:C_2.
\end{equation}
Furthermore, using Fubini's theorem, (\ref{weak-assumption}), and
the standard results for the difference quotients
(see, e.g., Theorem~5.8.3~\cite{EvaPDE}), we have
\begin{equation}\label{boundDW}
\|D_x^h W\|_{L^2(J^\prime \times I)} \le \esssup_{y\in I}\|D_x^h W\|_{L^2(J^\prime)}
\le C_3 \esssup_{y\in I} \|{\partial\over \partial x} W(\cdot,y)\|_{L^2(J)} \le C_4,
\end{equation}
and, likewise,
\begin{equation}\label{bound-xi0}
\|{\boldsymbol\xi}(0)\|_{L^2(J^\prime)} \le C_5 \| \mathbf{u}(0)\|_{H^1(J)},
\end{equation}
where positive constants $C_4$ and $C_5$ are independent of $h\in(0,h_0)$.
Using (\ref{Lip}), we bound the first term on
the right-hand side of (\ref{re-xiode})
\begin{equation}\label{T1}
|T_1|\le \|W\|_{L^\infty(I^2)} \int_{J^\prime\times I} L \xi(x,t)^2 dxdy= L \|W\|_{L^\infty(I^2)}\|{\boldsymbol\xi}(t)\|_{L^2(J^\prime)}^2.
\end{equation}
For the second term, we use (\ref{boundD}), (\ref{boundDW}),
and the Cauchy-Schwarz inequality
\begin{eqnarray}\nonumber
|T_2| &\le & C_2 \int_{J^\prime\times I}
\left| D^h_x W {\boldsymbol\xi}(t) \right| dxdy
\le C_2 \|D_x^h W \|_{L^2(J^\prime \times I)}
\|{\boldsymbol\xi}(t)\|_{L^2(J^\prime)} \\
\label{T2}
&\le & C_2 C_4 \|{\boldsymbol\xi}(t)\|_{L^2(J^\prime)}.
\end{eqnarray}
By combining (\ref{re-xiode}), (\ref{T1}), and (\ref{T2}), we have
$$
{d\over dt} \|{\boldsymbol\xi}(t)\|^2_{L^2(J^\prime)} \le C_6\|{\boldsymbol\xi}(t)\|^2_{L^2(J^\prime)}+C_7, \;
C_6=2L \|W\|_{L^\infty(I^2)} +C_7, \; C_7=C_2 C_4,
$$
where inequality $2\|{\boldsymbol\xi}(t)\|_{L^2(J^\prime)} \le
\|{\boldsymbol\xi}(t)\|^2_{L^2(J^\prime)}+1$ was used.
Using Gronwall's inequality, we obtain
\begin{eqnarray}\nonumber
\|{\boldsymbol\xi}(t)\|^2_{L^2(J^\prime)} &\le&
\left(\|{\boldsymbol\xi}(0)\|^2_{L^2(J^\prime)} +{ C_7 \over C_6}\right)
\exp\{C_6T\}\\
\label{bound-xi}
&\le&
\left( C_5^2 \| \mathbf{u}(0)\|^2_{H^1(J)} +{ C_7 \over C_6}\right)
\exp\{ C_6T\}, \; t\in[0,T].
\end{eqnarray}
The last inequality yields a uniform in $h\in (0,h_0]$ bound on the difference quotient
$\|{\boldsymbol\xi}(t)\|_{L^2(J^\prime)}$.
Using the properties of the difference quotients
(cf. Theorem~5.8.3~\cite{EvaPDE}), we conclude that $\mathbf{u}(t)\in H^1(J^\prime)$
for all $t\in (0,T]$.\\
$\square$
\section{Networks on simple graphs}\label{sec.simple}
\setcounter{equation}{0}
In this and in the following sections, we prove that the solution
of the IVP for appropriately chosen continuous problem (\ref{nheat})
approximates the solutions of the discrete problems (\ref{dheat})
when $n$ is sufficiently large. We prove this result for two classes
of convergent graph sequences. In this section, we consider
the case of a sequence of simple graphs converging to a
$\{0,1\}$-valued graphon, and we study a more general case
of convergent sequences of weighted
graphs\footnote{For weighted graphs, one can also define
convergence by extending the notion of the homomorphism density
for this case (see \cite{LovSze06} for details). We do not discuss this
generalization here, because for the problems that we study in this
paper a simpler (and stronger) form of convergence, convergence
in $L^1-$norm, is sufficient (see Section~5).}
in the next section.
We single out networks on $\{0,1\}$-valued graphons for two reasons.
First, many coupled oscillator models fit into
this framework (see, e.g., \cite{WilStr06, GirHas12} and \S\ref{sec.half}).
Second, for this class of networks we can explicitly estimate the accuracy
of approximation of the solutions of the discrete models by those
of their continuum limits in terms of the network size and the geometry
of the graphon of the network (cf. Theorem~\ref{coloc-rate}).
This result is important, because it reveals the structural properties of the
graphs shaping the accuracy of the thermodynamic limit.
Let $W:I^2\to\{0,1\}$ be a symmetric measurable function.
We denote the support of $W$ by
$$
W^+=\{ (x,y)\in I^2:\; W(x,y)\neq 0\}
$$
and its boundary by $\partial W^+$.
For convenience, we rewrite the IVP for (\ref{nheat})
\begin{eqnarray}\label{sheat}
{\partial\over\partial t} u(x,t)&=&\int_I W(x,y) D\left( u(y,t)-u(x,t)\right) dy,\\
\label{sheat-ic}
u(x,0) &=& g(x).
\end{eqnarray}
Throughout this section, to simplify presentation we assume that
$g(x)$
is a step function.
Next, we define a sequence of discrete problems.
To this end, we fix $n\in{\mathbb N}$, divide $I$ into $n$ subintervals
\begin{equation}\label{partition}
I^{(n)}_1=\left[0, {1\over n}\right),\; I^{(n)}_2=\left[{1\over n},{2\over n}\right), \dots,
I_n^{(n)}=\left[ {n-1\over n},1 \right),
\end{equation}
and define a sequence of simple graphs $G_n=\langle V(G_n), E(G_n) \rangle$
such that
$V(G_n)=[n]$ and
$$
E(G_n)=\{(i,j)\in [n]^2:\; (I_i^{(n)}\times I_j^{(n)})\cap W^+\neq\emptyset \}.
$$
The IVP for the nonlinear heat equation on $\{G_n\}$, a discrete
counterpart of (\ref{sheat}),
is given by
\begin{eqnarray}\label{dsheat}
{d\over dt} u_i^{(n)}(t) &=& n^{-1} \sum_{j: (i,j)\in E(G_n)} D(u^{(n)}_j-u^{(n)}_i),\\
\label{dsheat-ic}
u^{(n)}_i(0)&=&g^{(n)}_i,\; i\in [n].
\end{eqnarray}
There are many ways of approximating $g(x)$ by $g_n(x)$. For concreteness,
we assign $g_i^{(n)}$ the average value of $g(x)$ on $I_i^{(n)}$:
\begin{equation}\label{mean-g}
g_i^{(n)}=n \int_{I^{(n)}_i} g(x) dx.
\end{equation}
To compare the solutions of the discrete and continuous models, it is
convenient to represent the discrete function $u^{(n)}=(u^{(n)}_1,
u^{(n)}_2,\dots, u^{(n)}_n)^\mathsf{T}$ as a step function on $I$ as follows
\begin{equation}\label{step-fun}
u_n(x,t) = u^{(n)}_i(t), \;\mbox{if} \; x\in I^{(n)}_i.
\end{equation}
Then $u_n(x,t)$ satisfies the following IVP
\begin{eqnarray}\label{coloc}
{\partial\over\partial t} u_n(x,t)&=&\int_I \hat W_n(x,y) D\left( u_n(y,t)-u_n(x,t)\right) dy,\\
\label{coloc-ic}
u_n(x,0) &=& g_n(x),
\end{eqnarray}
where
$$
g_n(x)=g_i^{(n)} \;\mbox{if}\; x\in I^{(n)}_i, i\in[n],
$$
and $\hat W_n (x,y)$ is the step function such that for $(x,y)\in
I^{(n)}_i\times I^{(n)}_j,$
$(i,j)\in [n]^2,$
\begin{equation}\label{hat-W}
\hat W_n (x,y)= \left\{\begin{array}{ll} 1, & \mbox{if}\; (I^{(n)}_i\times I^{(n)}_j)\cap W^+\neq\emptyset,\\
0, & \mbox{otherwise}.
\end{array}
\right.
\end{equation}
\begin{thm}\label{coloc-rate}
Let $\mathbf{u}$ and $\mathbf{u_n}$ denote the vector-valued functions
corresponding to the solutions
of (\ref{sheat}), (\ref{sheat-ic}), and (\ref{coloc})-(\ref{mean-g})
respectively. Denote
the upper box-counting
dimension of $\partial W^+$
by $2b=\overline{\dim}_B \partial W^+$
(cf. \S~3.1,\cite{Falc-Fractal}) and suppose
that $b\in [0.5, 1)$.
Then for any $\epsilon> 0$ and all sufficiently large $n$
\begin{equation}\label{rate}
\| \mathbf{u} -\mathbf{u_n}\|_{C(0,T;L^2(I))} \le C_1
n^{-(1-b-\epsilon)},
\end{equation}
where constant $C_1$ is independent of $n$.
\end{thm}
{\it Proof.\;}\;
Denote $\xi_n(x,t)= u_n(x,t)-u(x,t)$. By subtracting
(\ref{sheat}) from (\ref{coloc}), we have
\begin{eqnarray}\nonumber
{\partial \xi_n\over \partial t} &=& \int_I \hat W_n(x,y)\left\{ D\left(u_n(y,t)-u_n(x,t)\right)-
D\left(u(y,t)-u(x,t)\right)\right\} dy\\
&+&
\label{subtract1}
\int_I \left( \hat W_n(x,y)- W(x,y)\right) D\left(u(y,t)-u(x,t)\right)dy.
\end{eqnarray}
Next, we multiply both sides of (\ref{subtract1}) by $\xi_n(x,t)$ and
integrate over $I$
\begin{eqnarray}\nonumber
{1\over 2} \int_I {\partial\over \partial t} \xi_n(x,t)^2 dx &=& \int_{I^2} \hat W_n(x,y)\left\{ D\left(u_n(y,t)-u_n(x,t)\right)-
D\left(u(y,t)-u(x,t)\right)\right\} \xi_n(x,t) dxdy\\
&+&
\label{dxi2a}
\int_{I^2} \left(\hat W_n(x,y)-W(x,y)\right) D\left(u(y,t)-u(x,t)\right)\xi_n(x,t) dxdy.
\end{eqnarray}
Using the Lipschitz continuity of $D(\cdot)$, $\|\hat W_n\|_{L^\infty (I^2)}=1$,
the triangle inequality, and the Cauchy-Schwarz inequality,
we estimate the first term
on the right-hand side of (\ref{dxi2a})
\begin{eqnarray}\nonumber
\left|
\int_{I^2} \hat W_n(x,y)\left\{ D\left(u_n(y,t)-u_n(x,t)\right)-
D\left(u(y,t)-u(x,t)\right)\right\} \xi_n(x,t) dxdy
\right|
\\
\label{first}
\le L \int_{I\times I} \left|\left(\xi_n(y,t)-\xi_n(x,t)\right)
\xi_n(x,t)\right| dxdy \le
2L\|{\boldsymbol\xi}_n(t)\|_{L^2 (I)}^2.
\end{eqnarray}
We estimate
the second term on the right-hand side of (\ref{dxi2a}),
using the Cauchy-Schwarz inequality and the bound on $D(\cdot)$ (cf. (\ref{boundD}))
\begin{eqnarray}\nonumber
\left|\int_{I^2} \left( \hat W_n(x,y)-W (x,y)\right) D\left(u(y,t)-u(x,t)\right)\xi_n(x,t) dxdy\right|\\
\nonumber
\le \esssup_{(x,y,t)\in I^2\times [0,T]} \left| D\left(u(y,t)-u(x,t)\right)\right|
\left|\int_{I^2} \left( \hat W_n(x,y)-W (x,y)\right)\xi_n(x,t) dxdy\right|\\
\label{second}
\le C_2 \|W-\hat W_n\|_{L^2(I^2)} \|{\boldsymbol\xi}_n\|_{L^2(I)}
\end{eqnarray}
for some constant $C_2>0$ independent of $n$.
Using (\ref{first}) and (\ref{second}), from (\ref{dxi2a}) we have
\begin{equation}\label{insert-1}
{d\over dt} \|{\boldsymbol\xi}_n\|^2_{L^2(I)} \le 4L \|{\boldsymbol\xi}_n\|^2_{L^2(I)}+
2C_2 \|W-\hat W_n\|_{L^2(I^2)}\|{\boldsymbol\xi}_n\|_{L^2(I)}.
\end{equation}
Let $\varepsilon>0$ be arbitrary but fixed, and set
$$
\phi_\varepsilon(t)=\sqrt{\|{\boldsymbol\xi}_n\|^2_{L^2(I)}+\varepsilon}.
$$
By (\ref{insert-1}),
\begin{equation}\label{insert-2}
{d\over dt} \phi_\varepsilon(t)^2\le 4L \phi_\varepsilon(t)^2+
2C_2 \|W-\hat W_n\|_{L^2(I^2)} \phi_\varepsilon(t).
\end{equation}
Since $\phi_\varepsilon(t)$ is positive on $[0,T]$, from
(\ref{insert-2}), we have
$$
{d\over dt} \phi_\varepsilon(t)\le 2L \phi_\varepsilon(t)+
C_2 \|W-\hat W_n\|_{L^2(I^2)}, \; t\in [0,T].
$$
By Gronwall's inequality,
\begin{equation}\label{insert-3}
\sup_{t\in [0,T]} \phi_\varepsilon(t) \le \left( \phi_\varepsilon(0)
+{C_2\|W-\hat W_n\|_{L^2(I^2)}\over 2L}\right) \exp\{2LT\}.
\end{equation}
Since $\varepsilon>0$ is arbitrary, (\ref{insert-3}) implies
\begin{equation}\label{Gron}
\sup_{t\in [0,T]}\|{\boldsymbol\xi}_n(t)\|_{L^2(I)}\le \left( \|\mathbf{g}-\mathbf{g}_n\|_{L^2(I)}
+{C_2\|W-\hat W_n\|_{L^2(I^2)}\over 2L}\right) \exp\{2LT\}.
\end{equation}
It remains to estimate $\|W-\hat W_n\|_{L^2(I^2)}$.
To this end, consider the set of discrete cells $I_i^{(n)} \times I_j^{(n)}$ that covers the boundary
of the support of $W$
$$
J(n)=\{(i,j)\in[n]^2:~ (I_i^{(n)}\times I_j^{(n)})\cap \partial W^+\neq\emptyset \} \;\mbox{and}\;
C(n)=\left|J(n)\right|.
$$
Using one of several equivalent definitions of the upper box-counting dimension of
a subset of ${\mathbb R}^n$, we have
$$
2b:=\overline{\dim}_B \partial W^+=
\overline{\lim_{\delta\to 0}} {\log N_\delta (\partial W^+)\over -\log\delta},
$$
where $N_\delta (\partial W^+)$ is the number of cells of a $(\delta\times\delta)$-mesh that intersect $\partial W^+$
(see Equation (3.12)(iv) in \cite{Falc-Fractal}).
Thus, for any $\epsilon>0$ and all sufficiently large $n$, we have
$$
C(n)\le n^{2(b+\epsilon)}.
$$
Since $W$ and $\hat W_n$ coincide on all cells $I_i^{(n)}\times I_j^{(n)}$ for which
$(i,j)\notin J(n)$, for any $\epsilon>0$ and all sufficiently large $n$, we have
\begin{equation}\label{estimW}
\|W-\hat W_n\|_{L^2(I^2)}^2=\int_{I^2} (W-\hat W_n)^2 dxdy \le C(n)n^{-2}\le n^{-2(1-b-\epsilon)}.
\end{equation}
Finally, from (\ref{mean-g}) it is easy to see that
\begin{equation}\label{numbered}
\|\mathbf{g}-\mathbf{g}_n\|_{L^2(I)}^2=O(n^{-1}).
\end{equation}
The combination of (\ref{Gron}), (\ref{estimW}), and (\ref{numbered})
implies (\ref{rate}).\\
$\square$
\section{Networks on weighted graphs}\label{sec.weight}
\setcounter{equation}{0}
In this section, we study a more general case of the heat equation
on convergent sequences of weighted graphs. First, we define two
graph sequences generated by a given graphon $W$ and then we
prove the convergence of the corresponding discrete problems
to the continuum limit (\ref{sheat}).
Throughout this section, we assume that $W: I^2\to [-1,1]$ is a
symmetric measurable function. Let $\mathcal{P}_n$ denote the
partition of $I$ into $n$ intervals, $\mathcal{P}_n=\{I_i^{(n)}, i\in [n]\}$
(see (\ref{partition})) and
$$
X_n=\left\{{1\over n}, {2\over n}, \dots, {n\over n}\right\}.
$$
The quotient of $W$ and $\mathcal{P}_n$,
denoted $W/\mathcal{P}_n$, is the
complete graph on $n$ nodes
$$
W/\mathcal{P}_n =\langle [n], [n]\times [n], \bar W_n\rangle,
$$
such that weights $(\bar W_n)_{ij}$ are obtained by averaging
$W$ over the sets in $\mathcal{P}_n$
\begin{equation}\label{average}
(\bar W_n)_{ij} =n^2 \int_{I_i \times I_j} W(x,y) dxdy.
\end{equation}
The second sequence of weighted graphs is obtained in a way that
is similar to the construction of $W$-random graph (cf. \cite{LovSze06})
\begin{equation}\label{graphH}
\mathbb{H}(X_n,W)=\langle [n], [n]\times [n], \tilde W_n\rangle, \;
(\tilde W_n)_{ij}=W\left({i\over n}, {j\over n}\right).
\end{equation}
In the remainder of this section, we prove convergence of the nonlinear
heat equations on $W/\mathcal{P}_n$ and $\mathbb{H}(X_n,W)$ to the continuum
equation on the graphon $W$ (cf.~(\ref{sheat})). Furthermore, we show that
the former problems correspond to the discretizations of (\ref{sheat})
using the method of Galerkin and the collocation method respectively,
thus, relating the problem of justification of the thermodynamic
limit for dynamical networks to two well-known numerical schemes for
equations of mathematical physics.
We first consider the IVP for the heat equation on $W/\mathcal{P}_n$
\begin{eqnarray}\label{ode}
{d\over dt} u_i^{(n)} (t) &=& n^{-1}\sum_{j=1}^n (\bar W_n)_{ij} D\left( u^{(n)}_j(t)-u^{(n)}_i (t) \right), \\
\label{ode-ic}
u_i^{(n)}(0)&=&g^{(n)}_i, \; i\in [n],
\end{eqnarray}
where
$g^{(n)}_i$ is defined in (\ref{mean-g}).
By associating the step function $u_n(x,t)$ with $u^{(n)}(t)$ (see (\ref{step-fun})), we rewrite
(\ref{ode}) and (\ref{ode-ic}) as
\begin{eqnarray}\label{finite-int}
{\partial \over \partial t} u_n(x,t) &=&\int_{I} W_n(x,y) D\left(u_n(y,t) -u_n(x,t)\right)dy,\\
\label{finite-int-ic}
u_n(x,0) &=& g_n(x),
\end{eqnarray}
where $W_n$ and $g_n$ are the step functions
\begin{eqnarray*}
W_n(x,y) &=& \bar{W}_{ij} \;\mbox{for} \; (x,y)\in I^{(n)}_i\times I^{(n)}_j,\\
g_n(x) &=& g^{(n)}_i, \;\mbox{for} \; x\in I^{(n)}_i.
\end{eqnarray*}
\begin{rem}\label{rem.Galerkin}
It is instructive to note that (\ref{ode}) and
(\ref{ode-ic}) can be viewed as the Galerkin
approximation of the IVP (\ref{sheat}) and (\ref{sheat-ic}).
Indeed, let $H_n$ denote a finite-dimensional subspace of $L^2(I)$
$$
H_n=\mbox{span}\{\mathbf{\phi}_1, \mathbf{\phi}_2,\dots, \mathbf{\phi}_n\},
$$
where $\mathbf{\phi}_i=\chi_{I^{(n)}_i}$ is the characteristic function of
$I_i^{(n)}=[(i-1)n^{-1}, in^{-1})$.
Replacing $u(x,t)$ in (\ref{sheat}) with
$$
u_n(x,t)=\sum_{k=1}^n u^{(n)}_k(t) \phi_k (x)\in H_n
$$
and projecting the resultant equation on $H_n$, we arrive at (\ref{ode}).
\end{rem}
\begin{thm}\label{Galerkin-convergence}
Let $\mathbf{u}$ and $\mathbf{u_n}$ be the solutions
of (\ref{sheat}), (\ref{sheat-ic}), and (\ref{finite-int}), (\ref{finite-int-ic}),
respectively. Suppose $W\in L^\infty(I^2)$ and $\mathbf{g}\in L^\infty (I)$.
Then
\begin{equation}\label{converge}
\| \mathbf{u} -\mathbf{u}_n\|_{C(0,T;L^2(I))} \to 0\;\mbox{as}\; n\to \infty.
\end{equation}
\end{thm}
{\it Proof.\;}\; By following the lines of the proof of Theorem~\ref{coloc-rate}
(see (\ref{Gron})),
for $\xi_n(x,t)=u_n(x,t)-u(x,t)$
we obtain
\begin{equation}\label{Gron2}
\sup_{t\in [0,T]}\|{\boldsymbol\xi}_n(t)\|_{L^2(I)}\le \left( \|\mathbf{g}-\mathbf{g}_n\|_{L^2(I)}
+{C_1\|W-W_n\|_{L^2(I^2)}\over C_2}\right) \exp\{C_2 T\},
\end{equation}
where positive constants $C_1$ and $C_2$ are independent of $n$.
By the Lebesgue differentiation theorem,
$$
W_n\to W \;\mbox{and} \; \mathbf{g}_n\to \mathbf{g},\;\mbox{as}\; n\to\infty,
$$
almost everywhere on $I^2$ and $I$ respectively. Thus, the statement of the theorem follows
from (\ref{Gron2}).\\
$\square$
The heat equation on $\mathbb{H}(X_n,W)$ is analyzed in complete analogy to
the IVP for $W/\mathcal{P}_n$. The IVP in this case remains (\ref{finite-int})
and (\ref{finite-int-ic}) modulo the definition of the step function
\begin{equation}\label{Wntilde}
W_n(x,y) = \tilde{W}_{ij} \;\mbox{for} \; (x,y)\in I^{(n)}_i\times I^{(n)}_j.
\end{equation}
We assume that $W(x,y)$ is a bounded symmetric measurable
function that is almost everywhere continuous on $I^2$. Then using
the observation in Lemma 2.5~\cite{BorChay11},
$$
W_n(x,y) \to W(x,y),\;\mbox{as}\; n\to\infty
$$
at every point of continuity of $W$, i.e., almost everywhere.
Thus, by the dominated convergence theorem, we have
$$
\|W-W_n\|_{L^2(I^2)}\to 0\;\mbox{as}\; n\to\infty.
$$
With this observation, the proof of Theorem~\ref{Galerkin-convergence}
applies to the situation at hand. Thus, we have the following theorem.
\begin{thm}\label{collocation-convergence}
Let $\mathbf{u}$ and $\mathbf{u_n}$ be the solutions
of (\ref{sheat}), (\ref{sheat-ic}), and (\ref{finite-int}), (\ref{Wntilde}),
(\ref{finite-int-ic}),
respectively. Suppose $W\in L^\infty(I^2)$, $\mathbf{g}\in L^\infty (I)$,
and $W$ is continuous almost everywhere on $I^2$.
Then
\begin{equation}\label{converge2}
\| \mathbf{u} -\mathbf{u}_n\|_{C(0,T;L^2(I))} \to 0\;\mbox{as}\; n\to \infty.
\end{equation}
\end{thm}
\section{Examples} \label{sec.examples}
\setcounter{equation}{0}
In this section, we illustrate the results of this paper
with several examples. First, we apply Theorem~\ref{thm.reg} to explain
the regions of continuity in the chimera states \cite{KurBat02}. Next, we discuss the
attractors of the system of Kuramoto oscillators on multipartite
graphs.
\begin{figure}
\begin{center}
{\bf a}\;\includegraphics[height=1.8in,width=2.0in]{chimera-ic.pdf}\qquad
{\bf b}\;\includegraphics[height=1.8in,width=2.0in]{chimera-state-a.pdf}
\end{center}
\caption{
a) The initial conditions (\ref{chimera-ic}) for the chimera state shown in b).
b) A snapshot of the chimera state generated by (\ref{chimera}).
}
\label{f.2}
\end{figure}
\subsection{Regions of continuity of chimera states} \label{sec.chimera}
Chimera states are persistent patterns of coexisting regions of
spatially coherent and chaotic behaviors (see Fig.~\ref{f.2}b).
They were discovered by Kuramoto and Battogtokh
in the following continuum limit of a system of coupled phase oscillators \cite{KurBat02}
\begin{equation}\label{chimera}
{\partial\over \partial t} \phi(x,t) = \omega +
\int_0^1 G(x-y) \sin\left(\phi(y,t)-\phi(x,t)+\alpha\right) dy.
\end{equation}
Function $\phi:~[0,1]\times{\mathbb R}^+\to {\mathbb S}^1:={\mathbb R}/2\pi{\mathbb Z}$ describes the evolution of the
phase of oscillator at $x\in [0,1]$. The exponential kernel $G(x)=\exp\{-\kappa|x|\}$
provides nonlocal coupling
between oscillators. Equation (\ref{chimera}) was obtained using the phase reduction from the
Ginzburg-Landau equation, which describes collective dynamics of nonlocally coupled limit cycle
oscillators (cf. \cite{KurBat02}). The sequences of discrete problems converging to (\ref{chimera})
can be obtained using one of the schemes of Section~\ref{sec.weight}.
The Kuramoto-Battogtokh model was the first example of a system featuring robust patterns that combine
coherent and irregular dynamics. Since then chimera states were demonstrated in a variety
of computational and experimental settings \cite{LaiKar12, Show12,LarPen13}.
The precise mathematical mechanism underlying
these patterns is the subject of ongoing research \cite{Ome13}. Here,
we focus on one aspect of the chimera states: the regions of
continuity.
Specifically, we use Theorem~\ref{thm.reg} to explain why the synchronous
dynamics is restricted to the two subdomains of $I$ (see Fig.~\ref{f.2}a). We show that
this is possible because of the lack of the smoothening property of the
heat equation on graph limits, which is one important
distinction from the classical heat equation.
The numerical generation of the chimera states in (\ref{chimera}) requires a careful setup,
which we review next.
To trigger a chimera state one has to start with the appropriate initial
conditions, otherwise oscillators end up evolving
in phase. Abrams and Strogatz reported that they were unable to generate chimera
states in (\ref{chimera}) from smooth initial conditions \cite{AbrStr06}. Instead, one has to initialize the
system with the initial condition that combines
the regions of coherent and incoherent spatial profiles.
The following initial condition was suggested by Kuramoto (cf. \cite{AbrStr06}):
\begin{equation}\label{chimera-ic}
\phi(x_i,0)=h(x_i) r_i, \; \mbox{where} \;
h(x)=6\exp\left\{ -30\left(x -(1/2)\right)^2\right\},\; x_i=in^{-1},\; i\in [n],
\end{equation}
and $r_i$ are independent random variables drawn from the uniform distribution on $(-1/2, 1/2)$
(see Fig.~\ref{f.2}a).
The values of the other parameters are $\kappa=4$, $\alpha=1.457$ (cf. \cite{AbrStr06}).
Numerical integration of
(\ref{chimera}) and (\ref{chimera-ic}) with these parameter values yields
persistent patterns with coexisting regions of spatially coherent and chaotic dynamics.
A representative snapshot is shown in Fig.~\ref{f.2}b.
Theorem~\ref{thm.reg} explains the role of the initial conditions in generating chimera
states.
Note that the function $h(x)$ in (\ref{chimera-ic}) decays rapidly to $0$ outside a
neighborhood of $1/2$. Therefore, the initial conditions
in the intervals $J_1=(0,0.2)$ and $J_2=(0.8, 1)$ near the endpoints of the interval $[0,1]$
for all practical purposes can be viewed as if they were produced by the discretization of a function
that is smooth over $J_1$ and $J_2$ (see Fig.~\ref{f.2}a). For such initial conditions,
Theorem~\ref{thm.reg} implies that the
solution $\phi(x,t)$ will remain continuous on $J_1$ and $J_2$, because $H^1(J_{1,2})\subset C(J_{1,2})$
by the Sobolev Embedding Theorem \cite{EvaPDE}. This explains why the spatial profile remains coherent over
$J_1$ and $J_2$ for positive times (see Fig.~\ref{f.2}b).
Theorem~\ref{thm.reg} also implies that it is impossible to generate chimera states starting
from smooth initial data, because for such data the solution of the continuum limit remains
continuous over the entire domain for all $t>0$. This rules out regions of chaotic
behavior in large networks, because their solutions remain close to that of the continuous
system by Theorem~\ref{Galerkin-convergence} or Theorem~\ref{collocation-convergence}.
This explains failed attempts to produce chimera states from smooth
initial conditions in \cite{AbrStr06}.
\subsection{The Kuramoto equation on multipartite graphs}\label{sec.half}
To illustrate our results for networks on simple graphs (see Section~\ref{sec.simple}),
we discuss the Kuramoto equation on multipartite graphs.
The examples of this subsection illustrate another implication of the
lack of smoothening property of the heat equation on graph limits. This time
we show that the lack of smoothness of the limiting graphon may result in stable
discontinuous patterns.
Consider the Kuramoto equation on the sequence of bipartite complete graphs
\begin{equation}\label{halfK}
\dot u_i^{(n)} (t) ={ (-1)^\sigma\over n} \sum_{j:~(j,i)\in E(K_{n,n})}
\sin\left( u_j^{(n)} (t)-u_i^{(n)} (t)\right),\quad i\in [2n],
\end{equation}
where
$$
K_{n,n}=\langle [2n], E(K_{n,n})\rangle,\;\;\mbox{and}\;\;
E(K_{n,n})=\{ (i,j)\in [2n]^2:~ 1\le i\le n< j\le 2n\}.
$$
The sequence $\{K_{n,n}\}$ is convergent with the limit shown in Fig.~\ref{f.3}a.
We consider two models for $\sigma=0$
and $\sigma=1$.
As shown below, the space homogeneous (synchronous) solution is stable
for the $\sigma=0$ model and is unstable if $\sigma=1$.
\begin{figure}
\begin{center}
{\bf a}\includegraphics[height=1.8in,width=2.0in]{f3a.pdf}
{\bf b}\includegraphics[height=1.8in,width=2.0in]{half-sync.pdf}
{\bf c}\includegraphics[height=1.8in,width=2.0in]{half-step.pdf}
\end{center}
\caption{
\textbf{a}) The plot of the support of $W_{K_{n,n}}$.
\textbf{b,c}) Solutions of the IVP for the Kuramoto equation
on the bipartite complete graphs converge to the
synchronous solution for $\sigma=0$ ({\bf b}) and to the step function for
$\sigma=1$ ({\bf c}).
}
\label{f.3}
\end{figure}
Along with (\ref{halfK}) we consider its continuum limit
\begin{equation}\label{halfC}
{\partial \over \partial t} u(x,t) = (-1)^\sigma \int_I K(x,y) \sin\left( u(y,t)-u(x,t)\right) dy,
\end{equation}
where graphon $K\in\mathcal{W}_0$ is the limit of $\{K_{n,n}\}$ (see Fig.~\ref{f.3}a).
Suppose $u(x,0)\in C(I)$. By Theorem~\ref{thm.reg}, for any $t>0$,
$u(x,t)\in \tilde C(I)$ where
$$
\tilde C(I)=\{ u\in L^\infty(I):~ \mbox{for any open interval}~ J\subset (0,1/2)
\cup (1/2,1)~ u\left|_J\right.\in C(J)\}.
$$
Here, by $u\left|_J\right.$ we denote the restriction of $u$ to $J$.
We look for steady state solutions of (\ref{halfC}) that belong to $\tilde C(I)$.
Setting the right hand side of (\ref{halfC}) to $0$, we obtain
\begin{eqnarray}\label{H1}
\int_{1/2}^1 \sin\left( u(y,t)-u(x,t)\right) dy=0, & x\in (0,1/2),\\
\label{H2}
\int_0^{1/2} \sin\left( u(y,t)-u(x,t)\right) dy=0, & x\in (1/2, 1).
\end{eqnarray}
From (\ref{H1}) and (\ref{H2}), we find that the only piecewise constant steady state solutions
from $\tilde C(I)$ are the space homogeneous
function
$$
u^{h}(x)=c,\;\mbox{for}\; x\in[0,1],
$$
and the step function
$$
u^s(x)=\left\{ \begin{array}{cc} c_1, & x\in [0,1/2),\\
c_2, & x\in [1/2,1],
\end{array}
\right.
$$
where constants $c,c_1,c_2\in {\mathbb S}^1$ and $|c_2-c_1|=\pi$.
Next, we turn to the discrete model (\ref{halfK}). The discrete counterparts of
$u^{h}(x)$ and $u^{s}(x)$ are
$$
u^h=c\mathbf{1}_{2n}\in {\mathbb R}^{2n} \;\mbox{and}\;
u^s=(c_1\mathbf{1}_{n}^\mathsf{T}, c_2\mathbf{1}_{n}^\mathsf{T})^\mathsf{T}\in {\mathbb R}^{2n},
$$
where $\mathbf{1}_{n}=(1,1,\dots,1)^\mathsf{T}\in{\mathbb R}^n$.
The linearization of (\ref{halfK}) about $u=u^h$ yields
\begin{equation}\label{lin-half}
\dot \xi = { (-1)^{\sigma+1}\over n} \mathbf{L} \xi.
\end{equation}
Matrix $\mathbf{L}$ is the Laplacian of $K_{n,n}$
\begin{equation}\label{matrixL}
\mathbf{L}=\begin{pmatrix} nI_n & -J_n\\ -J_n & nI_n \end{pmatrix}, \;
\end{equation}
where $I_n$ is the $n\times n$ identity matrix and $J_n=\mathbf{1}_n \mathbf{1}_n^\mathsf{T}$.
As a graph Laplacian of an undirected connected graph, $\mathbf{L}$ is a symmetric
positive semi-definite matrix with a simple eigenvalue $0$ \cite{Fied73}.
Thus, the space homogeneous solution $u^h$ is stable
for $\sigma=0$ and is unstable when $\sigma=1$.\footnote{The simple
zero eigenvalue in the spectrum of the linearized problem reflects
the translational invariance of (\ref{halfK}), which does not affect the
stability.}
The linearization of (\ref{halfK}) about $u^s$ yields
$$
\dot \xi ={(-1)^{\sigma+1}\over n}\mathbf{L}\xi,
$$
which, up to a sign, coincides with (\ref{lin-half}).
Thus, $u^s$ is unstable if $\sigma=0$ and is stable for $\sigma=1$.
The discrete model (\ref{halfK}) has many other piecewise constant
steady state solutions besides $u^h$ and $u^s$. But the latter are the only two
that approximate functions in $\tilde C(I)$ and, therefore, only these solutions
can be attractors of the discrete system for large $n$ (cf. Theorem~\ref{thm.reg}).
This is consistent with the numerical simulations shown in Fig.~\ref{f.3}b,c.
Numerical experiments show that the synchronous state is the attractor for
the Kuramoto model with $\sigma=0$, while the step function is the attractor
for the model with $\sigma=1$ (see Fig.~\ref{f.3}b,c).
\begin{rem}\label{rem.half}
The Kuramoto model on the family of half-graphs (cf.~Example~\ref{ex.half})
also exhibits stable step-like patterns, whose
analysis follows the lines of that for the complete bipartite graphs.
\end{rem}
\begin{figure}
\begin{center}
{\bf a}\includegraphics[height=1.8in,width=2.0in]{multipartite-matrix.pdf}\qquad
{\bf b}\includegraphics[height=1.8in,width=2.0in]{multipartite-trace.pdf}
\end{center}
\caption{
{\bf a}) The block structure of $A(G_{nm})$. {\bf b}) A stable multistep pattern
generated by the Kuramoto model on a multipartite graph.
}
\label{f.last}
\end{figure}
In conclusion, we briefly discuss how the Kuramoto model on $\{K_{n,n}\}$ can be generalized
to produce stable patterns with arbitrary number of steps.
To this end, let $C_n=\langle V(C_n), E(C_n)\rangle$ be an $n$-cycle, i.e.,
$V(C_n)=[n]$ and $E(C_n)=\{(i,j)\in [n]^2:~ \mbox{dist}(i,j)=1\}$.
Recall $\mbox{dist}(i,j):=\min\{ |i-j|, n-|i-j|\}$.
The adjacency matrix of $C_n$ is given by
\begin{equation}\label{A(C)}
A(C_n)=\begin{pmatrix} 0 & 1& 0 & \dots & 0&1\\ 1 &0 & 1 & \dots
&0 &0\\ & & & \dots & &\\ 1 & 0& 0 & \dots & 1& 0 \end{pmatrix}.
\end{equation}
Let $K_m$ denote the complete graph on $m$ nodes. Define graph
$C_{n,m}=C_n\otimes K_m$
on $nm$ nodes by replacing each node of $C_n$ with a copy of the complete graph
$K_m$. The adjacency matrix of the resultant graph
is the Kronecker product of $A(C_n)$ and $A(K_m)$
$$
A(C_{n,m})=A(C_n)\otimes A(K_m).
$$
The block structure of $A(C_{n,m})$ is shown in Fig.~\ref{f.last}a.
The Kuramoto model (\ref{halfK}) with $K_{n,n}$ replaced by $C_{n,m}$
generates stable patterns with $n$ steps like those shown in
Fig.~\ref{f.last}b. In computational neuroscience, such patterns
have been sought in the context of modeling memory.
The stability analysis of these multistep patterns, which can be done in analogy
to the analysis in this subsection, will be
presented elsewhere.
\section{Conclusion}\label{sec.conclusion}
\setcounter{equation}{0}
The heat equation is a fundamental equation of mathematical
physics. On Euclidean domains, the heat operator is used to model
phenomena involving diffusion, propagation, and pattern formation
in diverse problems of physics and biology. On Riemannian manifolds, the
heat equation has been a powerful tool for studying the topology of the underlying
manifold \cite{Rosenberg-Laplacian}. Its discrete counterpart, the heat equation
on graphs plays an important role in the spectral graph theory \cite{Chung-Spectral}.
Motivated by the dynamics of large networks, in this paper
we have studied the nonlinear heat equation on dense graphs.
We identified two classes of convergent graph sequences, for which the dynamics
of large coupled networks is approximated by the heat equation on the graph
limit. The latter is a nonlinear evolution equation with an integral operator
that describes nonlocal spatial interactions. The nonlocal heat equation differs
from its partial differential equation counterpart in several respects. First,
the IVP for the heat equation on a graph limit is well-posed in both forward
and backward time. Second, the solutions of the IVPs for the nonlocal heat equation
lack the smoothening property, i.e., the spatial regularity of solutions for positive times
is determined by the initial data and the regularity of the graph limit. In particular,
the heat equation on a graph limit can have attractors that are piecewise continuous
in space (see Subsection~\ref{sec.half}), or combine regions with qualitatively
distinct dynamics like in chimera states (see Subsection~\ref{sec.chimera}).
Our analysis highlights the properties of the convergent graph sequences that
are necessary for supporting the continuum limit for coupled dynamical systems.
Note that for convergent sequences of simple graphs analyzed in Section~\ref{sec.simple},
we require that the graph limit is a $\{0,1\}$-valued graphon. For such sequences,
we are able to represent the discrete problems using the step functions $\{\hat W_n\}$
(cf.~(\ref{hat-W})), which are convergent in the $L^1$-norm. This construction does
not work for an arbitrary sequence of simple graphs. For instance, a sequence
of Paley graphs converges to the constant graphon equal to $1/2$,
$\mbox{Const}~(1/2)$ \cite{BorChay08}.
However, the corresponding
continuum limit (\ref{sheat}) does not approximate the dynamics of the discrete problems.
On the other hand, the analysis in \cite{Med13a} shows that the heat equation on
the sequence of the Erd\H{o}s-R\'{e}nyi graphs (which is also a sequence of
simple albeit random graphs converging to $\mbox{Const}(1/2)$) has a well-defined
continuum limit. In contrast to the present work, the analysis of the continuum limit in
\cite{Med13a} does not rely on the $L^1$-norm for graphons, but effectively uses
the cut-norm.
Our results for networks on convergent sequences of simple graphs also reveal
what properties of graphs affect the accuracy of the continuum limit.
Specifically, the rate of convergence estimate in Theorem~\ref{coloc-rate} shows that
the accuracy of approximation of the solutions of the discrete problems by their
continuous counterparts depends on the regularity of the boundary of support
of the graph limit. In particular, the convergence may slow down
significantly if the box-counting dimension of the boundary is close to $2$. It is interesting to compare this
result with the rate of convergence estimate for random networks in \cite{Med13a}.
For random networks, the rate is determined by the Central Limit Theorem and
is independent of the regularity of the underlying graphon.
The theory of graph limits provides a useful set of tools for studying dynamics
of large networks \cite{LovGraphLim12}. On one hand, known graph limits for various
convergent sequences like that of half graphs or Erd\H{o}s-R\'{e}nyi graphs
suggest continuum limits for the corresponding networks.
On the other hand, this rich theory offers many useful ideas and analytical
results that can be applied to the analysis of dynamical networks. In this paper, we analyzed
two families of networks on convergent sequences of deterministic graphs.
In \cite{Med13a, Med13b} a similar approach is used to study networks on convergent
sequences of random graphs. Therefore, the results of this paper and in
\cite{Med13a} justify the continuum limit for a broad class of networks.
\vskip 0.2cm
\noindent
{\bf Acknowledgements.} The author thanks A.~Grinshpan and
D.~Kaliuzhnyi-Verbovetskyi for useful discussions and
valuable comments on the manuscript. This work was supported in part by
the NSF grant DMS 1109367.
\vfill\newpage
\bibliographystyle{amsplain}
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR }
\providecommand{\MRhref}[2]{%
\href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2}
}
\providecommand{\href}[2]{#2}
|
1,314,259,994,968 | arxiv | \section{Introduction}
After the discovery of accelerated expansion of the Universe \cite{Riess,Perlmutter},
one of the attempts to explain this mysterious
phenomenon is an introduction of a scalar field which is a dynamical field rolling down on a potential.
The model is called a quintessence model \cite{Caldwell}. This can be a solution to the dark energy problem
by adding a new degree of freedom to the Universe. In addition to the dark energy problem,
another mystery is the so-called coincidence problem, which
is why the amounts of dark energy and
matter fluid
(including cold dark matter) are in the same order of magnitude \cite{Ade}. This problem indicates that there might be
some interaction between them. Thus, the idea gives a new model called a coupled quintessence model \cite{Amendola}.
This model contains the original quintessence mechanism but
also gives a new solution called a scaling solution.
This scaling solution not only provides the accelerated expansion of the universe but also
ensures that the density parameter of the matter fluid does not vanish.
Since this scaling solution is an attractor, it will be realised naturally at late times.
As a result, this is one of the possible ways to solve the coincidence problem at the same time
with the dark energy problem.
However, the scaling solution is difficult to be realised
because it
requires a large coupling constant \cite{Amendola,DEbook}.
In addition, there is another problem to introduce a universal scalar field.
Since a scalar field couples to matter fluid, this leads to a new interaction force
between them, the so-called fifth force \cite{STbook},
which has not been detected so far \cite{constraints}.
In order to preserve a scalar field model coupled to matter fluid,
there must be some screening mechanism to hide a new force
from the observations on the ground and solar-system experiments.
The screening mechanism means that the fifth force is
suppressed compared to the Newtonian force in a highly dense region or close to a massive source,
whereas it recovers in a low density region or far from a gravitating source.
Namely, we recover general relativity (GR) or Newtonian gravity
at short distance from a massive source or in a highly dense region
just as in an astrophysical scale.
Three groups of the screening mechanisms have so far been proposed.
The first group is the screening by a scalar field $\phi$ or its effective potential, which
consists of the chameleon mechanism \cite{chameleon1,chameleon2,chameleon3},
the symmetron mechanism \cite{symmetron1,symmetron2},
and the dilaton (Damour-Polyakov) mechanism \cite{dilaton1,dilaton2}.
In the chameleon mechanism, mass of a scalar field depends
on matter density, then a scalar field gets a large mass in a high density region such as on the Earth.
This leads to a short range interaction of the fifth force.
While in the symmetron mechanism or in the dilaton mechanism,
the coupling parameter between a scalar field
and matter fluid depends on the minimum of the effective potential.
In a high density region, for example in the symmetron mechanism, the symmetry is not broken. Then the minimum of the effective potential is at zero value. As a result, the coupling parameter is equal to zero. Herewith,
the scalar field decouples from matter fluid in a high density region.
The second group is the screening by the first-derivative of a scalar field, $\partial \phi$, or the kinetic term of a scalar field,
which includes the D-BIonic screening \cite{Burrage}, the kinetic screening $P(X)$ \cite{kinetic1,kinetic2}, and
the k-Mouflage mechanism \cite{Babichev}. In this group,
the screening mechanism works by domination of some non-linear term in the equation of motion of
the scalar field.
Since the equation of motion consists of not only the linear term, which leads to the inverse-square ($r^{-2}$) fifth force, but also
the non-linear term, which leads to a different form of the force,
there exists some typical distance below which the non-linear term
dominates, whereas at larger distance from the source, the linear term becomes dominant.
The fifth force is then screened at short distance from the source.
This is analogous to the Vainshtein mechanism.
The last group is the screening by the second-derivative of a scalar field, $\partial \partial \phi$,
or the so-called Vainshtein mechanism \cite{Vainshtein}. This mechanism is found in many models,
for example, the Galileon gravity \cite{galileon}, the Horndeski theory \cite{Horndeski, Deffayet:2011gz,Kobayashi:2011nu}, and also the massive gravity \cite{massive1,massive2}.
In these models, the Vainshtein mechanism works
in the similar way as we mentioned, namely, there exists some typical distance called the Vainshtein radius,
below which the non-linear term
is dominant. As a result, GR recovers at a short distance.
Since a screening mechanism is important when we have a scalar field,
in order to explain the coincidence problem as well as the dark energy problem,
we study cosmological behaviour of a coupled quintessence model, in which
a screening mechanism works.
In this paper, we focus
on the D-BIonic screening mechanism.
This may have another advantage in realisation of a scaling solution because
there exists non-canonical kinetic term which changes the dynamics of the scalar field.
It is interesting whether we find a scaling solution which satisfies the observational
constraints and becomes an attractor or not.
The D-BIonic
theory can reduce to a coupled quintessence model under non-relativistic limit of the Lorentz factor
(we will see clearly in the next section). This is the same as the DBI
theory
considered as a generalised quintessence model.
In the DBI
theory, we find the accelerating universe even though the Lorentz factor is much larger than unity \cite{Martin:2008xw,Copeland},
which is why we call it generalised quintessence.
Here we will analyse both the D-BIonic and DBI
theories in a unified way because they can be described
in similar forms.
In Sec. \ref{basiceq} we show the basic equations for this work.
In Sec. \ref{analytic} we find
analytic solutions corresponding to two solutions in a coupled scaling quintessence:
One case such that the potential term dominates and the other case both potential and matter density terms do contribute in the dynamics.
We show stability analysis of these solutions in Sec. \ref{stability}, and compare with the observational data in Sec. \ref{observations}.
Finally, Sec. \ref{conclusions} is devoted to conclusions and remarks.
\begin{widetext}
\vskip 1cm
\section{Basic equations in D-BIonic and DBI
theories}
\label{basiceq}
\subsection{Field Equations in D-BIonic and DBI
theories}
We consider the following action
\begin{align}
S = \int d^4 x \sqrt{-g} \left[\frac{1}{2}R - \frac{1}{f(\phi)} \sqrt{1+ f(\phi) (\partial \phi)^2}
+ \frac{1}{f(\phi)} - V(\phi)\right] + \int d^4 x \mathcal{L}_m (A^2 (\phi) g_{\mu\nu}, \psi_m) \,, \label{action}
\end{align}
where a scalar field, $\phi$, couples conformally to matter fluid, $\psi_m$, with a conformal factor $A(\phi)$.
$f(\phi)$ and $V(\phi)$ are an inverse D3-brane-like tension and a potential, respectively.
We will use the units of $\kappa^2 = 8\pi G=1$.
We use the word ``like" here because the DBI theory is in the Jordan frame in which the scalar field does not couple to matter.
Therefore, the action we are considering here is just an action contained non-canonical kinetic term or the DBI-like action.
Varying the action (\ref{action}) with respect to the metric and the scalar field,
we obtain the field equations as follows:
\begin{align}
&
R_{\mu\nu} - \frac{1}{2} g_{\mu\nu} R =
T^{\rm (m)}_{\mu\nu} + T^{(\phi)}_{\mu\nu}
\,, \label{fieldeq}
\\
&
\square \phi + \frac{f_{,\phi}}{f^2}\left[1 + f (\partial\phi)^2\right] - \frac{f_{,\phi}}{2f}(\partial\phi)^2
- \frac{1}{2[1 + f (\partial\phi)^2]}\left[
f_{,\phi}(\partial\phi)^4 + f \nabla_{\mu} (\partial\phi)^2 \nabla^{\mu} \phi\right]
\nonumber \\
&
~~~~
- \left(\frac{f_{,\phi}}{f^2} + V_{,\phi}\right)\sqrt{1 + f (\partial\phi)^2}
= - \frac{A_{,\phi}}{A} T^{\rm (m)} \sqrt{1 + f (\partial\phi)^2} \,, \label{eom1}
\end{align}
where the symbol $_{,\phi} \equiv d/d\phi$ and $T^{\rm (m)}=T^{\rm (m)\mu}_{~~~\mu}$. The energy-momentum tensor of the scalar field is given by
\begin{equation}
T^{(\phi)}_{\mu\nu} \equiv \frac{\partial_{\mu}\phi \partial_{\nu}\phi}{\sqrt{1+ f (\partial\phi)^2}} - g_{\mu\nu}
\left[f^{-1}\sqrt{1+ f (\partial\phi)^2} - f^{-1} + V\right]
\,. \label{Tmunu}
\end{equation}
\end{widetext}
It gives the DBI
theory for $f(\phi) > 0$,
while when $f(\phi) < 0$, it yields D-BIonic
theory.
We assume that the conformal factor is given by the exponential form:
\begin{equation*}
A(\phi) = e^{g \phi} \,,
\end{equation*}
where $g$ is a coupling constant.
According to the original D-BIonic
theory \cite{Burrage},
the inverse D3-brane-like tension is a negative constant, i.e.,
$f(\phi) = -\Lambda^{-4}$, where $\Lambda$ is a characteristic mass scale,
thus $f_{,\phi} =0$. The equation for the scalar field is simplified as
\begin{equation}
\nabla_{\mu} \left(\frac{\nabla^{\mu}\phi}{\sqrt{1 -\Lambda^{-4} (\partial\phi)^2}}\right)- V_{,\phi}= - g T^{\rm (m)}.
\label{eom2}
\end{equation}
This is the same equation of motion as Eq.~(\ref{eom0}) except for the potential term. The potential is necessary for studying cosmology as we will see in Sec. \ref{analytic}.
Eq.~(\ref{eom2}) obviously consists of a linear term and a non-linear term, then there must be a characteristic radius
analogous to the Vainshtein radius.
Below this radius, we find a screening mechanism, which is called D-BIonic screening \cite{Burrage}.
\subsection{Basic Equations for Coupled D-BIonic and DBI Cosmology}
\label{basic_equations}
In order to study the evolution of the Universe,
we assume that the scalar field is homogeneous, namely $\phi =\phi(t)$
and the spacetime is
described by the flat Friedmann-Lemaitre-Robertson-Walker (FLRW) metric:
\begin{equation*}
ds^2 = - dt^2 + a^2 (t) d \mathbf{x}^2 \,. \nonumber
\end{equation*}
Consequently,
Eq.~(\ref{eom1}) becomes
\begin{equation}
\ddot \phi + \frac{3 H \dot \phi}{\gamma^2} + \frac{V_{,\phi}}{\gamma^3} + \frac{f_{,\phi}}{f}
\frac{(\gamma+2)(\gamma-1)}{2\gamma(\gamma+1)} \dot \phi^2 = \frac{g}{\gamma^3} T^{\rm (m)} \,,
\label{eom3}
\end{equation}
where we introduce
the ``Lorentz factor'' as
\begin{equation}
\gamma \equiv \frac{1}{\sqrt{1 - f(\phi) \dot \phi^2}} \,.
\label{lorentz}
\end{equation}
In the standard DBI theory ($f(\phi) > 0$), $\gamma$ takes the values from $1$ to $\infty$,
while in the D-BIonic
theory ($f(\phi) < 0$),
$\gamma$ is limited in the range of $(0, 1)$ instead.
From Eq.~(\ref{eom3}), in the limit of $\gamma = 1 $ $ (|f(\phi)| \dot \phi^2 \ll 1)$,
it obviously becomes the equation of motion for the coupled quintessence model.
We then find the both limits of the Lorentz factor as
\begin{eqnarray*}
&&
\gamma_{\text{DBI}} =
\begin{cases}
\infty & : \text{when} \, f(\phi) \dot \phi^2 \simeq 1 \, \text{(relativistic limit)} \\
1 & : \text{the coupled quintessence}
\end{cases}
\\
&&
\gamma_{\text{D-BIonic}} =
\begin{cases}
1 & : \text{the coupled quintessence} \\
0 & : \text{when} \, -f(\phi) \dot \phi^2 \gg 1 \,
\\
& ~~~\text{(``anti''-relativistic limit)} \\
\end{cases}
\end{eqnarray*}
Therefore, the DBI-like action (\ref{action}) is generalisation of
the coupled quintessence model.
From Eq.~(\ref{Tmunu}), the pressure and the energy density of the scalar field are
given by
\begin{align*}
\rho_{\phi} &= \frac{\gamma^2}{\gamma+1} \dot \phi^2 + V(\phi) \,, \\
P_{\phi} &= \frac{\gamma}{\gamma+1} \dot \phi^2 - V(\phi) \,.
\end{align*}
Subsequently, the Friedmann equation is given by
\begin{align}
H^2 = \frac{1}{3}\left( \frac{\gamma^2}{\gamma+1}\dot \phi^2 + V(\phi)+ \rho_{\rm m}\right) \,,
\label{Friedmann}
\end{align}
where $H=\dot a/a$.
$\rho_{\rm m}$ is the total matter density
(non-relativistic matter $+$ radiation), which we
combine just for simplicity in our description.
Since the scalar field couples to matter fluid, this leads to modification on the energy equation.
Namely, neither the scalar field energy nor matter fluid energy is conserved
(however the total energy is of course conserved).
For conformally coupled case, we obtain
\begin{equation*}
\nabla_{\mu} T^{\rm (m) \,\mu}_{~~~~\nu} = \frac{A_{,\phi}}{A} T^{\rm (m)} \nabla_{\nu} \phi \,.
\end{equation*}
According to the equation of state (EOS) for the matter fluid, $P_{\rm m} = w \rho_{\rm m}$,
the energy equation of matter density becomes
\begin{align}
\dot \rho_{\rm m} + 3H (1+ w) \rho_{\rm m} &= g (1 - 3w) \rho_{\rm m} \dot \phi \,,
\label{matter}
\end{align}
where $w$ is the EOS parameter of matter fluid ($w = 0$ for non-relativistic matter, and $w = 1/3$ for radiation).
The basic equations we will use many times in this work are the equation of motion (\ref{eom3}),
the Friedmann equation (\ref{Friedmann}), and the energy equation of matter density (\ref{matter}).
It is worth mentioning here that for radiation the energy
is conserved because
the electromagnetic field is conformally invariant. Thus, radiation still decreases with the rate $a^{-4}$ as the Universe expands.
Because the coupling constant, $g$, acts on only non-relativistic matter, at late time, we
ignore the radiation component in the Universe.
In the next section we will find analytic solutions of the D-BIonic and DBI
theories.
\section{ANALYTIC SOLUTIONS}
\label{analytic}
Here we shall discuss some particular forms of $f(\phi)$ and $V(\phi)$, i.e.,
\begin{equation*}
f(\phi)=\epsilon f_0 e^{-\mu\phi} ~~{\rm and}~~~
V(\phi)=V_0 e^{-\lambda\phi}\,.
\end{equation*}
We assume $ f_0>0, V_0>0$. We also assume $\lambda>0$ without loss of generality
(If $\lambda<0$, redefining the scalar field as $\phi \rightarrow -\phi$, we find
our action).
The parameter $\epsilon=1$ gives the DBI
theory, while $\epsilon=-1$
gives the D-BIonic theory.
Since we are interested in the special form of the kinetic term in the D-BIonic or DBI theory,
we look for (asymptotic) solution with $\gamma = \gamma_0$ = constant.
This condition leads to $f(\phi)\dot\phi^2$ = constant.
From our ansatz of the function $f(\phi)$, we solve for (asymptotic) solution of
the scalar field as
\begin{equation*}
\phi=-{2\over \mu}\ln \left({t\over t_0}\right) +\phi_0
\,,
\end{equation*}
where $\phi_0$ is the value of $\phi$ at $t=t_0$. Taking derivatives with respect to time, we find
\begin{equation*}
\dot \phi = - \frac{2}{\mu t}.
\end{equation*}
Clearly, $\dot \phi > 0$ when $\mu < 0$. This corresponds to the scalar field motion
rolling down the runaway exponential potential.
We assume that a scale factor increases as a power-law expansion:
\begin{equation*}
a=a_0\left({t\over t_0}\right)^p
\,.
\end{equation*}
This is natural because the kinetic term is proportional to $t^{-2}$.
If we do not assume a power-law expansion, the kinetic term does not play any important role in the spacetime dynamics,
and then it gives the same results as those with the conventional canonical kinetic term.
\begin{widetext}
Consequently, assuming that matter is given only by dust fluid ($w=0$),
the basic equations given in Sec. \ref{basic_equations} are reduced to be
\begin{align*}
&{2\over \mu t^2}-{6p\over \gamma_0^2\mu t^2}
-{\lambda V_0\over \gamma_0^3}e^{-\lambda \phi_0}
\left({t\over t_0}\right) ^{2\lambda\over \mu}
-{2(\gamma_0+2)(\gamma_0-1)\over \mu \gamma_0(\gamma_0+1) t^2} = -{g\over \gamma_0^3}\rho_{\rm m}
\,,
\\
&{p^2\over t^2}={1\over 3}\left[
{4\gamma_0^2\over (\gamma_0+1)\mu^2 t^2}+V_0 e^{-\lambda
\phi_0}\left({t\over t_0}\right) ^{2\lambda\over \mu}+\rho_{\rm m}
\right]\,, \\
& \dot \rho_{\rm m} +{3 p\over t}\rho_{\rm m}=-{2g\over t}\rho_{\rm m}
\,.
\end{align*}
The last equation is easily integrated as
\begin{equation*}
\rho_{\rm m}=\rho_0\left({t\over t_0}\right)^{-q}
\,,
\end{equation*}
where we define
\begin{equation*}
q\equiv 3p+{2g\over \mu}
\,.
\end{equation*}
We then rewrite the basic equations as
\begin{align}
V_0e^{-\lambda\phi_0}\left({t\over t_0}\right)^{2\lambda\over \mu}
&={-2\mu\gamma_0[3p(\gamma_0+1)-2\gamma_0]+g(3p^2(\gamma_0+1)\mu^2-4\gamma_0^2)\over \mu^2t^2(\gamma_0+1)(\lambda+g)} \,,
\label{Veom}
\\
\rho_0\left({t\over t_0}\right)^{-q}
&={2\mu\gamma_0[3p(\gamma_0+1)-2\gamma_0]+\lambda(3p^2(\gamma_0+1)\mu^2-4\gamma_0^2)\over \mu^2t^2(\gamma_0+1)(\lambda+g)} \,.
\label{rhoeom}
\end{align}
\end{widetext}
Obviously, if either the term with $V_0$ or another one with $\rho_0$ is dominant, we do not find
any asymptotic solution with our ansatz. In fact, if $\lambda / \mu > -1$ or $q<2$, at late time we obtain
$V_0= 0$ or $\rho_0=0$, which does not give any interesting solutions.
Hence we consider the cases with
\begin{equation*}
{\lambda\over \mu} \leq -1~~{\rm and}~~~q\geq 2
\,.
\end{equation*}
For the case with $\lambda / \mu < -1$ or $q>2$, the potential term or matter term
does not contribute asymptotically in the dynamics.
Then we shall classify the (asymptotic) solutions into the following four cases: \\
{\bf I}. Both the potential term and the matter density do contribute in the dynamics ($\lambda/\mu=-1$ and $q=2$),
\\
{\bf II}. The matter density does not contribute, but the potential term does ($\lambda/\mu=-1$ and $q>2$),
\\
{\bf III}. The potential term does not contribute, but the matter density does ($\lambda/\mu<-1$ and $q=2$),
\\
{\bf IV}. Both the potential term and the matter density do not contribute in the dynamics ($\lambda/\mu<-1$ and $q>2$).
In this text, we discuss only the case that the potential plays an important role, i.e., the cases I and II.
These correspond to the scaling solution and the conventional quintessence solution in
the coupled quintessence model, respectively.
In Appendix, we shall discuss the other two cases ({\bf III} and {\bf IV}).
\subsection{Case I : $\mu=-\lambda$ and $q=2$}
From the definition of $q$, we have
\begin{equation*}
p={2\over 3}\left(1+{g\over \lambda}\right)
\,,
\end{equation*}
and using the equation of $p$, the full equations of (\ref{Veom}) and (\ref{rhoeom}) solve as
\begin{align}
V_0 e^{-\lambda\phi_0} t_0^2 &={4\gamma_0
\over \lambda^2(\gamma_0+1)}-2p+3p^2 \,,
\label{VD1} \\
\rho_0t_0^2&=
-{4\gamma_0\over \lambda^2}+2p\,.
\label{rhoD1}
\end{align}
Since $\gamma_0$ contains $e^{-\lambda\phi_0} t_0^2$
as
\begin{equation*}
\gamma_0={1\over \sqrt{1-{4\epsilon f_0\over
\lambda^2e^{-\lambda\phi_0} t_0^2}}}
\,,
\end{equation*}
substituting $e^{-\lambda\phi_0} t_0^2$ into the Eq.~(\ref{VD1}),
we obtain the equation for $\gamma_0$:
\begin{align}
&[3(1-\epsilon f_0V_0)
+g(g+\lambda)]\gamma_0^2
\nonumber \\
&~~~~~~~~~~
-3\gamma_0
-g(g+\lambda)=0
\,,
\label{gamma_I}
\end{align}
whose solution is given by
\begin{align*}
\gamma_0 =\gamma_0^{(\pm)} \equiv {3\pm \sqrt{D} \over 2[3(1-\epsilon f_0V_0)
+g(g+\lambda)]}
\,,
\end{align*}
where the discriminant $D$ is defined by
\begin{equation*}
D= 4g(g+\lambda)[3(1 - \epsilon f_0V_0) + g(g+\lambda)] + 9
\,.
\end{equation*}
$D$ must be non-negative in order to find a real solution for $\gamma_0$.
For the D-BIonic ($\epsilon=-1$), it is always positive definite, then the root for $\gamma_0$
exists.
For the DBI ($\epsilon=+1$), we find the condition as
\begin{equation}
f_0V_0 \leq 1 + \frac{1}{3} g (g+\lambda) + \frac{3}{4g(g+\lambda)}
\,.~~~~
\label{con_DBI1}
\end{equation}
Since it turns out that $\gamma_0^{(-)}$ branch solution does not give the
accelerating Universe
for both DBI and D-BIonic,
we then
consider only $\gamma_0^{(+)}$ solution.
For the DBI case, we obtain the additional condition, since otherwise even $\gamma_0^{(+)}$ gives
a value smaller than 1:
\begin{equation}
f_0V_0 < 1 + \frac{1}{3} g (g+\lambda) \,,
\label{con_DBI2}
\end{equation}
This condition is tighter than the previous one (\ref{con_DBI1});
hence the condition (\ref{con_DBI2}) always gives the non-negative discriminant.
From Eq. (\ref{rhoD1}) and the equation of $p$, we find
\begin{align*}
\rho_0t_0^2&=
{4\left[
\lambda\left(g+\lambda \right)-3\gamma_0 \right]\over 3
\lambda^2}
\,,
\end{align*}
this leads to the additional condition
\begin{equation}
\lambda\left(g+\lambda \right)-3 \gamma_0 \geq 0
\label{con_density}
\,.
\end{equation}
The above condition gives the constraint on the coupling constant for the existence of the solution
as
\begin{equation}
g\geq g_{\rm cr}
\,,
\label{con_A1}
\end{equation}
with
\begin{equation*}
g_{\rm cr}\equiv {\lambda\over 2}\left[-1+
\sqrt{\left(1- {6\over \lambda^2} \right)^2+{12\epsilon f_0V_0\over \lambda^2}}\right]
\,.
\end{equation*}
In order to find an accelerating Universe,
since
\begin{equation*}
p=\frac{2}{3}\left(1 + \frac{g}{\lambda}\right) >1\,, \nonumber
\end{equation*}
we obtain
\begin{equation}
g>{\lambda\over 2} \,.
\label{con_A2}
\end{equation}
Therefore, Eqs.~(\ref{con_A1}) and (\ref{con_A2}) are the conditions of $g$ for realising the scaling solution {\bf I}
giving an accelerating Universe.
Eq.~(\ref{con_A1}) gives the tighter condition for $\lambda < \lambda_{\rm cr}$,
where $\lambda_{\rm cr}$ is given by
\begin{equation*}
\lambda_{\rm cr}^2\equiv
2\left[-(1-\epsilon f_0V_0)+\sqrt{(1-\epsilon f_0V_0)^2+3}\right]
\,,
\end{equation*}
while Eq.~(\ref{con_A2}) gives the tighter condition for $\lambda > \lambda_{\rm cr}$.
The EOS parameter of the scalar field is given by
\begin{equation*}
w_\phi=-1+{3\gamma_0\over 3\gamma_0+g(g+\lambda)}
\,.
\end{equation*}
We also introduce the effective EOS parameter $w_{\rm eff}$ by
\begin{eqnarray*}
w_{\rm eff}\equiv -1-{2\dot H\over 3H^2}=-1+{2\over 3p}.
\end{eqnarray*}
The present solution gives
\begin{eqnarray*}
w_{\rm eff}=-{g\over g+\lambda}.
\end{eqnarray*}
The matter density and the scalar field density are scaled in this solution.
Then we can evaluate the asymptotic values of $\Omega_m$ and $\Omega_\phi$ as follows:
\begin{align*}
\Omega_{\rm m}&=
{\lambda(g+\lambda)-3\gamma_0\over (g+\lambda)^2} \,,
\\
\Omega_\phi&={g(g+\lambda)+3\gamma_0\over (g+\lambda)^2}
\,.
\end{align*}
We find the scaling solution {\bf I} for accelerating Universe by contributions from both potential and matter density,
which is given by $\gamma_0^{(+)}$ and $p$, with the constraints on $g$. Note that there is another constraint (\ref{con_DBI2})
on $f_0 V_0$ for the DBI theory.
\subsection{Case II : $\mu=-\lambda$ and $q>2$}
In this case, the matter density does not contribute to the dynamics asymptotically;
the basic equations for the asymptotic solution (\ref{Veom}) and (\ref{rhoeom})
give Eq.~(\ref{VD1}) and
\begin{eqnarray*}
-2\lambda\gamma_0[3p(\gamma_0+1)-2\gamma_0]
+\lambda(3p^2(\gamma_0+1)\lambda^2-4\gamma_0^2)=0
\,,
\end{eqnarray*}
which gives
\begin{equation}
p={2\gamma_0\over \lambda^2}
\,, \label{pC2}
\end{equation}
unless $p=0$.
Then we obtain from Eq.~(\ref{VD1})
\begin{equation}
V_0 e^{-\lambda \phi_0}t_0^2={4\gamma_0^2\over \lambda^4}\left[3-{\lambda^2\over \gamma_0+1}\right]
\,. \label{VC2}
\end{equation}
Since we assume $V_0>0$, we have a constraint
\begin{equation*}
\lambda^2<3(\gamma_0+1)
\,.
\end{equation*}
Using the definition of $\gamma_0$, we eliminate $e^{-\lambda \phi_0}t_0^2$ in Eq.~(\ref{VC2}),
and we find the equation for $\gamma_0$ as
\begin{equation}
3\gamma_0^2-\lambda^2\gamma_0+\lambda^2(1-\epsilon f_0V_0)-3=0
\,. \label{gammaC2}
\end{equation}
Then $\gamma_0$ is given by
\begin{align*}
&\gamma_0=\gamma_0^{(\pm)}
\nonumber \\
&~~\equiv
{\lambda^2\over 6}\left[1\pm \sqrt{1-2(1-\epsilon f_0V_0) \left({6\over \lambda^2}\right)
+\left({6\over \lambda^2}\right)^2}\right]
\,.
\end{align*}
For the existence of the real roots of this equation, we find the condition such that
\begin{equation*}
\left({\lambda^2\over 6}\right)^2-2(1-\epsilon f_0V_0) \left({\lambda^2\over 6}\right)
+1\geq 0
\,.
\end{equation*}
For the DBI
($\epsilon = +1$), this condition is always satisfied, and then
we can find the solution.
On the other hand, for the D-BIonic
($\epsilon = -1$),
we have the condition on $\lambda$ for the existence of the root.
We find
\begin{equation*}
\lambda^2\geq \lambda_+^2~~{\rm or}~~~\lambda^2\leq \lambda_-^2\,,
\end{equation*}
where
\begin{equation*}
\lambda_\pm^2
\equiv 6\left[1-\epsilon f_0V_0\pm \sqrt{(1-\epsilon f_0V_0)^2-1}
\right]\,.
\end{equation*}
Our ansatz $q>2$ gives another constraint such that
\begin{eqnarray*}
3\gamma_0>\lambda(g+\lambda)
\,,
\end{eqnarray*}
which is reduced to
\begin{eqnarray*}
g&<&g_{\rm cr}
\,.
\end{eqnarray*}
For the power of expansion, $p$, substituting $\gamma_0$ into Eq. (\ref{pC2}), we find
\begin{align*}
&p=p^{(\pm)}
\nonumber \\
&~~
\equiv
{1 \over 3}
\left[1\pm \sqrt{1-2(1-\epsilon f_0V_0) \left({6\over \lambda^2}\right)
+\left({6\over \lambda^2}\right)^2}\right]
\,.
\end{align*}
However, in order to obtain the accelerating Universe ($p>1$), only positive-branch ($\gamma_0^{(+)}$ and $p^{(+)}$)
is possible. We then find the condition for $p>1$ is given by
\begin{equation*}
3\left({\lambda^2\over 6}\right)^2+
2(1-\epsilon f_0V_0) \left({\lambda^2\over 6}\right)
-1<0\,,
\end{equation*}
i.e.,
\begin{equation*}
0<\lambda^2<\lambda_{\rm cr}^2\,,
\end{equation*}
where the critical value
$\lambda_{\rm cr}$ is the same as the one defined in the previous subsection.
This condition always satisfies the constraint of $\lambda^2\leq \lambda^2_{-}$ for the D-BIonic. Therefore,
we have the accelerating Universe solution {\bf II} with $\gamma_0^{(+)}$ and $p^{(+)}$,
where there is the upper bound $\lambda_{\rm cr}^2$.
Note that when $\epsilon f_0 V_0 =0$, we recover the conventional acceleration condition in the quintessence model such that
$\lambda_{\rm cr}=\sqrt{2}$. For the DBI theory, the constraint becomes weaker
($\lambda_{\rm cr}>\sqrt{2}$), while
for the D-BIonic theory, it becomes stronger ($\lambda_{\rm cr}<\sqrt{2}$).
The EOS parameter of the scalar field in this case is given by
\begin{equation*}
w_\phi=-1+{\lambda^2\over 3\gamma_0}\,.
\end{equation*}
When $\gamma_0 = 1$, the $w_\phi$ is the same as that in the quintessence model
\cite{Copeland:1997et}.
\begin{widetext}
\subsection{The solution {\bf I} or the solution {\bf II}}
\label{expectation}
Here first we summarise the above results in Table \ref{Table I} and Fig \ref{F1}.
\begin{table}[H]
\begin{center}
\begin{tabular}{|c||c|c|c|c|}
\hline
&\multicolumn{2}{|c|}{} & \multicolumn{2}{|c|}{}\\[-.5em]
&\multicolumn{2}{|c|}{The solution {\bf I}} & \multicolumn{2}{|c|}{The solution {\bf II}}\\[.2em]
\hline
&& &&\\[-.5em]
Theory& DBI ($\epsilon=1$) & ~~~~D-BIonic ($\epsilon=-1$) ~~~~ & DBI ($\epsilon=1$) & D-BIonic ($\epsilon=-1$)
\\[.2em]
\hline \hline
&\multicolumn{2}{|c|}{} & \multicolumn{2}{|c|}{}\\[-.5em]
$\gamma_0$
&\multicolumn{2}{|c|}{~~~$\displaystyle{3+ \sqrt{4g(g+\lambda)[3(1 - \epsilon f_0V_0) + g(g+\lambda)] + 9} \over 2[3(1-\epsilon f_0V_0)
+g(g+\lambda)]}$~~~}
&\multicolumn{2}{|c|}{~~~$\displaystyle{{\lambda^2\over 6}+\sqrt{\left({\lambda^2\over 6}\right)^2-2(1-\epsilon f_0V_0) \left({\lambda^2\over 6}\right)+1}}$~~~}
\\[1em]
\hline
&\multicolumn{2}{|c|}{} & \multicolumn{2}{|c|}{}\\[-.5em]
$p$
&\multicolumn{2}{|c|}{$\displaystyle{2\over 3}\left(1+{g\over \lambda}\right)$}
&\multicolumn{2}{|c|}{$\displaystyle{2\gamma_0\over \lambda^2}$}
\\[1em]
\hline
\cline{4-5}
&\multicolumn{2}{|c|}{} & \multicolumn{2}{|c|}{}\\[-.6em]
& \multicolumn{2}{|c|}{$g>g_{\rm cr}$} & \multicolumn{2}{|c|}{$g<g_{\rm cr}$}
\\[.2em]
\cline{2-5}
existence && &&\\[-.6em]
&~~~$f_0V_0<1+{1\over 3}g(g+\lambda)$~~~& --- &~~~~~~~~~~~~~---~~~~~~~~~~~~~
&~$\lambda^2>\lambda_+^2$ or $\lambda^2<\lambda_-^2$~
\\[.2em]
\hline
&\multicolumn{2}{|c|}{} & \multicolumn{2}{|c|}{}\\[-.6em]
acceleration&\multicolumn{2}{|c|}{$g>{\lambda\over 2}$} & \multicolumn{2}{|c|}{$\lambda^2<\lambda_{\rm cr}^2$}
\\[.4em]
\cline{4-5}
\hline
&\multicolumn{2}{|c|}{} & \multicolumn{2}{|c|}{}\\[-.6em]
stability&\multicolumn{2}{|c|}{$g>g_{\rm cr}$} &\multicolumn{2}{|c|}{$g<g_{\rm cr}$}
\\[.4em]
\hline
\hline
&\multicolumn{2}{|c|}{} & \multicolumn{2}{|c|}{}\\[-.6em]
$w_\phi$&\multicolumn{2}{|c|}{$\displaystyle{-1+{3\gamma_0\over 3\gamma_0+g(g+\lambda)}}$}&\multicolumn{2}{|c|}{$
\displaystyle{-1+{\lambda^2\over 3\gamma_0}}$}
\\[1em]
\hline
&\multicolumn{2}{|c|}{} & \multicolumn{2}{|c|}{}\\[-.6em]
$w_{\rm eff}$&\multicolumn{2}{|c|}{$\displaystyle{-{g\over g+\lambda}}$}&\multicolumn{2}{|c|}{$\displaystyle{-1+{\lambda^2\over 3\gamma_0}}$}
\\[1em]
\hline
&\multicolumn{2}{|c|}{} & \multicolumn{2}{|c|}{}\\[-.6em]
$\Omega_{\rm m}$&\multicolumn{2}{|c|}{$\displaystyle{\lambda(g+\lambda)-3\gamma_0\over (g+\lambda)^2} $}&\multicolumn{2}{|c|}{$0$}
\\[1em]
\hline
&\multicolumn{2}{|c|}{} & \multicolumn{2}{|c|}{}\\[-.6em]
$\Omega_{\phi}$&\multicolumn{2}{|c|}{$\displaystyle{g(g+\lambda)+3\gamma_0\over (g+\lambda)^2}$}&\multicolumn{2}{|c|}{$1$}
\\[1em]
\hline
\end{tabular}
\caption{Two analytic solutions with $\mu=-\lambda$ in the D-BIonic and DBI theories with $f=\epsilon f_0 e^{-\mu\phi}$,
$V=V_0 e^{-\lambda\phi}$, and $A=e^{g\phi}$.
$g_{\rm cr}$, $\lambda_{\rm cr}$ and $\lambda_\pm$ are defined in the text.
The case I gives a scaling solution, in which the ratio of matter
energy density to the scalar field energy density is constant, while the scalar field energy becomes dominant in the case II solution.
For the accelerating universe, the existence condition coincides with the stability condition, which will be analysed in Sec \ref{stability}.}
\label{Table I}
\end{center}
\end{table}
\begin{figure}[h]
\includegraphics[width=5cm]{F13.eps} ~~
\includegraphics[width=5cm]{F23.eps} ~~
\includegraphics[width=5cm]{F33.eps}
\\
(a)\hskip 5cm (b) \hskip 5cm (c) \\
\caption{The existence regions of two accelerating
solutions in the parameter space ($\lambda, g$) for (a) the D-BIonic ($\epsilon=-1$) , (b) the canonical kinetic term ($\epsilon=0$) and (c) the DBI ($\epsilon=1$) .
We set $f_0V_0=1$.
The light orange and light blue regions correspond to the solutions of the case I and II, respectively. The red dashed curve denotes $g=g_{\rm cr}$,
while the blue dot-dashed line shows $\lambda=\lambda_{\rm cr}$.
The green curve gives $\Omega_{\rm m}=0.3$, while the black dashed lines denote $w_{\phi}=-0.97, -0.95$ and $-0.9$, respectively from the above. The red solid lines denote $p=1$.}
\label{F1}
\end{figure}
\end{widetext}
If there is no matter coupling with the scalar field ($g=0$), the solution {\bf II} will
be realised. When there exists the coupling ($g\neq 0$), there are two solutions in the range of $\lambda < \lambda_{\rm cr}$.
The question is which asymptotic solution
is found, {\bf I} or {\bf II} ?
We expect the case with the larger power exponent of the cosmic expansion $p$
will be realised \cite{FujiiMaeda}.
For the solution {\bf II}, the power exponent $p_{\rm II}$ is given by
\begin{equation*}
p_{\rm II}={1\over 3}\left[
1+\sqrt{1-2(1-\epsilon f_0V_0)\left({6\over \lambda^2}\right)+\left({6\over \lambda^2}\right)^2}\right]
\,,
\end{equation*}
which depends on $\epsilon f_0V_0$ and $\lambda$,
whereas for the solution {\bf I} is
\begin{equation*}
p_{\rm I}= \frac{2}{3} \left( 1 + \frac{g}{\lambda}\right)
\,,
\end{equation*}
which is fixed by $\lambda$ and $g$.
So our conjecture is that
if $p_{\rm II} > p_{\rm I}$, then matter contribution is ignored,
which is a usual quintessence model with the DBI or D-BIonic kinetic term,
while when $p_{\rm II} < p_{\rm I}$, the existence of matter assists the
acceleration of the cosmic expansion. Even if $\lambda$ is too large to obtain a usual
quintessence scenario, we find the acceleration for
\begin{equation*}
g>{\lambda\over 2}
\,.
\end{equation*}
The critical value of the coupling constant $g$ is obtained by setting
$p_{\rm I}=p_{\rm II}$, giving $g=g_{\rm cr}$
with
\begin{equation*}
g_{\rm cr}\equiv -{\lambda\over 2}\left[1-
\sqrt{1-2(1-\epsilon f_0V_0)\left({6\over \lambda^2}\right)+\left({6\over \lambda^2}\right)^2}\right]
\,.
\end{equation*}
The critical value $g_{\rm cr}$ is the same as that for the existence obtained in the previous subsection.
When $g>g_{\rm cr}$, the power exponent of the solution {\bf I}
is larger than that of the solution {\bf II}.
As we will see in the next section, the stability condition is also the same.
As a result,
when $g>{\lambda\over 2}$
and $g>g_{\rm cr}$, we find the accelerated expansion of the Universe
assisted by matter fluid.
\section{STABILITY ANALYSIS}
\label{stability}
In order to confirm the expectations in Sec.~\ref{expectation},
we need to analyse the stability of those solutions {\bf I} and {\bf II}.
In this section we will use the dynamical system approach.
\subsection{Dynamical System and Fixed points}
Starting from the Friedmann equation (\ref{Friedmann}), we obtain the first constraint equation on this system:
\begin{equation}
\Omega_{\rm m} = 1 - x^2 - y^2 \,,
\label{cons1}
\end{equation}
where we introduce the following dimensionless variables;
\begin{equation*}
x \equiv \frac{\gamma}{\sqrt{3(\gamma + 1)}} \frac{\dot \phi}{H}, \quad y \equiv \frac{\sqrt{V}}{\sqrt{3} H}.
\end{equation*}
Instead of time derivatives, we use the derivatives with respect to the e-folding number, $N = \ln a$.
We then obtain the following autonomous equations:
\begin{align}
\frac{dx}{dN} =& -\frac{3}{2}x \left[{1\over \gamma}(1-x^2)+y^2\right]
\nonumber \\
&~~~+ \frac{\sqrt{3(\gamma+1)}}{2\gamma}
\left[\lambda y^2 -
g (1 - x^2 - y^2) \right]\,,
\label{eq_x}
\\[1em]
\frac{dy}{dN} =& \frac{3}{2}y\left[{1\over \gamma} x^2 + (1 - y^2)\right]
- \lambda \frac{\sqrt{3(\gamma+1)}}{2\gamma} x y
\,.
\label{eq_y}
\end{align}
Since the variable $\gamma$ is included in the above equations, in order to close
the system, we need the equation for $\gamma$, which is given by
\begin{align}
\frac{d \gamma}{d N} &= \frac{(\gamma -1)\sqrt{3 (\gamma + 1)} }{\gamma x} \times \nonumber \\
&\left[ - \sqrt{3(\gamma + 1)}x - \mu x^2 + \lambda y^2 -g (1 - x^2 -y^2) \right]
\,.
\label{gammaN}
\end{align}
However, note that $\gamma$ is described as
\begin{eqnarray*}
\gamma=1+\epsilon f_0 V_0 e^{-(\lambda+\mu)\phi}{x^2\over y^2}
\,.
\end{eqnarray*}
Hence when $\mu=-\lambda$, $\gamma$ is not an independent variable.
Eqs. (\ref{eq_x}) and (\ref{eq_y}) give a closed set of the dynamical system.
By virtue of these dynamical variables, the cosmological parameters are given by
\begin{align*}
\Omega_{\phi} &= x^2 + y^2\,, \\
w_{\phi} &= \frac{x^2 - \gamma y^2}{\gamma(x^2 + y^2)}\,, \\
w_{\rm eff} &= \frac{1}{\gamma} \left( x^2 -\gamma y^2\right)\,.
\end{align*}
We are interested in the fixed points $(x,y)=(x_0,y_0)$ with $\gamma = \gamma_0 = $ constant, which yields
$d \gamma / d N = 0$. Since $\gamma>0$, we find the following two
possibilities: \\
{\bf (i)} $\gamma_0 =1$, this is the same as the coupled quintessence with the conventional
canonical kinetic term, which is not our interest. \\
{\bf (ii)} The intermediate value of $\gamma_0$, i.e. $0<\gamma_0<1$ for the D-BIonic theory, while $1<\gamma_0<\infty$ for
the DBI theory,
which is obtained from the condition such that the square bracket in Eq.~(\ref{gammaN}) is equal to zero.
We find
\begin{equation}
\gamma_0=-1+{1\over 3 x_0^2} \left[ \lambda y_0^2 - \mu x_0^2 -g (1 - x_0^2 -y_0^2)\right]^2 \,.
\label{gammaiii}
\end{equation}
Since $\gamma_0=1$ does not give new solution, we will discuss only the case {\bf (ii)}.
By setting $dx/dN = 0$ and $dy/dN = 0$ with $\gamma=\gamma_0$,
we find fixed points ($x_0,y_0$) as shown in Table \ref{tab2}.
\begin{table}[h]
\centering
\begin{tabular}{ c | c c | c}
\hline
\hline
& $x_0$ & $y_0$ & solution\\[.5em]
\hline
& & & \\[-.5em]
(1) & $-1$ & 0 &{\small\bf IV}$_-$\\[1em]
(2) & $ 1$ & 0 &{\small\bf IV}$_+$\\[.5em]
(3) & $-\frac{g \sqrt{3(1+\gamma_0)}}{3 }$ & 0 &{\small\bf III}\\[1em]
(4) & $\frac{\lambda}{\sqrt{3(1+\gamma_0)}}$ & $\sqrt{1 - \frac{\lambda^2}{3(1+\gamma_0)}}$ &{\small\bf II} \\[1em]
(5) & $\frac{\sqrt{3} \gamma_0}{\sqrt{1+\gamma_0} (g + \lambda)}$ &
$\sqrt{\frac{3 \gamma_0 + (1+\gamma_0)g (g+\lambda)}{(1+\gamma_0)(g + \lambda)^2}}$&{\small\bf I} \\[.5em]
\hline
\hline
\end{tabular}
\caption{Five fixed points. }
\label{tab2}
\end{table}
There are five fixed points, which satisfy the necessary condition of $y_0 \geq 0$, whereas $x$ can be positive or negative depending
on the sign of $\dot \phi$.
We expect that these fixed points correspond to the (asymptotic) analytic solutions given in the previous section
and Appendix \ref{other_solutions}.
We shall describe each point in the following:
\subsubsection{\rm Fixed points (1) and (2)}
The
simplest fixed points
are given by
\begin{align*}
(1) \qquad (x_0,y_0) &= (-1,0) \,, \nonumber \\
(2) \qquad (x_0,y_0) &= (1,0) \,. \nonumber \\
\end{align*}
From Eq.~(\ref{gammaiii}), we obtain
\begin{equation*}
\gamma_0 = \frac{\mu^2 -3}{3} \,,
\end{equation*}
and the cosmological parameters as
\begin{align*}
\Omega_{\phi} &= 1 \,, \\
\Omega_{\rm m} &= 0 \,, \\
w_{\phi} &= \frac{1}{\gamma_0} \,, \\
w_{\rm eff} &= \frac{1}{\gamma_0} \,.
\end{align*}
Thus, the fixed points (1) and (2) correspond to the asymptotic solution {\bf IV}$_\pm$ given in Appendix \ref{other_solutions}.
Since this solution does not give an accelerating Universe, we will not analyse the stability,
although we expect it is unstable unless $V_0=0$ \cite{Copeland}.
\subsubsection{\rm Fixed point (3)}
Next simple fixed point is found as
\begin{align*}
(3) \qquad (x_0,y_0) &= \left(-\frac{ g \sqrt{3(1+\gamma_0)}}{3},0\right) \,.\nonumber \\
\end{align*}
From Eq.~(\ref{gammaiii}), we obtain
\begin{equation*}
\gamma_0 = \frac{g(\mu - g)}{3 - g(\mu - g)} \,,
\end{equation*}
and the cosmological parameters as
\begin{align*}
\Omega_{\phi} &= \frac{g^2 (1 + \gamma_0)}{3} \,, \\
\Omega_{\rm m} &= \frac{3 - g^2 (1 + \gamma_0)}{3} \,, \\
w_{\phi} &= \frac{1}{\gamma_0} \,, \\
w_{\rm eff} &= \frac{g^2 (1 + \gamma_0)}{3 \gamma_0} \,.
\end{align*}
Then, the fixed points (3) corresponds to the asymptotic solution {\bf III} discussed in Appendix \ref{other_solutions}.
Since this solution does not give an accelerating Universe either, we will not analyse the stability.
(In this case, we also expect the fixed point is unstable \cite{Copeland}).
\subsubsection{\rm Fixed point (4)}
One interesting fixed point is given by
\begin{align*}
(4) \qquad (x_0,y_0) = \left(\frac{\lambda}{\sqrt{3(1+\gamma_0)}},\sqrt{1 - \frac{\lambda^2}{3(1+\gamma_0)}}\right) \,.
\end{align*}
From Eq. (\ref{gammaiii}), we find
\begin{equation*}
\mu = - \lambda \,.
\end{equation*}
By using the definition of $\gamma$ and the above relation, we have
\begin{equation}
\gamma = \frac{\epsilon f_0 V_0 x^2 + y^2}{y^2} \,.
\label{cons2}
\end{equation}
Substituting $x_0$ and $y_0$ of the fixed point (4) in Eq. (\ref{cons2}), we obtain the equation for $\gamma_0$ as
\begin{equation*}
3\gamma_0^2-\lambda^2\gamma_0+(1-\epsilon f_0V_0)\lambda^2-3=0
\,.
\end{equation*}
This is Eq.~(\ref{gammaC2}) of the solution {\bf II} as we expect. $\gamma_0$ is given by
\begin{equation*}
\gamma_0 =
{\lambda^2\over 6}\left[1 + \sqrt{1-2(1-\epsilon f_0V_0)\left({ 6\over \lambda^2} \right)
+\left({ 6\over \lambda^2}\right)^2}\right]
\,.
\end{equation*}
Only the larger root of the solutions is chosen because it
gives the accelerated expansion ($p > 1$).
The cosmological parameters also confirm that there is no contribution from matter density at the fixed point (4).
\begin{align*}
\Omega_{\phi} &= 1 \,, \\
\Omega_{\rm m} &= 0 \,, \\
w_{\phi} &= -1 + \frac{\lambda^2}{3\gamma_0} \,, \\
w_{\rm eff} &= -1 + \frac{\lambda^2}{3\gamma_0} \,.
\end{align*}
The fixed point (4) is the same as the point (C4) in Ref. \cite{Copeland}.
\subsubsection{\rm Fixed point (5)}
The last fixed point gives another interesting solution:
\begin{align*}
&(5) \qquad \nonumber \\
&(x_0,y_0) =
\left(\frac{\sqrt{3} \gamma_0}{\sqrt{1+\gamma_0} (g + \lambda)},
\sqrt{\frac{3\gamma_0 + (1+\gamma_0)g(g+\lambda)}{(1+\gamma_0)(g + \lambda)^2}}\right) \,.
\end{align*}
In the similar way as the fixed point (4),
we also have $\mu = -\lambda$ in this solution from Eq.~(\ref{gammaiii}),
and then
we obtain from Eq.~(\ref{cons2}) the quadratic equation for $\gamma_0$ as
\begin{equation*}
[3(1-\epsilon f_0V_0)
+g(g+\lambda)]\gamma_0^2
-3\gamma_0
-g(g+\lambda)=0
\,.
\end{equation*}
This is the same as Eq.~(\ref{gamma_I}) which is found in the solution {\bf I}. The solution is
\begin{equation*}
\gamma_0 = \frac{3 + \sqrt{4g(g+\lambda)[3(1 - \epsilon f_0 V_0 ) + g(g +\lambda)]+ 9}}{2 (3(1 - \epsilon f_0 V_0 )
+ g(g +\lambda))} \,.
\end{equation*}
We choose only the larger root for the same reason as in the previous subsection.
The cosmological parameters are
\begin{align}
\Omega_{\phi} &= \frac{g (g +\lambda) + 3\gamma_0}{(g+\lambda)^2} \,, \\
\Omega_{\rm m} &= \frac{\lambda (g+\lambda) - 3\gamma_0}{(g+\lambda)^2} \,, \\
w_{\phi} &= - \frac{g (g+\lambda)}{3\gamma_0 + g(g+\lambda)} \,,
\label{wphiD} \\
w_{\rm eff} &= - \frac{g}{(g+\lambda)} \,.
\label{weffD}
\end{align}
This is the scaling solution as we have seen in the solution {\bf I}.
The fixed point (5) is the extension of the fixed point (C5) in Ref. \cite{Copeland}.
In fact, it is the same as
the fixed point (C5) when there is no coupling ($g = 0$).
Next, we will analyse the stability of the fixed points (4) and (5) (the solutions {\bf I} and {\bf II}).
\subsection{Linear stability}
Substituting Eq.~(\ref{cons2}) into Eqs.~(\ref{eq_x}) and (\ref{eq_y}) , we obtain the autonomous system only for $x$ and $y$:
\begin{align*}
\frac{dx}{dN} = F(x,y) \,, \quad \frac{dy}{dN} = G(x,y) \,.
\end{align*}
Considering linear perturbations
\begin{equation*}
\begin{pmatrix} x \\ y \end{pmatrix} =
\begin{pmatrix} x_0+\delta x \\ y_0+\delta y \end{pmatrix} \,,
\end{equation*}
we find
\begin{equation*}
\frac{d}{dN} \begin{pmatrix} \delta x \\ \delta y \end{pmatrix} =
\mathcal{M}_0 \begin{pmatrix} \delta x \\ \delta y \end{pmatrix} \,,
\end{equation*}
where
\begin{equation*}
\mathcal{M}_0 = \begin{pmatrix} \frac{\partial F}{\partial x}\Big{|}_0 && \frac{\partial F}{\partial y}\Big{|}_0 \\[.5em]
\frac{\partial G}{\partial x}\Big{|}_0 && \frac{\partial G}{\partial y}\Big{|}_0 \end{pmatrix} \,.
\end{equation*}
Each component of the matrix $\mathcal{M}$ is given by
\begin{widetext}
\begin{align*}
\frac{\partial F}{\partial x}\Big{|}_0=& \frac{1}{2x_0 \gamma_0^2 \sqrt{1+ \gamma_0}}
\Big{\{}3x_0 \sqrt{1+\gamma_0} \left[\gamma_0-2+( \gamma_0+2)x_0^2 - \gamma_0^2 y_0^2 \right] \nonumber \\
& +\sqrt{3}\left[(\gamma_0-1)(\gamma_0+2)(g-(g+\lambda) y_0^2)+(\gamma_0^2+\gamma_0+2)x_0^2\right]
\Big{\}} \,, \\
\frac{\partial F}{\partial y}\Big{|}_0 =& \frac{1}{2y_0 \gamma_0^2 \sqrt{1+ \gamma_0}}
\Big{\{}6x_0 \sqrt{1+\gamma_0} \left[(\gamma_0-1)(x_0^2-1)-\gamma_0^2 y_0^2\right] \nonumber \\
& + \sqrt{3}\left[g(\gamma_0-1)(\gamma_0+2)(x_0^2-1)+(g+\lambda)(3\gamma_0^2+3\gamma_0-2)y_0^2\right]
\Big{\}} \,, \\
\frac{\partial G}{\partial x}\Big{|}_0 =& \frac{y_0}{\gamma_0^2\sqrt{1+\gamma_0}}
\left(3x_0\sqrt{1+\gamma_0} - \sqrt{3}\lambda\right) \,, \\
\frac{\partial G}{\partial y}\Big{|}_0 =& - \frac{3}{2\gamma_0^2 } \left[(2-3\gamma_0)x_0^2+3\gamma_0^2 y_0^2-\gamma_0^2\right]
- {\sqrt{3}\lambda(\gamma_0^2+\gamma_0-1)\over \gamma_0^2\sqrt{1+\gamma_0}} x_0
\,.
\end{align*}
\end{widetext}
Setting
\begin{align*}
\delta x, \delta y \propto e^{\omega N}\,,
\end{align*}
we find the quadratic equation for the eigenvalues $\omega$ of the matrix $\mathcal{M}_0$.
If both eigenvalues are negative (or real parts are negative for complex eigenvalues),
the fixed point is stable against linear perturbations.
\subsubsection{\rm Fixed point (4)}
For the fixed point (4), we find two real eigenvalues of the matrix $\mathcal{M}_0$ as
\begin{equation*}
\omega_1 = -3 + \frac{\lambda^2}{2\gamma_0} \,, ~~\omega_2 =-3 + \frac{\lambda (g + \lambda)}{\gamma_0} \,.
\end{equation*}
In order for the fixed point to be stable, both eigenvalues must be negative, which requires
\begin{equation*}
\lambda^2 < 6 \gamma_0 \quad {\rm and} \quad \lambda (g + \lambda) < 3\gamma_0 \,.
\end{equation*}
The first condition is always true when we choose $\gamma_0^{(+)}$ for the accelerating universe solution.
While, the second condition gives
\begin{equation*}
g < g_{\rm cr} \,.
\end{equation*}
This confirms our anticipation in the previous section that we must impose the condition
$g < g_{\rm cr}$ in order to obtain the stable solution
of {\bf II}.
The solution {\bf II} in the light blue region in Fig. \ref{F1} is stable.
\subsubsection{\rm Fixed point (5)}
In the similar way as the fixed point (4), the eigenvalues of the fixed point (5) are obtained as
\begin{equation*}
\omega_\pm = \frac{-3 \gamma_0^{3/2} (2g+ \lambda) \pm \sqrt{\mathcal{D}}}{4\gamma_0^{3/2}(g+\lambda)} \,.
\end{equation*}
with
\begin{eqnarray*}
&&
\mathcal{D}\equiv 9\gamma_0^3 (2g+\lambda)^2
\nonumber \\
&&~~~
+24[3\gamma_0+2g(g+\lambda)][3\gamma_0-\lambda(g+\lambda)]
\,.
\end{eqnarray*}
From the condition (\ref{con_density}) of the solution {\bf I},
\begin{eqnarray*}
\mathcal{D}\leq 9\gamma_0^3 (2g+\lambda)^2 \,.
\end{eqnarray*}
Hence if $\mathcal{D}\geq 0$, $\sqrt{\mathcal{D}}$ is always smaller than
$3 \gamma_0^{3/2} (2g+ \lambda)$. Thus, we find $\omega_\pm \leq 0$. Consequently the solution is stable.
When $\mathcal{D}<0$, the square root term $\sqrt{\mathcal{D}}$ is pure imaginary, then
${\rm Re}(\omega_\pm)<0$. It guarantees that the solution is again stable (with spiral trajectories).
We also find the marginal stable condition $\omega_+=0$ when $3\gamma_0-\lambda(g+\lambda)=0$, which corresponds to
$\rho_0=0$, i.e., $g=g_{\rm cr}$. This gives the boundary of the stable region in the parameter space.
As a result, the solution {\bf I} in the light orange region in Fig. \ref{F1} is always stable.
\subsection{Non-linear stability}
In order to see whether the stable solutions are natural or not, we have to study not only the linear stability
but also the global (or non-linear) stability.
Here we solve the basic equations numerically and show those fixed points
are globally stable.
\begin{figure}[htb]
\includegraphics[width=8.5cm]{numerC.eps} \\
(a) $\lambda=0.5$\\[1em]
\includegraphics[width=8.5cm]{numerD.eps}\\
(b) $\lambda=1.6$
\caption{The trajectories of numerical solutions of the autonomous equations and fixed points for the D-BIonic gravity theory.
The black thick curve denotes the limiting condition of $\Omega_{\rm m} = 0$
and the black dashed lines correspond to the boundaries of $\gamma = 0$.
We choose $g = 1$, and $\epsilon f_0 V_0 = -1$.
(a) The top figure shows that the trajectories of the solutions converge to the fixed point $(x_0,y_0)=(0.206,0.978)$, which is
the fixed point (4) with $\Omega_{\rm m}=0$ (the solution {\bf II}).
(b) For the bottom figure, the trajectories converge to another stable fixed point $(x_0,y_0)=(0.378,0.758)$ which is
the fixed point (5) with $\Omega_{\rm m}=0.28 $ (the solution {\bf I}).}
\label{phasespace}
\end{figure}
In Fig {\ref{phasespace}}, we present some examples for the D-BIonic theory.
We choose $g=1$ and $\epsilon f_0 V_0 = -1$, giving
$\lambda_{\rm cr} = 1.14$.
In Fig {\ref{phasespace}} (a), we show the case of $\lambda=0.5$, which is $g < g_{\rm cr}=5.23$.
The trajectories of the numerical solutions show that they converge to the stable fixed point
$(x_0,y_0)=(0.206,0.978)$ with $\gamma_0=0.96$, which is the fixed point (4) (the solution {\bf II}).
The black thick curve denotes the limiting condition of $\Omega_{\rm m} = 0$ found by (\ref{cons1})
and the black dashed lines correspond to the boundaries of $\gamma = 0$ given by the definition (\ref{cons2}).
In Fig {\ref{phasespace}} (b), we depict the trajectories of the solutions for the case of $\lambda=1.6$,
which satisfies $\lambda > \lambda_{\rm cr}$ as well as $\lambda < 2 g$.
The trajectories converge to another stable fixed point
$(x_0,y_0)=(0.378,0.758)$ with $\gamma_0=0.75$, which is the fixed point (5) (the solution {\bf I}).
In this case we find the asymptotic value of $\Omega_{\rm m}=0.28 (\neq 0)$.
We also present the time evolution of the density parameters $\Omega_{\rm m}$ and $\Omega_\phi$
for the solution {\bf I}
in Fig. \ref{evo_Omega}.
For the case {\bf II}, we expect those values approach to $0$ and $1$.
But the case {\bf I} shows those asymptotic values are some intermediate values
between $0$ and $1$. Since the present observational values show the latter case,
if we select the case {\bf II}, we may need to fine-tune the present time to explain the observed values (the coincidence problem).
On the other hand, when we adopt the case {\bf I}, we may explain the observed values just by the
asymptotic ones. We need not fine-tune the present time.
\begin{figure}[htb]
\includegraphics[width=7cm]{omegasolutionI.eps}\\
\caption{The time evolution of the density parameters $\Omega_{\rm m}$ and $\Omega_\phi$ in terms of the redshift $z$.
We choose $\lambda = - \mu = 1.6$, $\epsilon f_0 V_0 = -1$ and
$g=1$ just as Fig. \ref{phasespace} (b).
The values approach $(\Omega_{\rm m},\Omega_\phi)=(0.28, 0.72)$ at the fixed point (5)
(the solution {\bf I}).}
\label{evo_Omega}
\end{figure}
\section{Observational constraints}
\label{observations}
In this section, we study whether we can explain the coincidence problem of dark energy and dark matter or not.
We assume that the Universe at present is described by the scaling solution {\bf I}.
From observations, we have the constraints on the cosmological parameters as
$w_{\rm DE} = -0.97\pm 0.05$, $\Omega_{\rm DE} = 0.692 \pm 0.012$, and $\Omega_{\rm CDM + B} = 0.308 \pm 0.012$ \cite{Aubourg}.
As for the constraint on the coupling constant $g$,
it was shown $|g| \lesssim 0.13$ from the CMB observation \cite{Amendola2}.
Although our model is different from theirs (the type {\bf II} tracking solution with the canonical kinetic term),
we expect the coupling constant is not so large.
Here we then assume the upper bound value on $g$, i.e., $g\approx 0.1$.
From the EOS parameter of dark energy,
it gives the strong constraint on $\gamma_0$ as follows:
$\gamma_0$ of the solution {\bf I} is given by
\begin{eqnarray}
\gamma_0=-{g(g+\lambda)(1+w_\phi)\over 3w_\phi}
\,.
\label{gamma_DE_EOS}
\end{eqnarray}
Since the acceleration condition is $\lambda<2g$,
we find
\begin{eqnarray}
\gamma_0 \lesssim 3\times 10^{-4}\ll 1
\label{gamma_obs_con}
\,.
\end{eqnarray}
Obviously, $\epsilon$ must be negative ($\epsilon = -1$), and then
only the D-BIonic theory can provide such a solution.
The condition (\ref{gamma_obs_con}) yields very large
value of $f_0 V_0$.
In fact, assuming $f_0 V_0\gg 1$, we find from Eqs.~(\ref{gamma_I}) and (\ref{gamma_DE_EOS})
\begin{eqnarray}
f_0 V_0\sim {3w_\phi^2\over g(g+\lambda)(1+w_\phi)^2} \gtrsim 10^5
\label{f0V0_obs_con}
\,.
\end{eqnarray}
Note that there is no upper bound on $f_0 V_0$ in the D-BIonic theory unlike Eq.~(\ref{con_DBI2}) in the DBI case.
In addition to the above cosmological constraint, we have another constraint for the screening at smaller scale.
According to \cite{Burrage}, the solar system constraints on the D-BIonic
theory is
\begin{equation*}
\sqrt{g} \Lambda \lesssim 4 \times 10^{-5} \, {\rm eV} \,,
\end{equation*}
where $\Lambda$ is a typical mass scale of the screening and defined by the action (\ref{action0}) in Appendix A.
Comparing our exponential form of $f(\phi)$ to the original paper of the D-BIonic theory, we obtain
\begin{equation}
| f(\phi) | = f_0 e^{-\mu \phi_0} = \Lambda^{-4} \gtrsim 3.9 \times 10^{15} \, {\rm eV^{-4}}\,,
\label{DBIonic}
\end{equation}
for $g=0.1$.
Since we assume that the scalar field is the source of dark energy, we get
\begin{equation*}
\rho_{\rm DE} = \rho_{\phi} =\frac{\gamma_0^2}{\gamma_0 + 1} \dot\phi |_0^2 + V(\phi_0) \simeq V(\phi_0) \,,
\end{equation*}
where we have used the fact that
the kinetic term is very small because $\gamma_0\ll 1$.
As for the potential of the scalar field, from observations we have constraint as
\begin{equation}
V(\phi) = V_0 e^{-\lambda \phi_0} \approx 2.6 \times 10^{-47} \, {\rm GeV^4} \,.
\label{DE}
\end{equation}
The multiplication of Eqs.~(\ref{DBIonic}) and (\ref{DE}) gives
\begin{equation*}
f_0 V_0 \approx 1.0 \times 10^{5} \,.
\end{equation*}
Surprisingly, this is the same order of magnitude as the value required to solve the coincidence problem discussed above.
In Fig. \ref{F4}, we present the parameter space ($\lambda, g$) where we may
find the solution for the dark energy problem.
\begin{figure}[h]
\includegraphics[width=6cm]{F4.eps}
\caption{The existence regions of two accelerating
solutions {\bf I} and {\bf II}, in the parameter space ($\lambda, g$) for the D-BIonic ($\epsilon=-1$).
We set $f_0V_0=10^5$.
The red dashed curve ($g=g_{\rm cr}$) and
the blue dot-dashed line ($\lambda=\lambda_{\rm cr}$) are almost the same.
The green curve denotes $\Omega_{\rm m}=0.3$, while the black dashed lines give $w_\phi=-0.97, -0.95$ and $-0.9$, respectively from the above. The red solid lines denote $p=1$. The red circle corresponds to the parameters we adopted ($\lambda=0.05, g=0.1$).}
\label{F4}
\end{figure}
Using $f_0 V_0 = 10^5$, $\lambda = 0.05$, and $g = 0.1$, which is shown
by the red circle in Fig. \ref{F4},
we obtain
\begin{align*}
\Omega_{\phi} &\approx 0.697 \,, \\
\Omega_{\rm m} &\approx 0.303 \,, \\
w_{\phi} &\approx - 0.96 \,.
\end{align*}
These are dark energy density, dark matter density, and the EOS parameter of dark energy of the Universe today.
Note that the eigenvalues of this fixed point are
\begin{equation*}
\omega_\pm\approx -1.25 \pm 3.415 \times 10^4 \;i\,,
\end{equation*}
and then this solution is of course stable.
This also shows the typical time scale to approach the solution {\bf I} is one e-folding time.
This means that by the D-BIonic gravity theory we may be able to solve the coincidence problem,
which is difficult to realise in the original coupled quintessence model.
\section{Conclusions}
\label{conclusions}
In this work, we study the cosmological dynamics of the D-BIonic and DBI scalar field coupled with matter fluid.
We assume the exponential forms for the potential and the coupling functions.
We find two interesting analytic solutions of the D-BIonic
theory as well as the DBI theory,
which describe the accelerated expansion of the Universe.
One is similar to the conventional quintessence in the DBI theory, and the scalar field energy density becomes dominant.
The other one is a new scaling solution
because it is found from non-canonical kinetic term as well as the matter coupling term.
This gives non-zero density parameter $\Omega_{\rm m}$, whose value depends on the coupling constants.
For the original coupled quintessence model, although we have a solution that may solve the coincidence problem,
there is some difficulty such that it requires a large coupling constant between dark energy (scalar field) and matter fluid,
which is inconsistent with observational data from CMB.
However, in the case of the D-BIonic
theory,
we find a successful coupled quintessence model by use of a newly found scaling solution with small coupling constant $g$.
This may naturally solve the coincidence problem as well because the density parameter $\Omega_{\rm m}$ is
the value of the attractor solution.
The solution is expected to satisfy
the observational data of the Universe for dark energy as well as the solar system constraint for the screening.
We find that the D-BIonic
can solve both the dark energy problem and the coincidence problem.
Finally, since our analysis is only the background behaviour of the Universe, we have to analyse the details furthermore, including
the evolution of density perturbations as well as the CMB data, in order to confirm our model.
Furthermore this work has been based on exponential forms of the inverse D3-brane-like tension $f(\phi)$,
the potential term $V(\phi)$, and the conformal factor $A(\phi)$,
which can be extended
to be power-law functions.
We leave
them for future works.
\section*{Acknowledgement}
S.P. is supported by Japanese Government (Monbukagakusho) Scholarship.
This work was supported in part by Grants-in-Aid from the
Scientific Research Fund of the Japan Society for the Promotion of Science
No. 16K05362 (K.M.) and No. 16K17709 (S.M.).
\newpage
\section{Introduction}
This article concerns the phase retrieval problem as follows: let $\mathbf{z}\in\mathcal{C}^n$ be an unknown vector, given $m$ known sensing vectors $\{\mathbf{a}_i\}_{i=1}^m\in\mathcal{C}^n$ and the observations
\begin{equation}\label{eq:problem}
y_i=|\mathbf{a}_i^*\mathbf{z}|, i=1,2,\cdots,m,
\end{equation}
then can we reconstruct $\mathbf{z}$ from the observations $\{y_i\}_{i=1}^m$?
Many algorithms have been proposed for this problem, including approaches based on convex relaxation to semidefinite optimization~\cite{Chai2011,Candes_PhaseLift,Waldspurger2015,Candes2014,Gross2015}, convex relaxation to linear program~\cite{pmlr-v54-bahmani17a,Goldstein2016,Hand2016,Hand20162,NIPS2018_8082}, nonconvex approaches based on Wirtinger flows, i.e., gradient flow in the complex setting~\cite{Candes7029630,NIPS2015_5743,Zhang:2016:PNP:3045390.3045499,NIPS2016_6319,cai2016,NIPS2016_6061,Soltanolkotabi2017,Chen2018,Candes7029630,Soltanolkotabi2017,7541725}, alternate minimization (Gerchberg-Saxton) algorithm and its variants~\cite{Gerchberg72,Fienup78,Fienup82,Bauschke03,Waldspurger2016,Netrapalli7130654,Zhang2017,zhang2020}, and algorithms based on Douglas-Rachford splitting~\cite{doi:10.1137/18M1170364}. This work investigates the randomized Kaczmarz algorithm, which is simple to implement and has shown competitive performance in simulations. This algorithm was first proposed by Wei in \cite{Wei_2015} and it is shown that the method performs comparably with the state-of-the-art Wirtinger flow methods, when the
sensing vectors are from real or complex Gaussian distributions, or when they follow the unitary model or the coded diffraction pattern (CDP) model. The work also includes a preliminary convergence analysis. The convergence analysis is improved by Tan and Vershynin in \cite{10.1093/imaiai/iay005}, which shows that the algorithm is successful when there are as many Gaussian measurements as the dimension, up to a constant factor, and the initialization is in a ``basin of linear convergence''. However, their results only apply when the signal $\mathbf{z}$ and the measurement vectors $\{\mathbf{a}_i\}_{i=1}^m$ are real-valued. As discussed in~\cite[Section 7.2]{10.1093/imaiai/iay005}, there is no straightforward generalization of their technique from the real-valued case to the complex-valued case. In a related work~\cite{tan2019online}, Tan and Vershynin show that constant step
size online stochastic gradient descent (SGD) converges from arbitrary initializations for a
non-smooth, non-convex amplitude squared loss objective, and this online SGD is strongly reminiscent to the randomized Kaczmarz algorithm from numerical analysis. However, the analysis in \cite{tan2019online} is still based on the real-valued setting.
\subsection{Randomized Kaczmarz algorithm for solving linear systems}
The Kaczmarz method \cite{kaczmarz1379} is an iterative algorithm for solving a system of linear equations
\begin{equation}\label{eq:linearsystem}
\mathbf{a}_i^*\mathbf{x}=b_i, i=1,\cdots,m.
\end{equation}
In the $k$-th iteration, a linear equation (out of $m$ equations) is selected and the new estimate $\mathbf{x}^{(k+1)}$ is obtained by projecting the current estimate $\mathbf{x}^{(k)}$ to the hyperplane corresponding to the solution set of the linear equation. The deterministic version of the Kaczmarz method usually selects the linear equation in a cyclic manner, and the randomized Kaczmarz method selects the linear equation randomly. When the randomized
Kaczmarz method randomly picks up a system with the probability proportional to $1/\|\mathbf{a}_i\|^2$, the randomized Kaczmarz method has been shown to converge linearly in \cite{Strohmer2008} with a rate of $1-\kappa(\mathbf{A})$, where $\kappa(\mathbf{A})$ is the condition number of $\mathbf{A}=[\mathbf{a}_1,\cdots,\mathbf{a}_m]\in\mathbb{R}^{m\times n}$. For additional analysis on this method and its variants, we refer the readers to~\cite{NEEDELL2014199,Needell2016}.
\subsection{Randomized Kaczmarz algorithm for phase retrieval}
The randomized Kaczmarz algorithm can be generalized to the phase retrieval problem \eqref{eq:problem} naturally. While the solution of each equation is not a hyperplane anymore, the projection to the solution set still has an explicit formula as follows. Let $\mathcal{A}_i=\{\mathbf{z}: y_i=|\mathbf{a}_i^*\mathbf{z}|\}$, then
\begin{equation}\label{eq:projection}
P_{\mathcal{A}_i}(\mathbf{x})=\mathbf{x}-\left(1-\frac{y_i}{|\mathbf{a}_i^*\mathbf{x}|}\right) \mathbf{x}^*\frac{\mathbf{a}_i\mathbf{a}_i^*}{\|\mathbf{a}_i\|^2}.
\end{equation}
A randomized Kaczmarz update projects the estimate to the nearest point in $\mathcal{A}_{r(k)}$ at the $k$-th iteration, where $r(k)$ is randomly chosen from $\{1, \cdots, m\}$, and the algorithm can be written as
\begin{equation}\label{eq:Kaczmarz}
\mathbf{x}^{(k+1)}=P_{\mathcal{A}_{r(k)}}(\mathbf{x}^{(k)}).
\end{equation}
\subsection{Contribution and Main Result}
The main contribution of this paper is a guarantee on the linear convergence of randomized Kaczmarz algorithm as follows:
\begin{itemize}
\item First, this paper establishes a deterministic condition such that the algorithm converges linearly with high probability. Intuitively, the condition requires that an objective function is strongly convex in a neighborhood around the true signal $\mathbf{z}$.
\item Second, this paper proves that when the sensing vectors are sampled uniformly from a unit sphere in $\mathcal{C}^n$, and the number of sensing vectors $m$ satisfies $m>O(n\log n)$ as $m,n\rightarrow\infty$, the deterministic condition is satisfied with high probability. A similar result is also obtained for the unitary model, where the sensing vectors are from the columns of random orthogonal matrices.
\end{itemize}
This paper generalizes the result in Tan and Vershynin in \cite{10.1093/imaiai/iay005} from the real-valued case to the complex-valued case. The generalization is not straightforward and the approach to obtain the deterministic condition is very different in this work. In comparison, since the phases can only be either $1$ or $-1$ in the real-valued case, \cite{10.1093/imaiai/iay005} divides all sensing vectors into ``good measurements'' with correct phases and ``bad measurements'' with possibly incorrect phases, and control the total influence of bad measurements. However, as remarked in \cite[Section 7.2]{10.1093/imaiai/iay005}, this method would not work in the complex-valued case since the phases are no longer $\pm1$ and each measurement contributes an error that scales with the phase difference, and we can no longer simply sum up the influence of bad measurements as in \cite[Lemma 2.1]{10.1093/imaiai/iay005}.
We remark that if $\mathbf{a}_i$ are scaled such that $\|\mathbf{a}_i\|=1$ for all $1\leq i\leq m$, then the update formula \eqref{eq:Kaczmarz} can also be considered as the stochastic gradient descent algorithm that minimizes $\frac{1}{m}\sum_{i=1}^m\Big(|\mathbf{a}_i^*\mathbf{x}|-y_i \Big)^2$, with step size chosen to be $1$. In this sense, our work is related to \cite{bassily2018exponential}, in which the convergence of the stochastic gradient descent algorithm for generic objective functions has been studied, and both their work and this work are based on the convexity of the objective function. However, we remark that their result cannot be directly applied here since it assumes a specific step size that depends on the smoothness constant and the Polyak-Lojasiewicz condition of the objective function, which is unclear for this objective function.
\subsection{Notation}
Throughout the paper, $C$ and $c$ are absolute constants that do not depend on $m, n$, and can change from line to line. We also implicitly assume that $m,n$ are sufficiently large, for example, we write $m>n+10$ when $m>n\log n$ is assumed. $\mathrm{Re}(x)$ represents the real component of a complex number $x$. For a set $\mathcal{S}$, $|\mathcal{S}|$ represents the cardinality of the set.
\section{Main Result}
We first present the main contribution of this paper, as well as a sketch of the proof and some discussion.
\begin{thm}\label{thm:main}
(a) Assuming that the sensing vectors $\{\mathbf{a}_i\}_{i=1}^m$ are i.i.d. sampled from the uniform distribution on the unit sphere in $\mathcal{C}^n$, and in each iteration, the randomized Kaczmarz algorithm randomly picks up each equation with probability $1/m$. Then there exist absolute constants $C_0, c_0, L$ that do not depend on $m,n$ such that if $m \geq C_0n\log n$ as $m,n\rightarrow\infty$, then for all $\|\mathbf{x}^{(0)}-\mathbf{z}\|\leq c_0\sqrt{\delta_1}\|\mathbf{z}\|$ and $\epsilon>0$, we have \begin{equation}\label{eq:mainn}Pr\left(\|\mathbf{x}^{(k)}-\mathbf{z}\|^2\leq \epsilon \|\mathbf{x}^{(0)}-\mathbf{z}\|^2\right) \geq 1-\delta_1-\frac{\left(1-\frac{L}{n}\right)^k}{\epsilon\left(1-\delta_1\right)}-C\exp(-cn). \end{equation}
(b) For the unitary model in which $m=Kn$ for some integer $K$, and for any $1\leq k\leq K$, $[\mathbf{a}_{(k-1)n+1},\cdots,\mathbf{a}_{kn}]\in\mathcal{C}^{n\times n}$ is a random orthogonal matrix in $\mathcal{C}^{n\times n}$, there exist constants $C_0, c_0, L$ such that if $m \geq C_0n\log n$ and $\sqrt{n}>\log^2 m$, then \eqref{eq:mainn} also holds as $m,n\rightarrow\infty$.
\end{thm}
This theorem shows the linear convergence of the algorithm as follows: if $\delta_1<\frac{1}{2}$, and $k\geq \log(2\epsilon\delta_2)/\log(1-L/n)$, then with probability at least $1-\delta_1-\delta_2-C\exp(-cn)$, we have $\|\mathbf{x}^{(k)}-\mathbf{z}\|^2\leq \epsilon \|\mathbf{x}^{(0)}-\mathbf{z}\|^2$. If we let $\delta_1=\delta_2=\delta/2$, the number of iterations to achieve accuracy $\epsilon$ with probability $1-\delta$ is in the order of $O\left(n\log\frac{1}{\epsilon\delta}\right)$.
The proof of Theorem~\ref{thm:main} is divided into three steps. The first step establishes a condition in \eqref{eq:assumption}, with which the algorithm converges with high probability. This condition describes the regularity of an objective function in a local neighborhood around $\mathbf{z}$. The second step establishes an explicit lower bound of the key parameter $L$ in the condition \eqref{eq:assumption}. In the third step, we apply tools from random matrix theory and measure concentration to analyze the explicit formula of $L$ and show that it can be chosen as a constant as $n,m\rightarrow\infty$. The three steps for part (a) are described in Sections~\ref{sec:step1}, ~\ref{sec:step2}, and ~\ref{sec:step3} respectively, and the third step for part (b) is described in Section~\ref{sec:step4}. The main results of these sections are summarized in Theorems~\ref{thm:main1} and ~\ref{thm:main2}, ~\ref{thm:main3}, and ~\ref{thm:step4}. Combining these theorems, we have the proof of Theorem~\ref{thm:main}.
\begin{proof}[Proof of Theorem~\ref{thm:main}]
WLOG we assume $\|\mathbf{z}\|=1$ for the rest of the paper. In the statement of Theorem~\ref{thm:main3}, $\alpha$ in \eqref{eq:main3} can be chosen such that $\alpha>1$ and $\frac{6.6}{\alpha-1}<\frac{c_1}{72}$ and $c_0$ in \eqref{eq:main3} can be chosen such that $(2+4\alpha)C4\sqrt{2}c_0\alpha<\frac{c_1}{72}$. Since when $\alpha$ is large, $c_0$ can be chosen in the order of $1/\alpha^2$ and $c_0\alpha$ is in the order of $1/\alpha$, there exists a choice of $(\alpha,c_0)$ such that the assumption $2c_0\alpha<1$ in Theorem~\ref{thm:main3} holds. Then Theorem~\ref{thm:main2} and Theorem~\ref{thm:main3} imply that $L=\frac{c_1}{72}$ satisfies the assumption \eqref{eq:assumption}. With this assumption satisfied, Theorem~\ref{thm:main1} implies Theorem~\ref{thm:main}(a). The proof of Theorem~\ref{thm:main}(b) is similar to the proof of (a), with Theorem~\ref{thm:main3} replaced by Theorem~\ref{thm:step4}.
\end{proof}
\section{Step 1: Convergence under a deterministic condition}\label{sec:step1}
This section connects the convergence of the randomized Kaczmarz algorithm with the function
\begin{equation}\label{eq:obj}
f(\mathbf{x})=\frac{1}{m}\sum_{i=1}^m\Big(|\mathbf{a}_i^*\mathbf{x}|-y_i \Big)^2
\end{equation}and its directional derivatives defined by
\[
f'_{\mathbf{v}}(\mathbf{x})=\lim_{t\rightarrow 0^+}\frac{f(\mathbf{x}+t\mathbf{v})-f(\mathbf{x})}{t}= \frac{1}{m}\sum_{i=1}^m\Big(1-\frac{y_i}{|\mathbf{a}_i^*\mathbf{x}|}\Big)\big(\mathbf{a}_i^*\mathbf{v}\mathbf{x}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{x}\mathbf{v}^*\mathbf{a}_i\big).
\]
The result of this section depends on the following local regularity assumption on $f$:
\begin{equation}\label{eq:assumption}
f(\mathbf{x})+f'_{\mathbf{z}-\mathbf{x}}(\mathbf{x})+\frac{L}{n}\|\mathbf{z}-\mathbf{x}\|^2 \leq f(\mathbf{z}),\,\,\text{for all $\mathbf{x}$ such that $\|\mathbf{x}-\mathbf{z}\|\leq c_0$.}
\end{equation}
We remark that this formulation parallels the definition of strong convexity, so this assumption is related to the strong convexity of $f(\mathbf{x})$ in the local neighborhood $\mathbf{x}\in B(\mathbf{z},c_0)$. However, it is slightly less restrictive in the sense that it only requires \eqref{eq:assumption} for the fixed point $\mathbf{z}$ (if $\mathbf{z}$ is replaced by any $\mathbf{y}\in B(\mathbf{z},c_0)$, then it is equivalent to strong convexity).
We also remark that this objective function \eqref{eq:obj} has been studied in \cite{NIPS2016_6319,8049465,tan2019online} and its local regularity property has been studied in \cite{NIPS2016_6319,8049465}. However, these works study a different regularity assumption in~\cite[(12)]{NIPS2016_6319} and~\cite[(39)]{8049465}, which can be written as:
\[
f'_{\mathbf{x}-\mathbf{z}}(\mathbf{x})\geq \frac{\mu}{2}\|f'(\mathbf{x})\|^2+\frac{\lambda}{2}\|\mathbf{x}-\mathbf{z}\|^2.
\]
In addition, similar to \cite{10.1093/imaiai/iay005}, these works only theoretically analyze the real-valued setting.
The main result of this section is summarized as follows. It states that under the assumption \eqref{eq:assumption}, the algorithm converges linearly with high probability.
\begin{thm}\label{thm:main1}
Assume that $\|\mathbf{z}\|=1$ and $\|\mathbf{a}_i\|=1$ for all $1\leq i\leq m$, \eqref{eq:assumption} holds, the randomized Kaczmarz algorithm randomly picks up an equation with the same probability $1/m$, and the algorithm is initialized such that $\|\mathbf{x}^{(0)}-\mathbf{z}\|\leq c_0\sqrt{\delta_1}$ for some $0\leq \delta_1\leq 1$. Then for any $\epsilon>0$,
\begin{equation}\label{eq:main1}
\Pr(\|\mathbf{x}^{(k)}-\mathbf{z}\|^2\leq \epsilon \|\mathbf{x}^{(0)}-\mathbf{z}\|^2) \geq 1-\delta_1-\frac{\left(1-\frac{L}{n}\right)^k}{\epsilon\left(1-\delta_1\right)}.
\end{equation}
\end{thm}
\begin{proof}
Let $P$ be the random mapping $P_{\mathcal{A}_i}$ where $i$ is uniformly sampled from $\{1,\cdots, m\}$, apply the projection formula \eqref{eq:projection} with the assumption $\|\mathbf{a}_i\|=1$ for all $1\leq i\leq m$, then
\begin{align*}
&\operatorname{\mathbb{E}}_{P}\|P\mathbf{x}-\mathbf{z}\|^2=\operatorname{\mathbb{E}}_{i\sim\{1,\cdots,m\}}\Big\|\mathbf{x}-\Big(1-\frac{y_i}{|\mathbf{a}_i^*\mathbf{x}|}\Big) \mathbf{a}_i\mathbf{a}_i^*\mathbf{x}-\mathbf{z}\Big\|^2\\=&\operatorname{\mathbb{E}}_{i}\left[\Big(1-\frac{y_i}{|\mathbf{a}_i^*\mathbf{x}|}\Big)^2|\mathbf{a}_i^*\mathbf{x}|^2-\mathrm{Re} \left(2\Big(1-\frac{y_i}{|\mathbf{a}_i^*\mathbf{x}|}\Big)\mathbf{x}^*\mathbf{a}_i\mathbf{a}_i^*(\mathbf{x}-\mathbf{z})\right)\right]+\|\mathbf{z}-\mathbf{x}\|^2\\
=&\operatorname{\mathbb{E}}_i\Big[({|\mathbf{a}_i^*\mathbf{x}|}-{y_i})^2\Big]+2 \operatorname{\mathbb{E}}_i \left[\mathrm{Re}\left( \Big(1-\frac{y_i}{|\mathbf{a}_i^*\mathbf{x}|}\Big)\mathbf{x}^*\mathbf{a}_i\mathbf{a}_i^*(\mathbf{z}-\mathbf{x})\right)\right]+\|\mathbf{z}-\mathbf{x}\|^2
\\
=&f(\mathbf{x})+f'_{\mathbf{z}-\mathbf{x}}(\mathbf{x})+\|\mathbf{z}-\mathbf{x}\|^2\leq \Big(1-\frac{L}{n}\Big)\|\mathbf{z}-\mathbf{x}\|^2,
\end{align*}
where the last inequality applies the assumption \eqref{eq:assumption} and $f(\mathbf{z})=0$, and the last equality uses the fact that $y+y^*=2\mathrm{Re}(y)$ for any scalar $y$ (a fact that we will apply repeatedly later).
The rest of the proof follows from the proof in \cite[Section 3]{10.1093/imaiai/iay005}. Let $\tau=\min\{k: \|\mathbf{x}^{(k)}-\mathbf{z}\| > c_0\}$ be the first time the iterate leaves the ball $B(\mathbf{z},c_0)$; then following the proof in \cite[Theorem 3.1]{10.1093/imaiai/iay005}, we have $
\Pr(\tau<\infty)\leq \left(\frac{c_0\sqrt{\delta_1}}{c_0}\right)^2=\delta_1.$ Following the proof in \cite[Corollary 3.2]{10.1093/imaiai/iay005}, \eqref{eq:main1} is proved.
\end{proof}
\section{The property of an implicit objective function}\label{sec:step2}
In this section, we will give an explicit formula for $L$ defined in \eqref{eq:assumption}. The formula will be based on a few additional definitions as follows. Let $f_i(\mathbf{x})=({|\mathbf{a}_i^*\mathbf{z}|}-{|\mathbf{a}_i^*\mathbf{x}|})^2$, and define the first and the second directional derivatives of $f_i$ in the direction $\mathbf{v}$ at $\mathbf{x}$ by
\begin{align}
f'_{i,\mathbf{v}}(\mathbf{x})=\lim_{t\rightarrow 0}\frac{f_i(\mathbf{x}+t\mathbf{v})-f_i(\mathbf{x})}{t},\\
f''_{i,\mathbf{v}}(\mathbf{x})=\lim_{t\rightarrow 0}\frac{f'_{i,\mathbf{v}}(\mathbf{x}+t\mathbf{v})-f'_{i,\mathbf{v}}(\mathbf{x})}{t}.
\end{align}
It can be shown that the directional derivatives have explicit expressions
\begin{align*}
f'_{i,\mathbf{v}}(\mathbf{x})&=\mathbf{a}_i^*\mathbf{x}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\mathbf{x}^*\mathbf{a}_i-|\mathbf{a}_i^*\mathbf{z}|\frac{\mathbf{a}_i^*\mathbf{x}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\mathbf{x}^*\mathbf{a}_i}{|\mathbf{a}_i^*\mathbf{x}|},\\
f''_{i,\mathbf{v}}(\mathbf{x})&=2\mathbf{a}_i^*\mathbf{v}\bv^*\mathbf{a}_i-|\mathbf{a}_i^*\mathbf{z}|\frac{2\mathbf{a}_i^*\mathbf{v}\bv^*\mathbf{a}_i}{|\mathbf{a}_i^*\mathbf{x}|}+|\mathbf{a}_i^*\mathbf{z}|\frac{(\mathbf{a}_i^*\mathbf{x}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\mathbf{x}^*\mathbf{a}_i)^2}{2|\mathbf{a}_i^*\mathbf{x}|^3}.
\end{align*}
In addition, since $f(\mathbf{x})=\frac{1}{m}\sum_{i=1}^mf_i(\mathbf{x})$, the first and the second directional derivatives of $f(\mathbf{x})$ are $f'_{\mathbf{v}}(\mathbf{x})=\frac{1}{m}\sum_{i=1}^mf'_{i,\mathbf{v}}(\mathbf{x})$ and $f''_{\mathbf{v}}(\mathbf{x})=\frac{1}{m}\sum_{i=1}^mf''_{i,\mathbf{v}}(\mathbf{x})$.
\begin{thm}\label{thm:main2}
For any $\mathbf{v}, \mathbf{z}\in\mathcal{C}^n$ with $\|\mathbf{v}\|=\|\mathbf{z}\|=1$ and $\beta>0$, define
\begin{equation}\label{eq:define_S}
\mathcal{S}(\mathbf{v},\beta)=\{1\leq i\leq m: \beta |\mathbf{a}_i^*\mathbf{v}|\geq |\mathbf{a}_i^*\mathbf{z}|\}
\end{equation}
then for any $\alpha>1$,
\begin{align}\nonumber
L=&\frac{n}{m}\min_{\|\mathbf{v}\|=1} \left\{\frac{m}{2}f''_{\mathbf{v}}(\mathbf{z})- \frac{6}{\alpha-1}\sum_{i=1}^m|\mathbf{a}_i^*\mathbf{v}|^2-(2+4\alpha)\sum_{i\in\mathcal{S}(\mathbf{v},c_0\alpha)}|\mathbf{a}_i^*\mathbf{v}|^2\right\}\\
=&\frac{n}{m}\min_{\|\mathbf{v}\|=1} \left\{\!\!\frac{1}{2}\!\sum_{i=1}^m\!\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\!\mathbf{a}_i\!\!+\!\!\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\!\mathbf{a}_i\!)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}\!-\! \frac{6}{\alpha\!-\!1}\!\!\sum_{i=1}^m|\mathbf{a}_i^*\mathbf{v}|^2\!-\!(2\!+\!4\alpha)\!\!\!\!\!\!\!\!\sum_{i\in\mathcal{S}(\mathbf{v},c_0\alpha)}\!\!\!\!\!\!\!\!|\mathbf{a}_i^*\mathbf{v}|^2\!\!\right\}.\label{eq:define_L}
\end{align}
satisfies the assumption \eqref{eq:assumption}.
\end{thm}
\begin{proof}[Proof of Theorem~\ref{thm:main2}] We will first prove \eqref{eq:assumption} with $L$ defined in \eqref{eq:define_L} when $\|\mathbf{x}-\mathbf{z}\|= c_0$. For this case, there exists $\mathbf{v}=\frac{\mathbf{x}-\mathbf{z}}{\|\mathbf{x}-\mathbf{z}\|}$ such that $\mathbf{x}=\mathbf{z}+ c_0\mathbf{v}$ and $\|\mathbf{v}\|=1$. For any $i\not\in\mathcal{S}(\mathbf{v},c_0\alpha)$, we have $|\mathbf{a}_i^*\mathbf{z}|\geq c_0\alpha |\mathbf{a}_i^*\mathbf{v}|$ and the triangle inequality implies
\[
\frac{|\mathbf{a}_i^*(\mathbf{x}-\mathbf{z})|}{|\mathbf{a}_i^*\mathbf{z}|} \leq \frac{1}{\alpha}, \,\,\, \left|\frac{|\mathbf{a}_i^*\mathbf{z}|}{|\mathbf{a}_i^*\mathbf{x}|}-1\right|\leq \frac{1}{\alpha-1},\,\,\,\frac{|\mathbf{a}_i^*\mathbf{z}|}{|\mathbf{a}_i^*\mathbf{x}|}\leq \frac{\alpha}{\alpha-1}.
\]
Applying Lemma II.13 from \cite{zhang2020},
\[
\left|\frac{\mathbf{a}_i^*\mathbf{x}}{|\mathbf{a}_i^*\mathbf{x}|}-\frac{\mathbf{a}_i^*\mathbf{z}}{|\mathbf{a}_i^*\mathbf{z}|}\right|\leq 2\min\Big(\frac{|\mathbf{a}_i^*(\mathbf{x}-\mathbf{z})|}{|\mathbf{a}_i^*\mathbf{z}|},1\Big)=\frac{2}{\alpha}.
\]
Applying these inequalities, we have that for $i\not\in\mathcal{S}(\mathbf{v},c_0\alpha)$,
\begin{align}
\nonumber&|f''_{i,\mathbf{v}}(\mathbf{x})-f''_{i,\mathbf{v}}(\mathbf{z})|\leq 2\left|\frac{|\mathbf{a}_i^*\mathbf{z}|}{|\mathbf{a}_i^*\mathbf{x}|}-1\right| \mathbf{a}_i^*\mathbf{v}\bv^*\mathbf{a}_i+\left|\frac{|\mathbf{a}_i^*\mathbf{z}|}{|\mathbf{a}_i^*\mathbf{x}|}-1\right|\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\!\mathbf{a}_i\!+\!\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\!\mathbf{a}_i)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}\\\nonumber&+ \frac{|\mathbf{a}_i^*\mathbf{z}|}{2|\mathbf{a}_i^*\mathbf{x}|} \Big[\big(\frac{\mathbf{a}_i^*\mathbf{x}}{|\mathbf{a}_i^*\mathbf{x}|}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\frac{\mathbf{x}^*\mathbf{a}_i}{|\mathbf{x}^*\mathbf{a}_i|}\big)^2-\big(\frac{\mathbf{a}_i^*\mathbf{z}}{|\mathbf{a}_i^*\mathbf{z}|}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\frac{\mathbf{z}^*\mathbf{a}_i}{|\mathbf{z}^*\mathbf{a}_i|}\big)^2\Big]\\\nonumber
\leq & \frac{2}{\alpha-1}\mathbf{a}_i^*\mathbf{v}\bv^*\mathbf{a}_i+\frac{1}{\alpha-1}\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\mathbf{a}_i)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}\\\nonumber&+ \frac{|\mathbf{a}_i^*\mathbf{z}|}{2|\mathbf{a}_i^*\mathbf{x}|} \left[\big(\frac{\mathbf{a}_i^*\mathbf{x}}{|\mathbf{a}_i^*\mathbf{x}|}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\frac{\mathbf{x}^*\mathbf{a}_i}{|\mathbf{x}^*\mathbf{a}_i|}\big)-\big(\frac{\mathbf{a}_i^*\mathbf{z}}{|\mathbf{a}_i^*\mathbf{z}|}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\frac{\mathbf{z}^*\mathbf{a}_i}{|\mathbf{z}^*\mathbf{a}_i|}\big)\right]\\\nonumber&\left[\big(\frac{\mathbf{a}_i^*\mathbf{x}}{|\mathbf{a}_i^*\mathbf{x}|}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\frac{\mathbf{x}^*\mathbf{a}_i}{|\mathbf{x}^*\mathbf{a}_i|}\big)+\big(\frac{\mathbf{a}_i^*\mathbf{z}}{|\mathbf{a}_i^*\mathbf{z}|}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\frac{\mathbf{z}^*\mathbf{a}_i}{|\mathbf{z}^*\mathbf{a}_i|}\big)\right]\\\leq
&\frac{2}{\alpha-1}|\mathbf{a}_i^*\mathbf{v}|^2+\frac{2}{\alpha-1}|\mathbf{a}_i^*\mathbf{v}|^2+\frac{\alpha}{2(\alpha-1)}\left[\frac{4}{\alpha}|\mathbf{a}_i^*\mathbf{v}|\right]\Big[4|\mathbf{a}_i^*\mathbf{v}|\Big]\leq \frac{12}{\alpha-1}|\mathbf{a}_i^*\mathbf{v}|^2,\label{eq:S1}
\end{align}
where the intermediate inequalities use the fact $|\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\mathbf{a}_i|\leq |\mathbf{a}_i^*\mathbf{z}||\mathbf{a}_i^*\mathbf{v}|.$
For any $i\in\mathcal{S}(\mathbf{v},c_0\alpha)$ and any $0\leq t\leq c_0$, we have
\[\left|\frac{\mathbf{a}_i^*(\mathbf{z}+t\mathbf{v})}{|\mathbf{a}_i^*(\mathbf{z}+t\mathbf{v})|}-\frac{\mathbf{a}_i^*(\mathbf{z}+c_0\mathbf{v})}{|\mathbf{a}_i^*(\mathbf{z}+c_0\mathbf{v})|}\right|\leq 2\]
and
\begin{align}\nonumber
&|f_{i,\mathbf{v}}'(\mathbf{z}+t\mathbf{v})-f_{i,\mathbf{v}}'(\mathbf{z}+c_0\mathbf{v})|\nonumber\\=& \left|2(t-c_0)|\mathbf{a}_i^*\mathbf{v}|^2-2|\mathbf{a}_i^*\mathbf{z}|\mathrm{Re}\left(\left(\frac{\mathbf{a}_i^*(\mathbf{z}+t\mathbf{v})}{|\mathbf{a}_i^*(\mathbf{z}+t\mathbf{v})|}-\frac{\mathbf{a}_i^*(\mathbf{z}+c_0\mathbf{v})}{|\mathbf{a}_i^*(\mathbf{z}+c_0\mathbf{v})|}\right)\mathbf{v}^*\mathbf{a}_i\right)\right|\nonumber\\\leq&2(c_0-t)|\mathbf{a}_i^*\mathbf{v}|^2+4 |\mathbf{a}_i^*\mathbf{z}||\mathbf{a}_i^*\mathbf{v}|\leq (2(c_0-t)+4c_0\alpha)|\mathbf{a}_i^*\mathbf{v}|^2.\label{eq:S2}
\end{align}
Combining the two cases studied in \eqref{eq:S1} and \eqref{eq:S2}, we have\begin{align*}
&m [ f(\mathbf{x})-f(\mathbf{z})+f'_{\mathbf{x}-\mathbf{z}}(\mathbf{x})]=m[f(\mathbf{z}+c_0\mathbf{v})-f(\mathbf{z})-c_0f'_{\mathbf{v}}(\mathbf{z}+c_0\mathbf{v})]\\=&m \int_{t=0}^{c_0}\Big[f'_{\mathbf{v}}(\mathbf{z}+t\mathbf{v})-f'_{\mathbf{v}}(\mathbf{z}+c_0\mathbf{v})\Big]{\,\mathrm{d}} t\\=&
\sum_{i\not\in\mathcal{S}(\mathbf{v},c_0\alpha)}\int_{t=0}^{c_0}\Big[f_{i,\mathbf{v}}'(\mathbf{z}+t\mathbf{v})-f_{i,\mathbf{v}}'(\mathbf{z}+c_0\mathbf{v})\Big]{\,\mathrm{d}} t\\&+\sum_{i\in\mathcal{S}(\mathbf{v},c_0\alpha)}\int_{t=0}^{c_0}\Big[f_{i,\mathbf{v}}'(\mathbf{z}+t\mathbf{v})-f_{i,\mathbf{v}}'(\mathbf{z}+c_0\mathbf{v})\Big]{\,\mathrm{d}} t\\
= & \!-\!\!\!\!\!\!\sum_{i\not\in\mathcal{S}(\mathbf{v},c_0\alpha)}\!\!\!\!\!\int_{t=0}^{c_0}\!\!\int_{a=t}^{c_0}\!\! f_{i,\mathbf{v}}''(\mathbf{z}+a\mathbf{v}) {\,\mathrm{d}} a {\,\mathrm{d}} t+\!\!\!\!\!\!\!\sum_{i\in\mathcal{S}(\mathbf{v},c_0\alpha)}\!\!\!\!\!\int_{t=0}^{c_0}\!\!\Big[f_{i,\mathbf{v}}'(\mathbf{z}+t\mathbf{v})-f_{i,\mathbf{v}}'(\mathbf{z}+c_0\mathbf{v})\Big]{\,\mathrm{d}} t
\\\leq & - \sum_{i\not\in\mathcal{S}(\mathbf{v},c_0\alpha)}\int_{t=0}^{c_0}\int_{a=t}^{c_0}\Big[f''_{i,\mathbf{v}}(\mathbf{z})-\frac{12}{\alpha-1}|\mathbf{a}_i^*\mathbf{v}|^2\Big]{\,\mathrm{d}} a {\,\mathrm{d}} t\\&+\sum_{i\in\mathcal{S}(\mathbf{v},c_0\alpha)}\int_{t=0}^{c_0}\Big[(2(c_0-t)+4c_0\alpha)|\mathbf{a}_i^*\mathbf{v}|^2\Big]{\,\mathrm{d}} t
\\=&c_0^2\left\{- \sum_{i\not\in\mathcal{S}(\mathbf{v},c_0\alpha)} \Big[\frac{f''_{i,\mathbf{v}}(\mathbf{z})}{2}-\frac{6}{\alpha-1}|\mathbf{a}_i^*\mathbf{v}|^2\Big]+\sum_{i\in\mathcal{S}(\mathbf{v},c_0\alpha)}(1+4\alpha)|\mathbf{a}_i^*\mathbf{v}|^2 \right\}
\\\leq &c_0^2\left\{ - \sum_{i=1}^m \Big[\frac{f''_{i,\mathbf{v}}(\mathbf{z})}{2}-\frac{6}{\alpha-1}|\mathbf{a}_i^*\mathbf{v}|^2\Big]\!+\!\!\!\!\!\!\sum_{i\in\mathcal{S}(\mathbf{v},c_0\alpha)}\frac{f''_{i,\mathbf{v}}(\mathbf{z})}{2}+\!\!\!\!\!\sum_{i\in\mathcal{S}(\mathbf{v},c_0\alpha)}\!\!\!\!\!(2+4\alpha)|\mathbf{a}_i^*\mathbf{v}|^2 \right\}\\
\leq &{\|\mathbf{x}-\mathbf{z}\|^2}\left\{ - \frac{m}{2}f''_{\mathbf{v}}(\mathbf{z}) + \sum_{i=1}^m \frac{6}{\alpha-1}|\mathbf{a}_i^*\mathbf{v}|^2+\sum_{i\in\mathcal{S}(\mathbf{v},c_0\alpha)}(2+4\alpha)|\mathbf{a}_i^*\mathbf{v}|^2 \right\},
\end{align*}
where the first inequality follows from \eqref{eq:S1} and \eqref{eq:S2}, and the last inequality applies the observation that $f''_{i,\mathbf{v}}(\mathbf{z})=\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\mathbf{a}_i)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}\leq 2|\mathbf{a}_i^*\mathbf{v}|^2$. Recalling the definition of $L$ in \eqref{eq:define_L}, we have proved \eqref{eq:assumption} with $L$ defined in \eqref{eq:define_L} when $\|\mathbf{x}-\mathbf{z}\|= c_0$.
When $\|\mathbf{x}-\mathbf{z}\|< c_0$, applying the same procedure we can show that \eqref{eq:assumption} holds with $L$ defined by
\begin{equation}\label{eq:define_L1}
\frac{n}{m}\min_{\|\mathbf{v}\|=1} \left\{\frac{m}{2}f''_{\mathbf{v}}(\mathbf{z})- \frac{6}{\alpha-1}\sum_{i=1}^m|\mathbf{a}_i^*\mathbf{v}|^2-(2+4\alpha)\sum_{i\in\mathcal{S}(\mathbf{v},\|\mathbf{x}-\mathbf{z}\|\alpha)}|\mathbf{a}_i^*\mathbf{v}|^2\right\}.
\end{equation}
By definition, $\|\mathbf{x}-\mathbf{z}\|< c_0$ implies that $\mathcal{S}(\mathbf{v},\|\mathbf{x}-\mathbf{z}\|\alpha)\subseteq \mathcal{S}(\mathbf{v},c_0\alpha)$. As a result, the expression in \eqref{eq:define_L1} is greater than or equal to $L$ defined in \eqref{eq:define_L}, and \eqref{eq:assumption} with $L$ defined in \eqref{eq:define_L} also holds when $\|\mathbf{x}-\mathbf{z}\| < c_0$, and Theorem~\ref{thm:main2} is proved.
\end{proof}
\section{The Gaussian model}\label{sec:step3}
In this section, we will show that under the Gaussian model, $L$ defined in \eqref{eq:assumption} and \eqref{eq:define_L} can be estimated and has a lower bound.
\begin{thm}\label{thm:main3}
Assuming that the sensing vectors $\{\mathbf{a}_i\}_{i=1}^m$ are selected uniformly and independently from the unit sphere in $\mathcal{C}^n$ and $2c_0\alpha>1$, then there exists $C_0$ that does not depend on $n$ and $m$ such that when $m>C_0n\log n$, with probability at least $1-Cn\exp(-cn)$, $L$ defined in \eqref{eq:define_L} has a lower bound
\begin{equation}\label{eq:main3}
L\geq \frac{c_1}{24}-\frac{6.6}{\alpha-1}-(2+4\alpha)C4\sqrt{2}c_0\alpha.
\end{equation}
\end{thm}
\begin{proof}
The proof is based on bounding each component in \eqref{eq:define_L} separately in Lemma~\ref{lemma:term2}, Lemma~\ref{lemma:term1}, and Lemma~\ref{lemma:term3}. Combining these estimations with $\delta=\exp(-n\log n)$ in Lemma~\ref{lemma:term2} and $\beta=2c_0\alpha$ in Lemma~\ref{lemma:term3}, Theorem~\ref{thm:main3} is proved.\end{proof}
The following is a restatement of~\cite[Lemma 5.2]{vershynin2010introduction}. While the original setting is real-valued, it is easy to generalize it to the complex-valued setting by treating $\mathcal{C}^n$ as $\mathbb{R}^{2n}$.
\begin{lemma}[Covering numbers of the sphere]\label{lemma:covering}
There exists an $\epsilon$-net over the unit sphere in $\mathcal{C}^{n}$ equipped with the Euclidean metric, with at most $(1+\frac{2}{\epsilon})^{2n}$ points.
\end{lemma}
The following lemma bounds the second term in \eqref{eq:define_L}. In fact, this term is the squared operator norm of $\mathbf{A}$ and has been well studied, and the following result follows from \cite[Lemma 5.8]{10.1093/imaiai/iay005}.
\begin{lemma}[The bound on the second term in \eqref{eq:define_L}]\label{lemma:term2}
If $m\geq C(n+\sqrt{\log(1/\delta)})$, then for any $\delta>0$,
\[
\Pr\left(\max_{\|\mathbf{v}\|=1}\frac{1}{m}\sum_{i=1}^m|\mathbf{a}_i^*\mathbf{v}|^2\leq \frac{1.1}{n}\right)\geq 1-\delta.
\]
\end{lemma}
The following lemma bounds the first term in \eqref{eq:define_L}. The proof is based on an $\epsilon$-net argument. The proof is deferred to Section~\ref{sec:term1}.
\begin{lemma}[The bound on the first term in \eqref{eq:define_L}]\label{lemma:term1}
For any fixed $\mathbf{z}, \mathbf{v}\in\mathbb{C}^n$ with $\|\mathbf{z}\|=\|\mathbf{v}\|=1$, assuming that $\{\mathbf{a}_i\}_{i=1}^m$ are selected uniformly and independently from the unit sphere in $\mathbb{C}^n$, there exists $c_1>0$ such that
\begin{align}\nonumber
& \Pr\left(\min_{\|\mathbf{v}\|=1}\frac{1}{m}\sum_{i=1}^m\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\mathbf{a}_i)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}\geq \frac{c_1m}{24n}\right)\\\geq& 1-\left(\exp\left(-\frac{m}{576}\right)+\exp\left(-\frac{m}{8}\right)\right)\left(1+\frac{192n}{c_1}\right)^{2n}. \label{eq:term1_5}
\end{align}
\end{lemma}
The following lemma bounds the last term in \eqref{eq:define_L}. We remark that it shares some similarities with the estimation in~\cite[Theorem 5.7]{10.1093/imaiai/iay005}, in the sense that both estimations depend on a ``wedge''. However, the ``wedge'' $\mathcal{S}(\mathbf{v},\beta/2)$ in the complex setting is more complicated and the argument based on VC theory in \cite{10.1093/imaiai/iay005} does not apply. Instead, the proof of Lemma~\ref{lemma:term3} consists of two parts: first, we control the size of $\mathcal{S}(\mathbf{v},\beta/2)$ based on an $\epsilon$-net argument. Then we can invoke the metric entropy/chaining argument in \cite{10.1093/imaiai/iay005}. The proof is deferred to Section~\ref{sec:term3}.
\begin{lemma}[The bound on the third term in \eqref{eq:define_L}]\label{lemma:term3}
Assume $\beta>1$ and $m\geq \max(n,\frac{n\log n}{2\beta^2})$, then there exists $C$ that does not depend on $n, m, \beta$ such that
\[
\Pr\!\left(\! \max_{\|\mathbf{v}\|=1} \! \!\!\!\sum_{i\in\mathcal{S}(\mathbf{v},\beta/2)}\!\!\!\!|\mathbf{a}_i^*\mathbf{v}|^2 \leq
C \sqrt{2}\beta\frac{m}{n}\!\right)\!\!\geq 1-2n\exp(-n)-2\exp\!\Big(\!-\frac{m\beta^4}{2}\!\Big)(1+2n)^{2n}.
\]
\end{lemma}
\subsection{Proof of Lemma~\ref{lemma:term1}} \label{sec:term1}
\begin{proof}
The proof will be based on an $\epsilon$-net argument. In the first step, we will show that for any $\mathbf{v}$ with $\|\mathbf{v}\|=1$, the event in the LHS of \eqref{eq:term1_5} happens with high probability. Second, we will establish a perturbation bound on $\mathbf{v}$ when the perturbation is smaller than $\epsilon$. Then a standard $\epsilon$-net argument will be applied.
First, we note that
\begin{align}\label{eq:term1_0}
&\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\mathbf{a}_i)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}= \frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\mathbf{a}_i)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2\|P_{\mathrm{Sp}(\mathbf{v},\mathbf{z})}(\mathbf{a}_i)\|^2} \|P_{\mathrm{Sp}(\mathbf{v},\mathbf{z})}(\mathbf{a}_i)\|^2
\\= &\frac{(\mathbf{b}_i^*\hat{\mathbf{z}}\hat{\mathbf{v}}^*\mathbf{b}_i+\mathbf{b}_i^*\hat{\mathbf{v}}\hat{\mathbf{z}}^*\mathbf{b}_i)^2}{2|\mathbf{b}_i^*\hat{\mathbf{z}}|^2} \|P_{\mathrm{Sp}(\mathbf{v},\mathbf{z})}(\mathbf{a}_i)\|^2,\nonumber
\end{align}
where $\mathbf{b}_i, \hat{\mathbf{z}}, \hat{\mathbf{v}}\in\mathcal{C}^2$ are defined by $\mathbf{b}_i=\frac{P_{\mathrm{Sp}(\mathbf{v},\mathbf{z})}(\mathbf{a}_i)}{\|P_{\mathrm{Sp}(\mathbf{v},\mathbf{z})}(\mathbf{a}_i)\|}$, $\hat{\mathbf{z}}=P_{\mathrm{Sp}(\mathbf{v},\mathbf{z})}\mathbf{z}$, and $\hat{\mathbf{v}}=P_{\mathrm{Sp}(\mathbf{v},\mathbf{z})}\mathbf{v}$. Here $\mathbf{b}_i$ is sampled uniformly and independently from a unit circle in $\mathcal{C}^2$, and it is independent of $\|P_{\mathrm{Sp}(\mathbf{v},\mathbf{z})}(\mathbf{a}_i)\|$.
Considering that $P_{\mathrm{Sp}(\mathbf{v},\mathbf{z})}(\mathbf{a}_i)$ is a projection of a random unit vector from $\mathcal{C}^n$ to $\mathcal{C}^2$, there exists $c_1>0$ such that (in fact, one can verify it with $c_1=0.8$)
\[
\Pr\left(\|P_{\mathrm{Sp}(\mathbf{v},\mathbf{z})}(\mathbf{a}_i)\|^2\geq \frac{c_1}{n}\right)\geq \frac{3}{4}.
\]
Then Hoeffding's inequality suggests that
\begin{equation}\label{eq:term1_1}
P\left(\sum_{i=1}^mI\left( \|P_{\mathrm{Sp}(\mathbf{v},\mathbf{z})}(\mathbf{a}_i)\|^2\geq \frac{c_1}{n}\right)\geq \frac{m}{2}\right)\geq 1-\exp\Big(-\frac{m}{8}\Big).
\end{equation}
For the component $\frac{(\mathbf{b}_i^*\hat{\mathbf{z}}\hat{\mathbf{v}}^*\mathbf{b}_i+\mathbf{b}_i^*\hat{\mathbf{v}}\hat{\mathbf{z}}^*\mathbf{b}_i)^2}{2|\mathbf{b}_i^*\hat{\mathbf{z}}|^2}$, note that $\mathbf{b}_i$ is distributed uniformly on a circle in $\mathcal{C}^2$, so WLOG we may assume that $\hat{\mathbf{z}}=[1,0]$ and $\hat{\mathbf{v}}=[\cos \theta, \sin\theta]$, and then
\begin{align*}
& \mathbf{b}_i^*\hat{\mathbf{z}}\hat{\mathbf{v}}^*\mathbf{b}_i+\mathbf{b}_i^*\hat{\mathbf{v}}\hat{\mathbf{z}}^*\mathbf{b}_i=2\mathrm{Re}( \mathbf{b}_i^*\hat{\mathbf{z}}\hat{\mathbf{v}}^*\mathbf{b}_i)=2\mathrm{Re}(\cos\theta|\mathbf{b}_{i,1}|^2+\sin\theta\mathbf{b}_{i,1}^*\mathbf{b}_{i,2})\\=&2\cos\theta|\mathbf{b}_{i,1}|^2 +2\sin\theta \mathrm{Re}(\mathbf{b}_{i,1}^*\mathbf{b}_{i,2}),
\end{align*}
and applying $|\mathbf{b}_i^*\hat{\mathbf{z}}|^2=|\mathbf{b}_{i,1}|^2$,
\begin{equation}\label{eq:temp1}
\frac{(\mathbf{b}_i^*\hat{\mathbf{z}}\hat{\mathbf{v}}^*\mathbf{b}_i+\mathbf{b}_i^*\hat{\mathbf{v}}\hat{\mathbf{z}}^*\mathbf{b}_i)^2}{2|\mathbf{b}_i^*\hat{\mathbf{z}}|^2}=\cos^2\theta |\mathbf{b}_{i,1}|^2+2\cos\theta\sin\theta \mathrm{Re}(\mathbf{b}_{i,1}^*\mathbf{b}_{i,2})+\sin^2\theta \frac{(\mathrm{Re}(\mathbf{b}_{i,1}^*\mathbf{b}_{i,2}))^2}{|\mathbf{b}_{i,1}|^2}.
\end{equation}
By the symmetry of the distribution of $\mathbf{b}_{i,2}$, when $\mathbf{b}_{i,1}$ is fixed, $\operatorname{\mathbb{E}}_{\mathbf{b}_{i,2}} {\mathrm{Re}(\mathbf{b}_{i,1}^*\mathbf{b}_{i,2})}=0$ and $\operatorname{\mathbb{E}}_{\mathbf{b}_{i,2}}\frac{(\mathrm{Re}(\mathbf{b}_{i,1}^*\mathbf{b}_{i,2}))^2}{|\mathbf{b}_{i,1}|^2}=\frac{1}{2}\operatorname{\mathbb{E}}_{\mathbf{b}_{i,2}} \frac{|\mathbf{b}_{i,1}^*\mathbf{b}_{i,2}|^2}{|\mathbf{b}_{i,1}|^2}=\frac{1}{2}\operatorname{\mathbb{E}}_{\mathbf{b}_{i,2}}|\mathbf{b}_{i,2}|^2=\frac{1}{2}(1-|\mathbf{b}_{i,1}|^2)$. Combining it with $\operatorname{\mathbb{E}} |\mathbf{b}_{i,1}|^2=1/2$ and \eqref{eq:temp1}, we have
\[
\operatorname{\mathbb{E}}\left[ \frac{(\mathbf{b}_i^*\hat{\mathbf{z}}\hat{\mathbf{v}}^*\mathbf{b}_i+\mathbf{b}_i^*\hat{\mathbf{v}}\hat{\mathbf{z}}^*\mathbf{b}_i)^2}{2|\mathbf{b}_i^*\hat{\mathbf{z}}|^2}\right]=\frac{1}{2}\cos^2\theta+\frac{1}{4}\sin^2\theta\geq \frac{1}{4}.
\]
Since for each $1\leq i\leq m$,
\[
\frac{(\mathbf{b}_i^*\hat{\mathbf{z}}\hat{\mathbf{v}}^*\mathbf{b}_i+\mathbf{b}_i^*\hat{\mathbf{v}}\hat{\mathbf{z}}^*\mathbf{b}_i)^2}{2|\mathbf{b}_i^*\hat{\mathbf{z}}|^2}\leq 2|\mathbf{b}_i^*\hat{\mathbf{v}}|^2\leq 2,
\]
When the event in \eqref{eq:term1_1} holds,
\begin{equation}\label{eq:temp2}
\text{there exists a set $\mathcal{I}$ of size $m/2$ such that for all $i\in\mathcal{I}$, $\|P_{\mathrm{Sp}(\mathbf{v},\mathbf{z})}(\mathbf{a}_i)\|^2\geq \frac{c_1}{n}$.}\end{equation} Hoeffding's inequality then gives the estimation
\begin{equation}\label{eq:term1_2}
\Pr\left(\frac{2}{m}\sum_{i\in\mathcal{I}}\frac{(\mathbf{b}_i^*\mathbf{z}\mathbf{v}^*\mathbf{b}_i+\mathbf{b}_i^*\mathbf{v}\mathbf{z}^*\mathbf{b}_i)^2}{2|\mathbf{b}_i^*\mathbf{z}|^2}\geq \frac{1}{4}-t\right)\geq 1-\exp\left(-\frac{mt^2}{4}\right).
\end{equation}
Combining \eqref{eq:term1_0}, \eqref{eq:temp2}, and \eqref{eq:term1_2},
\begin{equation}\label{eq:term1_3}
\Pr\left(\frac{2}{m}\sum_{i\in\mathcal{I}}\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\mathbf{a}_i)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}\geq \Big(\frac{1}{4}-t\Big)\frac{c_1}{n}\right)\geq 1-\exp\left(-\frac{mt^2}{4}\right)-\exp\Big(-\frac{m}{8}\Big),
\end{equation}
which leads to
\begin{equation}\label{eq:term1_4}
\Pr\left(\sum_{i=1}^m\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\mathbf{a}_i)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}\geq \Big(\frac{1}{4}-t\Big)\frac{c_1m}{2n}\right)\geq 1- \exp\left(-\frac{mt^2}{4}\right)-\exp\Big(-\frac{m}{8}\Big).
\end{equation}
Second, we will establish a perturbation bound on $\mathbf{v}$. For any $\mathbf{v}'$ such that $\|\mathbf{v}'-\mathbf{v}\|\leq \epsilon$,
\begin{align*}
& \sum_{i=1}^m\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\mathbf{a}_i)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}-\sum_{i=1}^m\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}'^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}'\mathbf{z}^*\mathbf{a}_i)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}\\=&4\sum_{i=1}^m\frac{\mathrm{Re}(\mathbf{a}_i^*\mathbf{z}(\mathbf{v}+\mathbf{v}')^*\mathbf{a}_i)\mathrm{Re}(\mathbf{a}_i^*\mathbf{z}(\mathbf{v}-\mathbf{v}')^*\mathbf{a}_i)}{2|\mathbf{a}_i^*\mathbf{z}|^2}\leq 4\epsilon\sum_{i=1}^m\|\mathbf{a}_i\|^2=4m\epsilon.
\end{align*}
Combining it with \eqref{eq:term1_4}, Lemma~\ref{lemma:covering}, and a standard $\epsilon$-net argument, we have
\begin{align*}\nonumber
& \Pr\left(\min_{\|\mathbf{v}\|=1}\frac{1}{m}\sum_{i=1}^m\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\mathbf{a}_i)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}\geq \frac{c_1m}{2n}\Big(\frac{1}{4}-t\Big)-4m\epsilon\right)\\\geq& 1-\left(\exp\left(-\frac{mt^2}{4}\right)+\exp\left(-\frac{m}{8}\right)\right)\left(1+\frac{2}{\epsilon}\right)^{2n}.
\end{align*}
Set $t=\frac{1}{12}$ and $\epsilon=\frac{c_1}{96n}$, \eqref{eq:term1_5} and Lemma~\ref{lemma:term1} are proved.
\end{proof}
\subsection{Proof of Lemma~\ref{lemma:term3}} \label{sec:term3}
In this section, we will first present a few lemmas and their proof, and then prove Lemma~\ref{lemma:term3} in the end. In particular, we will estimate the expected value of $|\mathcal{S}(\mathbf{v},\beta)|$ in Lemma~\ref{lemma:term34}; then we will investigate the perturbation of $|\mathcal{S}(\mathbf{v},\beta)|$ when $\mathbf{v}$ is perturbed in Lemma~\ref{lemma:term31}. Next, an $\epsilon$-net argument will be used to give a uniform upper bound on $|\mathcal{S}(\mathbf{v},\beta)|$ for all $\mathbf{v}$. Combining this uniform upper bound and Lemma~\ref{lemma:term32}, Lemma~\ref{lemma:term3} is proved.
\begin{lemma}\label{lemma:term34}
Fix $\mathbf{v}$ with $\|\mathbf{v}\|=1$ and assume $\beta\geq 1$, then for each $1\leq i\leq m$, \[
\Pr\{|\mathbf{a}_i^*\mathbf{z}|\leq \beta |\mathbf{a}_i^*\mathbf{v}|\}\leq \frac{\beta^2}{1+\beta^2}.
\]
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{lemma:term34}]
First, we will show that it is sufficient to prove the case $\mathbf{v}\perp\mathbf{z}$. Assume $\mathbf{v}\not\perp\mathbf{z}$, then there exists $\mathbf{u}$ such that $\|\mathbf{u}\|=1$, $\mathbf{u}\perp\mathbf{z}$, $\mathbf{v}\in\mathrm{Sp}(\mathbf{u},\mathbf{z})$. WLOG assume that $\mathbf{v}=e^{i\eta_1}\cos\theta\mathbf{z}+e^{i\eta_2}\sin\theta\mathbf{u}$, then we have $|\mathbf{a}_i^*\mathbf{v}|^2=\cos^2\theta|\mathbf{a}_i^*\mathbf{z}|^2+\sin^2\theta|\mathbf{a}_i^*\mathbf{u}|^2$ and
\begin{equation}\label{eq:buv}
\frac{|\mathbf{a}_i^*\mathbf{v}|^2}{|\mathbf{a}_i^*\mathbf{z}|^2} = \frac{\cos^2\theta|\mathbf{a}_i^*\mathbf{z}|^2+\sin^2\theta|\mathbf{a}_i^*\mathbf{u}|^2}{|\mathbf{a}_i^*\mathbf{z}|^2}=\cos^2\theta+\sin^2\theta\frac{|\mathbf{a}_i^*\mathbf{u}|^2}{|\mathbf{a}_i^*\mathbf{z}|^2}
\end{equation}
Assuming that $i\in \mathcal{S}(\mathbf{v},\beta)$, then $\frac{|\mathbf{a}_i^*\mathbf{v}|^2}{|\mathbf{a}_i^*\mathbf{z}|^2}\geq \frac{1}{\beta^2}\geq 1$ and \eqref{eq:buv} implies
\[
\frac{|\mathbf{a}_i^*\mathbf{u}|^2}{|\mathbf{a}_i^*\mathbf{z}|^2}\geq \frac{|\mathbf{a}_i^*\mathbf{v}|^2}{|\mathbf{a}_i^*\mathbf{z}|^2},
\]
so $i$ is also in $\mathcal{S}(\mathbf{u},\beta)$. Therefore, $\mathcal{S}(\mathbf{v},\beta)\subseteq \mathcal{S}(\mathbf{u},\beta)$, and it is sufficient to prove Lemma~\ref{lemma:term34} for the case $\mathbf{v}\perp\mathbf{z}$.
Note that $\Pr\{|\mathbf{a}_i^*\mathbf{z}|\leq \beta |\mathbf{a}_i^*\mathbf{v}|\}$ does not change with the scaling of $\mathbf{a}_i$, so we may assume that $\mathbf{a}_i$ are sampled from the complex Gaussian distribution $CN(0,\sqrt{2}\mathbf{I})$, i.e., each real and imaginary component is sampled from $N(0,1)$. With $\mathbf{v}\perp\mathbf{z}$,
\[\Pr\left(|\mathbf{a}_i^*\mathbf{z}|\leq \beta |\mathbf{a}_i^*\mathbf{v}|\right)=\Pr\left(\sqrt{g_1^2+g_2^2}\leq \beta \sqrt{g_3^2+g_4^2}\right),\] assuming that $g_1,g_2,g_3,g_4$ are i.i.d. sampled from $N(0,1)$. By calculation, both $\sqrt{g_1^2+g_2^2}$ and $\sqrt{g_3^2+g_4^2}$ have the probability density function $xe^{-x^2/2}$ with $x\geq 0$. Therefore,
\begin{align*}
&\Pr\left(\sqrt{g_1^2+g_2^2}\leq \beta \sqrt{g_3^2+g_4^2}\right)=\int_{x=0}^\infty xe^{-x^2/2}\int_{y=\frac{x}{\beta}}^\infty ye^{-y^2/2}{\,\mathrm{d}} y{\,\mathrm{d}} x \\=& \int_{x=0}^\infty xe^{-x^2/2}e^{-x^2/(2\beta^2)}{\,\mathrm{d}} x=\frac{\beta^2}{1+\beta^2},
\end{align*}
and Lemma~\ref{lemma:term34} is then proved.
\end{proof}
\begin{lemma}\label{lemma:term31}
For any $\mathbf{v}, \mathbf{v}'\in\mathcal{C}^n$ with $\|\mathbf{v}-\mathbf{v}'\|\leq\epsilon$, we have
\[
\Big|\mathcal{S}\Big(\mathbf{v}',\frac{\beta}{1-\epsilon c_2}\Big)\Big|\leq |\mathcal{S}(\mathbf{v},\beta)| + |\{1\leq i\leq m: \|\mathbf{a}_i\|/|\mathbf{a}_i^*\mathbf{v}|\geq c_2\}|.
\]
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{lemma:term31}]
If $i\in\mathcal{S}(\mathbf{v},\beta)$ and $\|\mathbf{a}_i^*\|/|\mathbf{a}_i^*\mathbf{v}|\leq c_2$, then
\[
|\mathbf{a}_i^*\mathbf{v}|\leq |\mathbf{a}_i^*\mathbf{v}'|+\epsilon\|\mathbf{a}_i^*\|\leq |\mathbf{a}_i^*\mathbf{v}'|+\epsilon c_2|\mathbf{a}_i^*\mathbf{v}|,
\]
so $|\mathbf{a}_i^*\mathbf{z}|\leq \beta|\mathbf{a}_i^*\mathbf{v}|\leq
\frac{\beta}{1-\epsilon c_2}|\mathbf{a}_i^*\mathbf{v}'|$.
\end{proof}
The following is a restatement of \cite[Theorem 5.7]{10.1093/imaiai/iay005}. While the statement is proved for the real-valued case, it can be generalized to the complex-valued case by treating the real component and the imaginary component separately.
\begin{lemma}\label{lemma:term32}
Let $0<\delta<1/2$, $0<c_3<1$, suppose $m\geq \max(n,\log(1/\delta)/c_3)$, then with probability at least $1-2\delta$, for any set $\mathcal{S}\subseteq\{1,\cdots,m\}$ with $|\mathcal{S}|\leq c_3 m$, we have
\[
\Big\|\sum_{i\in\mathcal{S}}\mathbf{a}_i\mathbf{a}_i^*\Big\| \leq C \sqrt{c_3}\frac{m}{n}.
\]
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{lemma:term3}]
To investigate the probability that $\|\mathbf{a}_i^*\|/|\mathbf{a}_i^*\mathbf{v}|\geq c_2$, WLOG we may rescale $\mathbf{a}_i\in\mathcal{C}^n$ and assume that the real and the imaginary components of each element of $\mathbf{a}_i$ are sampled from $N(0,1)$. Then $|\mathbf{a}_i^*\mathbf{v}|$ has a p.d.f. of $f(x)=xe^{-x^2/2}$ for $x\geq 0$, and \begin{equation}\label{eq:temp4}
\Pr(|\mathbf{a}_i^*\mathbf{v}|\geq t)=\exp(-t^2/2).
\end{equation}
In addition, $\|\mathbf{a}_i^*\|^2$ follows a $\chi^2$ distribution with $2n$ degrees of freedom. As a result, the tail bound of the $\chi^2$ distribution~\cite[Example 2.11]{wainwright2019high} implies (using one-sided inequality, apply $t=1$ and replace $n$ by $2n$ from their notation)
\begin{equation}\label{eq:temp5}
\Pr(\|\mathbf{a}_i\|^2\geq 4n)\leq \exp(-n/4).
\end{equation}
Combining \eqref{eq:temp4} with $t=\frac{2\sqrt{n}}{c_2}$ and \eqref{eq:temp5}, we have
\[
\Pr(\|\mathbf{a}_i^*\|/|\mathbf{a}_i^*\mathbf{v}|\geq c_2 )\leq 1-\exp(-2n/c_2^2)+\exp(-n/4).
\]
Combining it with Lemma~\ref{lemma:term34}, we have that for each $1\leq i\leq m$,
\[
\Pr\left(\frac{\|\mathbf{a}_i\|}{|\mathbf{a}_i^*\mathbf{v}|}\geq c_2\right)\leq 1-\exp\Big(-\frac{2n}{c_2^2}\Big)+\exp\Big(-\frac{n}{4}\Big)
\]
Applying Hoeffding inequality, for each $\mathbf{v}$,
\begin{align}\nonumber
&\Pr\left(\Big|\Big\{1\leq i\leq m: \frac{\|\mathbf{a}_i\|}{|\mathbf{a}_i^*\mathbf{v}|}\geq c_2\Big\}\Big|\geq m\Big(1-\exp\Big(-\frac{2n}{c_2^2}\Big)+\exp\Big(-\frac{n}{4}\Big) +t\Big)\right)\\\leq &\exp(-2mt^2). \label{eq:temp6}
\end{align}
Similarly, applying Hoeffding inequality to Lemma~\ref{lemma:term34}, for each $\mathbf{v}$,
\begin{align}\label{eq:temp7}
&\Pr\left(\Big|\mathcal{S}(\mathbf{v},\beta)\Big|\geq m\Big(\frac{\beta^2}{1+\beta^2} +t\Big)\right)\leq \exp(-2mt^2).
\end{align}
Combining \eqref{eq:temp6}, \eqref{eq:temp7} with $c_2=1/2\epsilon$, then the standard $\epsilon$-net argument, Lemma~\ref{lemma:covering}, and Lemma~\ref{lemma:term31} imply
\begin{align}\nonumber
& \Pr\left(\max_{\|\mathbf{v}\|=1}\Big|\mathcal{S}\Big(\mathbf{v},\frac{\beta}{2}\Big)\Big|\geq m\Big(1-\exp(-8n\epsilon^2)+\exp\big(-\frac{n}{4}\big) +\frac{\beta^2}{1+\beta^2}+2t\Big)\right)\\\leq& 2\exp(-2mt^2)(1+2/\epsilon)^{2n}. \label{eq:temp9}
\end{align}
Let $\epsilon=1/n$, $\eta=\exp(-n\log n)$, $t=\beta^2/2$, $c_3=2\beta^2$, note that $1-\exp(-8n\epsilon^2)+\exp\big(-\frac{n}{4}\big)\rightarrow 0$ as $n\rightarrow\infty$, Lemma~\ref{lemma:term3} is then proved using Lemma~\ref{lemma:term32} and the fact that $\Big\|\sum_{i\in\mathcal{S}}\mathbf{a}_i\mathbf{a}_i^*\Big\|\geq \sum_{i\in\mathcal{S}}|\mathbf{a}_i^*\mathbf{v}|^2$.
\end{proof}
\section{The unitary model}\label{sec:step4}
In this section, we will show that under the unitary model, $L$ defined in \eqref{eq:assumption} and \eqref{eq:define_L} can be estimated and has a lower bound as follows:
\begin{thm}\label{thm:step4}
Under the unitary model, there exists $c_0$ and $\alpha$ such that $2c_0\alpha<1$, and
\begin{equation}\label{eq:unitary1}
L=\frac{n}{m}\min_{\|\mathbf{v}\|=1} \left\{\!\!\frac{1}{2}\!\sum_{i=1}^m\!\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\!\mathbf{a}_i\!\!+\!\!\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\!\mathbf{a}_i\!)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}\!-\! \frac{6}{\alpha\!-\!1}\!\!\sum_{i=1}^m|\mathbf{a}_i^*\mathbf{v}|^2\!-\!(2\!+\!4\alpha)\!\!\!\!\!\!\!\!\sum_{i\in\mathcal{S}(\mathbf{v},c_0\alpha)}\!\!\!\!\!\!\!\!|\mathbf{a}_i^*\mathbf{v}|^2\!\!\right\}
\end{equation}
is bounded from below with high probability, that is, $L>c$ for some $c>0$ with probability $1-Cn\exp(-Cn)$.
\end{thm}
We will control each of the three expressions in \eqref{eq:unitary1} separately. For the component $\frac{6}{\alpha\!-\!1}\!\!\sum_{i=1}^m|\mathbf{a}_i^*\mathbf{v}|^2$, by definition it is equivalent to $\frac{6}{\alpha-1}K=\frac{6}{\alpha-1}\frac{m}{n}$. Assume that for some $c_4>0$,
\begin{equation}\label{eq:unitary2}
\Pr\left\{\min_{\|\mathbf{v}\|=1}\sum_{i=1}^m\!\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\!\mathbf{a}_i\!\!+\!\!\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\!\mathbf{a}_i\!)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}\geq \frac{c_4}{2}\frac{m}{n}\!\!\right\}\geq 1-C\exp(-Cm+n\log n).
\end{equation}
and that there exists $c_8>0$ such that for any $\beta$,
\begin{equation}\label{eq:unitary3}
\Pr\left\{\max_{\|\mathbf{v}\|=1}\sum_{i\in\mathcal{S}(\mathbf{v},\beta)}\!\!|\mathbf{a}_i^*\mathbf{v}|^2\leq c_8 \beta \frac{m}{n}\!\!\right\}\geq 1-C\exp(-Cm+n\log n),
\end{equation}
then we can choose $\alpha$ and $c_0$ such that $2c_0\alpha<1$ and $L>c$ for some $c>0$ with probability $1-Cn\exp(-Cn)$. That is, Theorem~\ref{thm:step4} is proved.
\subsection{Proof of \eqref{eq:unitary2}}
For the component $\min_{\|\mathbf{v}\|=1}\sum_{i=1}^m\!\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\!\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\!\mathbf{a}_i\!)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}$, we will use a two-step procedure: First, we will show that for any fixed $\mathbf{v}\in\mathcal{C}^n$ with $\|\mathbf{v}\|=1$, there exists some $c_4>0$ such that
\begin{equation}\label{eq:singlev}
\Pr\left\{\!\!\sum_{i=1}^m\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\!\mathbf{a}_i\!\!+\!\!\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\!\mathbf{a}_i\!)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}\geq c_4\frac{m}{n}\!\!\right\}\geq 1-C\exp(-Cm).\end{equation} Second, we will apply an $\epsilon$-net argument to the set $\{\mathbf{v}\in\mathcal{C}^n: \|\mathbf{v}\|=1\}$.
To prove \eqref{eq:singlev}, considering that the expression only depends on the inner products $\mathbf{a}_i^*\mathbf{v}$ and $\mathbf{a}_i^*\mathbf{z}$, it is equivalent to work with $\tilde{\mathbf{a}}_i=P_{\mathrm{Sp}(\mathbf{v},\mathbf{z})}\mathbf{a}_i$, $\tilde{\mathbf{v}}=P_{\mathrm{Sp}(\mathbf{v},\mathbf{z})}\mathbf{v}$, and $\tilde{\mathbf{z}}=P_{\mathrm{Sp}(\mathbf{v},\mathbf{z})}\mathbf{z}$, that is, $\tilde{\mathbf{a}}_i, \tilde{\mathbf{v}}, \tilde{\mathbf{z}}\in\mathcal{C}^2$ are obtained by projecting these vectors to the subspace spanned by $\mathbf{v}$ and $\mathbf{z}$. Then for any $1\leq k\leq K$, $[\tilde{\mathbf{a}}_{(k-1)n+1},\cdots,\tilde{\mathbf{a}}_{kn}]\in\mathcal{C}^{n\times 2}$ is a random orthogonal matrix in $\mathcal{C}^{n\times 2}$. While $\{\tilde{\mathbf{a}}_i\}_{i=1}^m$ are not independently distributed, their correlation is weak and can be decomposed as follows: For $1\leq k\leq K$, let $\mathbf{S}_k$ be a random matrix that represents the covariance of a Gaussian matrix of $n\times 2$, then $\{\mathbf{b}_i\}_{i=1}^m$ defined by $\mathbf{b}_i=\tilde{\mathbf{a}}_i\sqrt{\mathbf{S}_{\lfloor (i-1)/n\rfloor+1}}$ are i.i.d. sampled from $N(0,\mathbf{I}_{2\times 2})$. Let $t$ be a parameter such that
\begin{equation}\label{eq:event}
\Pr\{\max_{1\leq k\leq K}\|\sqrt{\mathbf{S}_{k}}/\sqrt{n}-\mathbf{I}\|\leq t\}\geq 1/2.
\end{equation}
By the property of the singular values of a random Gaussian matrix \cite[Theorem 2.13]{Szarek2001} and a union bound over $K=\frac{m}{n}$ matrices $\{\mathbf{S}_k\}_{k=1}^K$, we may let $t=\frac{c\log \frac{m}{n}}{\sqrt{n}}$, which converges to zero as $n,m\rightarrow\infty$ since $\sqrt{n}>\log^2 m$. We remark that
\begin{equation}\label{eq:correlation}
\text{when the event in \eqref{eq:event} holds, $\|\tilde{\mathbf{a}}_i-\frac{1}{\sqrt{n}}\mathbf{b}_i\|\leq t\|\tilde{\mathbf{a}}_i\|$ for all $1\leq i\leq m$.}
\end{equation}
Let $\mathcal{I}_1=\{1\leq i\leq m: \|\tilde{\mathbf{a}}_i^*\|\geq c_5/\sqrt{n}, |\tilde{\mathbf{a}}_i^*\mathbf{z}|\geq c_6\|\tilde{\mathbf{a}}_i^*\|, |\tilde{\mathbf{a}}_i^*\mathbf{v}|\geq c_6\|\tilde{\mathbf{a}}_i^*\|, \frac{\mathrm{Re}(\tilde{\mathbf{a}}_i^*\tilde{\mathbf{z}}\tilde{\mathbf{v}}^*\!\tilde{\mathbf{a}}_i)}{\|\tilde{\mathbf{v}}^*\!\tilde{\mathbf{a}}_i\|\|\tilde{\mathbf{z}}^*\!\tilde{\mathbf{a}}_i\|}\geq c_7\}$, and let $\mathcal{I}_2=\{1\leq i\leq m: \|{\mathbf{b}}_i^*\|\geq c_5(1-t), |{\mathbf{b}}_i^*\mathbf{z}|\geq (c_6-t)\|{\mathbf{b}}_i^*\|, |{\mathbf{b}}_i^*\mathbf{v}|\geq (c_6-t)\|{\mathbf{b}}_i^*\|, \frac{\mathrm{Re}({\mathbf{b}}_i^*\tilde{\mathbf{z}}\tilde{\mathbf{v}}^*\!{\mathbf{b}}_i)}{\|\tilde{\mathbf{v}}^*\!{\mathbf{b}}_i\|\|\tilde{\mathbf{z}}^*\!{\mathbf{b}}_i\|}\geq c_7-3t/c_6\}$. By definition, when the event in \eqref{eq:event} holds, then
${\mathcal{I}_1}\subseteq \mathcal{I}_2$ and $\bar{\mathcal{I}_2}\supseteq \bar{\mathcal{I}_1}$. Since the event in \eqref{eq:event} is independent of $\{\tilde{\mathbf{a}}_i\}_{i=1}^m$, for any $\alpha>0$,
\begin{align*}
&\Pr\{|\bar{\mathcal{I}}_2|\geq \alpha m\}\geq \Pr\{|\bar{\mathcal{I}}_1|\geq \alpha m\}\Pr\{\max_{1\leq k\leq K}\|\mathbf{S}_k-n\mathbf{I}\|/n\leq t\}\\\geq& \frac{1}{2}\Pr\{|\bar{\mathcal{I}}_1|\geq \alpha m\}.
\end{align*}
On the other hand, clearly we can choose $c_5, c_6, c_7, 0<\alpha<1$ such that
\[
\Pr\{|\bar{\mathcal{I}}_2|\geq \alpha m\}=\Pr\{|{\mathcal{I}}_2|\leq (1-\alpha)m\}\leq C\exp(-Cm),
\]
and it suggests that
\[
\Pr\{|{\mathcal{I}}_1|\leq (1-\alpha)m\}=\Pr\{|\bar{\mathcal{I}}_1|\geq \alpha m\}\leq 2\Pr\{|\bar{\mathcal{I}}_2|\geq \alpha m\}\leq C\exp(-Cm).
\]
As a result, with probability $1-C\exp(-Cm)$, $|{\mathcal{I}}_1|\geq (1-\alpha)m$ and as a result,
\[
\sum_{i=1}^m\!\frac{(\tilde{\mathbf{a}}_i^*\tilde{\mathbf{z}}\tilde{\mathbf{v}}^*\!\tilde{\mathbf{a}}_i\!\!+\!\!\tilde{\mathbf{a}}_i^*\tilde{\mathbf{v}}\tilde{\mathbf{z}}^*\!\tilde{\mathbf{a}}_i\!)^2}{2|\tilde{\mathbf{a}}_i^*\mathbf{z}|^2}\geq (1-\alpha)c_5^2c_6^2c_7^2\frac{m}{n},
\]
and \eqref{eq:singlev} is proved with $c_4=(1-\alpha)c_5^2c_6^2c_7^2$.
It remains to apply an $\epsilon$-net argument, which is a standard argument: by noting that
\[
\sum_{i=1}^m\Big|\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\!\mathbf{a}_i\!\!+\!\!\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\!\mathbf{a}_i\!)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}-\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}'^*\!\mathbf{a}_i\!\!+\!\!\mathbf{a}_i^*\mathbf{v}'\mathbf{z}^*\!\mathbf{a}_i\!)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}\Big|\leq \sum_{i=1}^m\|\mathbf{a}_i\|^2\|\mathbf{v}-\mathbf{v}'\|^2=m\|\mathbf{v}-\mathbf{v}'\|^2
\]
and use $\epsilon=\frac{c_4}{2\sqrt{n}}$, we have \eqref{eq:unitary2}.
\subsection{Proof of \eqref{eq:unitary3}}
The proof of \eqref{eq:unitary3} will be similar to the proof of \eqref{eq:unitary2}. First, it will be proved for a fixed $\mathbf{v}$, and then an $\epsilon$-net argument will be used. In the proof of \eqref{eq:unitary3} for any fixed $\mathbf{v}$, we apply $\tilde{\mathbf{a}}_i$ and $\mathbf{b}_i$ as defined in the proof of \eqref{eq:unitary2}.
For any fixed $\mathbf{v}$, the first part of the proof of Lemma~\ref{lemma:term34} implies that it is sufficient to assume $\mathbf{v}\perp\mathbf{z}$ and $\tilde{\mathbf{v}}\perp\tilde{\mathbf{z}}$. When $i\in\mathcal{S}(\mathbf{v},\beta)$, then $\beta\|\tilde{\mathbf{a}}_i^*\tilde{\mathbf{v}}\|\geq \|\tilde{\mathbf{a}}_i^*\tilde{\mathbf{z}}\|$. When the event \eqref{eq:event} holds, then \eqref{eq:correlation} suggests that $\frac{\|\mathbf{b}_i^*\tilde{\mathbf{v}}\|}{\|\mathbf{b}_i^*\tilde{\mathbf{z}}\|}\geq \frac{1-t}{\beta+t}$, which happens with probability smaller than $(\frac{\beta+t}{1-t})^2$ by Lemma~\ref{lemma:term34}.
Applying Hoeffding's inequality for subgaussian distributions, there exists $\gamma_0>0$ such that with probability at most $C\exp(-Cm)$, $\sum_{1\leq i\leq m: \frac{\|\mathbf{b}_i^*\tilde{\mathbf{v}}\|}{\|\mathbf{b}_i^*\tilde{\mathbf{z}}\|}\geq \frac{1-t}{\beta+t}}|\mathbf{b}_i^*\tilde{\mathbf{v}}|^2\geq (\frac{\beta+t}{1-t})^2\gamma_0 m$. By the same argument as the proof of \eqref{eq:singlev} and $|\mathbf{b}_i^*\tilde{\mathbf{v}}|^2-|\tilde{\mathbf{a}}_i^*\tilde{\mathbf{v}}|^2\leq t^2\|\tilde{\mathbf{a}}_i\|^2$, we have that with probability at most $2C\exp(-Cm)$,
\[
\sum_{i\in\mathcal{S}(\mathbf{v},\beta)}|\tilde{\mathbf{a}}_i^*\mathbf{v}|^2\geq (\frac{\beta+t}{1-t})^2\gamma_0 \frac{m}{n}+t^2\frac{m}{n}.
\]
Combining it with an $\epsilon$-net argument with $\epsilon=c/n$ and Lemma~\ref{lemma:term31} (with $c_2=n/2c$), note that
\[
\sum_{1\leq i\leq m:\, \|\mathbf{a}_i\|/|\mathbf{a}_i^*\mathbf{v}|\geq c_2}|\mathbf{a}_i^*\mathbf{v}|^2\leq \sum_{1\leq i\leq m:\, \|\mathbf{a}_i\|/|\mathbf{a}_i^*\mathbf{v}|\geq c_2}\|\mathbf{a}_i\|^2/c_2^2\leq m/c_2^2,
\]
\eqref{eq:unitary3} is proved.
\section{Discussion}
\subsection{Comparison with existing analysis of real-valued objects}
This section compares the analysis in this case with the analysis of the same algorithm of real-valued objects in Tan and Vershynin \cite{10.1093/imaiai/iay005}, since both works have deterministic conditions of convergence and verify the deterministic condition under a probabilistic model.
First, the deterministic condition in \cite{10.1093/imaiai/iay005} can be rewritten as follows: there exists $\theta$ such that for all ``wedges of angle $\theta$'' $\mathcal{W}$ in $\mathbb{R}^n$,
\begin{equation}\label{eq:compare1}
\frac{1}{m}\lambda_{\min}\Big(\sum_{i=1}^m\mathbf{a}_i\mathbf{a}_i^T-4\sum_{\mathbf{a}_i\in \mathcal{W}}\mathbf{a}_i\mathbf{a}_i^T\Big) \geq \frac{c}{n},
\end{equation}
and here wedge of angle $\theta$ represents the region of the sphere between two hemispheres with normal vectors making an angle of $\theta$.
In comparison, combining Theorems~\ref{thm:main1} and~\ref{thm:main2}, the deterministic result in this paper requires the existence of some $c_0>0$ and $\alpha>1$ such that
\begin{equation}\label{eq:compare2}
\min_{\|\mathbf{v}\|=1} \!\left\{\!\frac{1}{2}\!\sum_{i=1}^m\!\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\!\mathbf{a}_i\!+\!\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\!\mathbf{a}_i)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}\!-\! \frac{6}{\alpha\!-\!1}\!\!\sum_{i=1}^m|\mathbf{a}_i^*\mathbf{v}|^2\!-\!(2\!+\!4\alpha\!)\!\!\!\!\!\!\sum_{i\in\mathcal{S}(\mathbf{v},c_0\alpha)}\!\!\!\!\!\!|\mathbf{a}_i^*\mathbf{v}|^2\!\right\}\!\geq \! \frac{c}{n}.
\end{equation}
The term $\sum_{i=1}^m\mathbf{a}_i\mathbf{a}_i^T$ in \eqref{eq:compare1} is comparable to the terms $\frac{1}{2}\sum_{i=1}^m\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\mathbf{a}_i)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}- \frac{6}{\alpha-1}\sum_{i=1}^m|\mathbf{a}_i^*\mathbf{v}|^2$ in \eqref{eq:compare2}. Under the real-valued setting, the latter can be simplified to $\left(1-\frac{6}{\alpha-1}\right)\sum_{i=1}^m|\mathbf{a}_i^*\mathbf{v}|^2$, and minimizing it over all $\|\mathbf{v}\|=1$ gives the smallest eigenvalue of $\sum_{i=1}^m\mathbf{a}_i\mathbf{a}_i^T$.
The term $\sum_{\mathbf{a}_i\in \mathcal{W}}\mathbf{a}_i\mathbf{a}_i^T$ in \eqref{eq:compare1} and the set $\mathcal{W}$ are also comparable to the term $(2+4\alpha)\sum_{i\in\mathcal{S}(\mathbf{v},c_0\alpha)}|\mathbf{a}_i^*\mathbf{v}|^2$ in \eqref{eq:compare2} and the set $\mathcal{S}(\mathbf{v},c_0\alpha)$.
In fact, the set $\mathcal{S}(\mathbf{v},c_0\alpha)$ also has the ``wedge'' shape under the real-valued setting, and both works attempt to show that the number of sensing vectors in the set is small.
Second, the probabilistic analysis in these two works also shares connections, which is natural since there are similarities in the deterministic conditions. However, \cite{10.1093/imaiai/iay005} achieves the bound $m=O(n)$, which is a logarithmic factor better than the bound $m=O(n\log n)$ in Theorem~\ref{thm:main}. Looking into the analysis of both works, the extra $\log n$ factor comes from the estimation of $\sum_{i=1}^m\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\mathbf{a}_i)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}$ in Lemma~\ref{lemma:term1} and the estimation of the size of $\mathcal{S}(\mathbf{v},c_0\alpha)$ in Lemma~\ref{lemma:term3}, where simple $\epsilon$-net arguments are used. In comparison, in the real-valued setting \cite{10.1093/imaiai/iay005}, $\sum_{i=1}^m\frac{(\mathbf{a}_i^*\mathbf{z}\mathbf{v}^*\mathbf{a}_i+\mathbf{a}_i^*\mathbf{v}\mathbf{z}^*\mathbf{a}_i)^2}{2|\mathbf{a}_i^*\mathbf{z}|^2}=2\sum_{i=1}^m\|\mathbf{a}_i^*\mathbf{v}\|^2$ and a standard result on the eigenvalue of $\sum_{i=1}^m\mathbf{a}_i\mathbf{a}_i^*$ can be used; and the number of the sensing vectors in the set $\mathcal{W}$ is uniformly bounded by applying VC theory, and these arguments do not have natural generalizations to the complex-valued setting. In comparison, the $\epsilon$-net argument gives an additional $\log n$ factor. It would be interesting to investigate whether there exist more careful arguments for the complex-valued setting such that the $\log n$ factor could be removed.
Finally, while there are similarities between this work and \cite{10.1093/imaiai/iay005}, the fundamental difference comes from the argument for the deterministic condition in Theorems~\ref{thm:main1} and~\ref{thm:main2}, which relates the convergence of the randomized Kaczmarz algorithm with the local convexity of an objective function. In comparison, the straightforward calculation in \cite{10.1093/imaiai/iay005} is based on the fact that there only exist two phases of $\pm 1$ in the real-valued setting.
\subsection{Initialization}
Theorem~\ref{thm:main} requires an initialization such that $\|\mathbf{x}^{(0)}-\mathbf{z}\|\leq c_0\sqrt{\delta_1}$. Many schemes have been proposed for obtaining a good initialization \cite[Section B]{10.1093/imaiai/iay005}. For example, we may use the truncated spectral method that lets $\hat{\mathbf{x}}^{(0)}=\lambda_0\tilde{\mathbf{x}}$, where $\lambda_0=\sqrt{\frac{1}{m}\sum_{i=1}^m b_i^2}$ and $\tilde{\mathbf{x}}$ is the leading eigenvector of $
Y=\frac{1}{m}\sum_{i=1}^mb_i^2\mathbf{a}_i\mathbf{a}_i^* I(b_i\leq 3\lambda_0).$ Following the analysis in \cite[Section B]{10.1093/imaiai/iay005}, one can show that the requirement on the initialization $\|\hat{\mathbf{x}}^{(0)}-\mathbf{z}\|\leq c_0\sqrt{\delta_1} \|\mathbf{z}\|$ holds as long as $m\geq C(\log(1/\delta)+n)/( c_0^2\delta_1)$.
However, considering that this construction of the initialization is dependent on the isotropy of the distribution of the sensing vectors, it is still interesting to investigate whether the randomized Kaczmarz algorithm works with random initialization, similar to the results in \cite{tan2019online,zhang2020}, and we leave it as a possible future direction.
\subsection{Other probabilistic models}
Sections~\ref{sec:step3} and~\ref{sec:step4} verify the deterministic condition in Theorems~\ref{thm:main1} and~\ref{thm:main2} when the sensing vectors are sampled i.i.d. from a uniform distribution on the sphere or generated from a unitary model. However, there exist many other models of generating sensing vectors, such as the coded diffraction model in \cite{Wei_2015}, and it would be interesting to see whether the deterministic condition in Theorems~\ref{thm:main1} and~\ref{thm:main2} could be verified for more generic models.
\section{Summary}
This paper justifies the convergence of the randomized Kaczmarz algorithm for phase retrieval of complex-valued objects. Specifically, the paper first establishes a deterministic condition for its convergence, and then demonstrates that when the sensing vectors are sampled uniformly from a unit sphere in $\mathcal{C}^n$ and the number of sensing vectors $m$ satisfies $m>O(n\log n)$ as $n,m\rightarrow\infty$, then this deterministic condition holds with high probability.
\bibliographystyle{spmpsci}
|
1,314,259,994,970 | arxiv | \section{Introduction}
\label{sec:intro}
The goal of speech representation learning is to find a transform from speech that makes high-level information more accessible to SLP (Speech and Language Processing) downstream tasks, as the speech signal possesses a rich set of acoustic and linguistic content, including phonemes, words, semantic meanings, tone, speaker characteristics, and even sentiment information.
In this paper, we propose Mockingjay to learn speech representations through unsupervised training without the use of any labels.
We use multi-layer transformer encoders and multi-head self-attention \cite{TRANSFORMER} to achieve bidirectional encoding; this framework allows our model to consider past and future contexts at the same time.
To achieve unsupervised pre-training for speech representations, Mockingjay learns under the proposed Masked Acoustic Model (MAM) task. During training, masked frames are given, and the model learns to reconstruct and predict the original frames.
Hence we gave the name Mockingjay, a bird that mimics sound.
The proposed framework is illustrated in Figure~\ref{fig:training}.
\begin{figure}[t]
\centering
\includegraphics[width=\linewidth]{training.png}
\caption{The proposed Masked Acoustic Model pre-training task, 15\% of the input frames are masked to zero at random.}
\label{fig:training}
\vspace{-10pt}
\end{figure}
\subsection{Related work}
\label{sssec:related work}
Unsupervised speech representation learning \cite{WAVENET_AUTOENCODERS, SPEECH2VEC, AUDIO_WORD2VEC, CPC, APC, WAV2VEC, VQWAV2VEC, ROBUSTCPC, MBV} is effective in extracting high-level properties from speech. SLP downstream tasks can be improved through speech representations because surface features such as log Mel-spectrograms or waveform can poorly reveal the abundant information within speech.
Contrastive Predictive Coding (CPC)~\cite{CPC} and wav2vec~\cite{WAV2VEC} use a multi-layer CNN to encode past context, representations are learned by predicting the future in latent space under a contrastive binary classification task.
Autoregressive Predictive Coding (APC)~\cite{APC} uses autoregressive models to encode temporal information of past acoustic sequence; the model predicts future frames like an RNN-based language model~\cite{RNN}, optimized with reconstruction loss.
Unidirectional models are commonly used in the previous approaches \cite{WAVENET_AUTOENCODERS, SPEECH2VEC, AUDIO_WORD2VEC, CPC, APC, WAV2VEC}. However, this constraint on model architectures limits the potential of speech representation learning.
\begin{figure}[t]
\centering
\includegraphics[width=\linewidth]{model.png}
\caption{The proposed Mockingjay training framework.}
\label{fig:model}
\vspace{-10pt}
\end{figure}
The recently proposed vq-wav2vec~\cite{VQWAV2VEC} approach attempts to apply the well-performing Natural Language Processing (NLP) algorithm BERT~\cite{BERT} on continuous speech. Input speech is discretized to a K-way quantized embedding space, so continuous speech could act like discrete units similar to word tokens in NLP tasks. In vq-wav2vec~\cite{VQWAV2VEC}, an exhaustive two-stage training pipeline with massive computing resources is required to adapt speech to the NLP algorithm, as the quantization process is against the continuous nature of speech. Unlike \cite{VQWAV2VEC} that adapts speech to BERT~\cite{BERT} through quantization, the proposed approach can be seen as a modified version of BERT~\cite{BERT} for direct application on continuous speech.
\subsection{Proposed Method}
\label{sssec:proposed method}
Unlike previous left-to-right unidirectional approaches that only consider past sequences to predict information about future frames, the proposed method allows us to train a bidirectional speech representation model, alleviating the unidirectionality constraint of previous methods.
As a result, the Mockingjay model obtains substantial improvements in several SLP tasks.
Moreover, as previous approaches restrict the power of the pre-trained models to representation extraction only \cite{CPC, APC, WAV2VEC, VQWAV2VEC}, the proposed method is robust as it can be fine-tuned easily on downstream tasks. We show that fine-tuning for 2 epochs easily acquires significant improvement.
The proposed approach outperforms other representations and features. When compared to the commonly used log Mel-features, we outperformed it by 35.2\% (absolute improvement) for phoneme classification accuracy, 28.0\% (absolute improvement) for speaker recognition accuracy, and 6.4\% (absolute improvement) for sentiment discrimination accuracy on a spoken content dataset unseen during pre-train.
We also experiment in low resource settings to show that Mockingjay is capable of improving supervised training in real-life low-resource scenarios.
With only 0.36 hours (0.1\%) of transcribed speech, the proposed approach outperforms Mel-features with 360 hours (100\%) of labels.
\section{MOCKINGJAY}
\label{sec:mockingjay}
In this section, we first introduce model architecture and its designs, secondly we explain the proposed unsupervised context prediction task, and finally we explain how the proposed model is used with downstream task models.
\subsection{Model Architecture}
\label{sssec:model architecture}
We use a multi-layer Transformer encoder with multi-head self-attention for left-and-right bidirectional encoding, this architecture is illustrated in Figure~\ref{fig:model}.
Each encoder layer has two sub-layers, the first is a multi-head self-attention network, and the second is a feed-forward layer, each sub-layer has a residual connection followed by layer normalization \cite{LAYERNORM}, based on the design described in \cite{TRANSFORMER}.
All encoder layers in the model, as well as the sub-layers, produce outputs of identical dimensions denoted as $H_{dim}$.
In Figure~\ref{fig:model}, we denote the feed-forward size as $F_{dim}$, the number of self-attention heads as $A_{num}$, and the total number of Transformer layers as $L_{num}$. The Mockingjay representations can be extracted from the Transformer encoders' hidden states and labeled as $Hidden$, we explain how they are used as representations in Section~\ref{sssec:incorporating with downstream tasks}.
Since Transformer encoders contain no recurrence and convolution, we use positional encoding to make our model aware of the input sequence order \cite{TRANSFORMER}. As direct addition of acoustic features to positional encoding may lead to potential training failure \cite{DOWNSAMPLE}, the input frames are first projected linearly to the hidden dimension of $H_{dim}$ before adding with positional encoding \cite{TRANSFORMER_ASR}. We use sinusoidal positional encoding instead of learnable positional embeddings~\cite{LEARNABLE_EMBEDDING} because acoustic features can be arbitrarily long with high variance \cite{TRANSFORMER_ASR}.
We apply downsampling on input features to adapt our model to long sequences. To reduce the length of frames by a factor of $R_{factor}$, we use the reshape technique from \cite{DOWNSAMPLE, TRANSFORMER_ASR} by stacking $R_{factor}$ consecutive frames into one step.
\subsection{Masked Acoustic Modeling}
\label{sssec:masked acoustic modeling}
We propose the Masked Acoustic Modeling task, where we randomly select 15\% of the input frames, and the model predicts the selected frames based on its left and right context, as illustrated in Figure~\ref{fig:training}.
During training, we add a prediction head consisting of two layers of feed-forward network with layer-normalization, using the last encoder layer as its input. We use L1 Loss to minimize reconstruction error between prediction and ground-truth frames on the selected 15\%.
The prediction head is not used once the model is trained.
During training, for the selected 15\% of frames, 1) we mask it all to zero 80\% of the time, 2) replace all with a random frame 10\% of the time, and 3) leave the frames untouched 10\% of the time.\footnote{This process is similar to the Cloze task~\cite{MLM} and the Masked Language Model task from BERT~\cite{BERT}, but we mask frames of speech to zero instead of using the MASK token.}
We introduce this sub-random process instead of always masking the frames to alleviate the mismatch between training and inference, as masked frames do not appear during inference time.
Note that in contrast to BERT~\cite{BERT}, where the sub-random process is performed token-wise on the i-th chosen token, our sub-random process is performed utterance-wise. In other words, our model may receive inputs as ground-truth frames for 3) 10\% of the time, rather than having some of the inputs always augmented as in \cite{BERT}.
To avoid the model exploiting local smoothness of acoustic frames, we propose additional consecutive masking where we mask $C_{num}$ consecutive frames to zero. The model is required to infer on global structure rather than local information.
We also use dynamic masking~\cite{ROBERTA} where masking patterns are sampled from a uniform distribution every time we feed a sequence to the model, unlike the static mask employed in \cite{BERT} where masking is performed during data preprocessing.
We only use a single context prediction task to train our representation model, as suggested by \cite{ROBERTA}, unlike BERT~\cite{BERT} and ALBERT~\cite{ALBERT}, which need two tasks to train their language models.
In our preliminary experiments, we found that the sentence prediction task used in \cite{BERT, ALBERT} is not helpful, as additional tasks can potentially harm training behavior. We do not include details due to space limitations.
\subsection{Incorporating with Downstream Tasks}
\label{sssec:incorporating with downstream tasks}
Mockingjay representations are essentially the Transformer encoder hidden states.
There are many ways to incorporate learned representations to downstream tasks.
In this work, we mainly extract representations from the last layer.
However, we also expose the deep internals of Mockingjay to downstream models, where we use a mixture of representations from all layers, similar to the ELMO~\cite{ELMO} approach. In other words, we use a learnable weighted sum to integrate hidden states from all layers.
Last but not least, a pre-trained Mockingjay model can be fine-tuned with downstream models to create improving results, we update the pre-trained Mockingjay together with random initialized downstream task models.
\section{IMPLEMENTATION}
\label{sec:implementation}
In this work, we use two types of features as our model's output reconstruction target: the Mel-scale spectrogram and the linear-scale spectrogram.
As Mel-scale spectrogram is a more concise acoustic feature when compared to linear-scale spectrogram, we propose two model settings: \textit{BASE} and \textit{LARGE}.
Both of these models take Mel-features as input, and transform input Mel-features into high-level representations.
They use the same hidden dimension size of $H_{dim}$=768, feed-forward size of $F_{dim}$=3072, and attention heads of $A_{num}$=12, with the exception of layer number $L_{num}$, downsample factor $R_{factor}$, and consecutive masking number $C_{num}$, the differences in model settings are listed in Table~\ref{tb:model_settings}. We further analyze their differences in our experiment section.
\begin{table}[th]
\vspace{-5pt}
\caption{The proposed BASE and LARGE model settings}
\label{tb:model_settings}
\centering
\begin{tabular}{c | cc }
\toprule
Model & BASE & LARGE \\
\midrule
Target & Mel & Linear \\
$L_{num}$ & 3 & 12 \\
$R_{factor}$ & 1 & 3 \\
$C_{num}$ & 7 & 3 \\
parameters & 21.4M & 85.4M \\
\bottomrule
\end{tabular}
\vspace{-10pt}
\end{table}
The proposed Mockingjay models are pre-trained on the LibriSpeech \cite{LIBRISPEECH} corpus train-clean-360 subset.
We use Adam \cite{ADAM}
where the learning rate is warmed up over the first 7\% of 500k total training steps to a peak value of 4e-4 and then linearly decayed. A dropout~\cite{DROPOUT} of 0.1 is applied on all layers and attention weights. For downstream task fine-tuning, most of the hyperparameters are the same as in pre-training, with the exception of a learning rate of 4e-3, and the number of training epochs is set to 2 (which is approximately 50k steps). We train with a batch size of 6 using a single 1080Ti GPU.
We provide pre-trained models with our implementation; they are publicly available for reproducibility\footnote{https://github.com/andi611/Mockingjay-Speech-Representation}.
\section{EXPERIMENT}
\label{sec:experiment}
Following previous works \cite{WAVENET_AUTOENCODERS, SPEECH2VEC, AUDIO_WORD2VEC, CPC, APC, WAV2VEC, VQWAV2VEC}, we evaluate different features and representations on downstream tasks, including: phoneme classification, speaker recognition, and sentiment classification on spoken content.
For a fair comparison, each downstream task uses an identical model architecture and hyperparameters despite different input features.
We report results from 5 of our settings:
1) \textit{BASE} and 2) \textit{LARGE} where Mockingjay representations are extracted from the last encoder layer, 3) the \textit{BASE-FT2} where we fine-tune \textit{BASE} with random initialized downstream models for 2 epochs, and 4) the \textit{BASE-FT500} where we fine-tune for 500k steps, and finally 5) the \textit{LARGE-WS} where we incorporate hidden states from all encoder layers of the \textit{LARGE} model through a learnable weighted sum.
We did not fine-tune the \textit{LARGE} model, as it is meant for extracting representations.
Empirically we found that even with supervised training, a randomly initialized Mockingjay model followed by any downstream model is hard to train from scratch. This shows that the proposed pre-training is essentially indispensable.
\vspace{-10pt}
\subsection{Comparing with other representations}
\label{sssec:other representations}
The proposed approaches are mainly compared with APC~\cite{APC} representations, as they also experiment on phone classification and speaker verification. As reported in \cite{APC}, the APC approach outperformed CPC representations \cite{CPC, WAV2VEC, ROBUSTCPC} in both tasks, which makes APC suitable as a strong baseline.
APC uses a unidirectional autoregressive model.
We compare the proposed approach with APC to show that our bidirectional approach has advantages in speech representation learning.
For a fair comparison, we pre-train APC using its official implementation with the reported ideal parameters and settings, but expand the model's hidden size to $H_{dim}$=768 to match ours. We also report results on 160-dimensional log Mel-features, which helps evaluate the accessibility of speech information from regular acoustic features.
\vspace{-10pt}
\begin{figure}[t]
\centering
\includegraphics[width=\linewidth]{phone.png}
\caption{Comparing representations with phone classification accuracy across different amounts of transcribed data.}
\label{fig:phone}
\vspace{-10pt}
\end{figure}
\subsection{Phoneme Classification}
\label{sssec:phoneme classification}
To measure the accessibility of phonetic information, we train linear phone classifiers using Mel-features, APC and Mockingjay representations from the LibriSpeech train-clean-360 subset.
We obtain force-aligned phoneme sequences with the Montreal Forced Aligner~\cite{ALIGNMENT}, where there are 72 possible phone classes.
Testing results on the LibriSpeech test-clean subset are presented in Figure~\ref{fig:phone}.
In the case where all 360 hours of labels are used to train the classifier, \textit{BASE} and \textit{LARGE} representations improve accuracy by 11.8\% and 15.2\% over Mel-features.
The \textit{BASE-FT2} model outperforms all representations after 2 epochs of fine-tuning, with 10.2\% and 35.2\% absolute improvement over APC and Mel-features, respectively. We observe that fine-tuning for 2 epochs is enough to reveal our method's potential as there is only a small gap (3.9\%) between \textit{BASE-FT2} and \textit{BASE-FT500}. Furthermore, \textit{LARGE-WS} improves over \textit{LARGE}, just as we expected.
To demonstrate how pre-training on speech can improve supervised training in resource constrained scenarios where human labels are scarce, we train the classifier with reduced amounts of training data.
The performance variation of different methods are plotted in Figure~\ref{fig:phone}, where we measure over various intervals of constrained training data to observe performance drop.
Both \textit{BASE} and \textit{LARGE} increase accuracy over Mel-features across various amounts of transcribed data, whereas the APC approach performs well with the full resource but fails to generalize to limited amounts of labeled data. In the case where there are only 0.36 hours of data available, we improve accuracy by 22.7\% (an absolute improvement over Mel-features). Note that with only 0.36 hours (0.1\%) of labels available, \textit{BASE-FT2} (57.9\% acc) even outperformed Mel-features (49.1\% acc) that uses all 360 hours (100\%) of labeled data. We conclude that pre-training Mockingjay on speech substantially improves the performance on supervised tasks that require human annotations.
\vspace{-8pt}
\subsection{Speaker Recognition}
\label{sssec:speaker recognition}
To demonstrate that the proposed approach performs consistently for all SLP downstream tasks, we report speaker recognition results on the LibriSpeech 100 hour selected subset, where the train/test split is performed randomly with a 9:1 ratio, and there are 63 possible speakers.
We trained a simple one-layer RNN classifier for speaker recognition using different representations; results are listed in Table~\ref{tb:result} for comparison.
The proposed \textit{BASE} and \textit{LARGE} representations outperformed both APC and Mel-Features. \textit{BASE-FT2} further improves upon \textit{BASE} while achieving the highest accuracy, whereas \textit{LARGE-WS} also outperforms \textit{LARGE}.
\vspace{-8pt}
\subsection{Sentiment Classification on Spoken Content}
\label{sssec:sentiment classification}
To demonstrate the domain-invariant transferability of the proposed representation across different datasets, the Mockingjay model is pre-trained on LibriSpeech and applied on the MOSEI~\cite{MOSEI} dataset. We also use a simple one-layer RNN classifier, where the model is trained to extract linguistic meanings from speech and to discriminate between sentiments.
The results listed in Table~\ref{tb:result} lead to an identical conclusion as in the speaker recognition task discussed above. Except that in the case of sentiment classification, \textit{LARGE-WS} achieved the highest score without the need of fine-tuning, demonstrating that a deeper model has great potential for extracting general speech representations. To conclude this section, we claim that the proposed representations are general and can be used on datasets with various unseen domains.
\vspace{-5pt}
\begin{table}[t]
\vspace{-10pt}
\caption{Comparing different methods with different tasks.}
\label{tb:result}
\centering
\begin{tabular}{l|c|c}
\toprule
\textbf{Methods} & \textbf{Speaker (acc)} & \textbf{Sentiment (acc)} \\
\midrule
Mel-Features & 70.06 & 64.63 \\
APC & 85.88 & 65.97 \\
Base & 94.54 & 67.38 \\
BaseFT2 & \textbf{98.05} & 68.45 \\
Large & 96.26 & 70.07\\
LargeWS & 96.40 & \textbf{71.05} \\
\bottomrule
\end{tabular}
\vspace{-10pt}
\end{table}
\section{CONCLUSION}
\label{sec:conclusion}
The proposed representation contains a variety of knowledge, including but not limited to phonetic, speaker, and sentiment information. We improve performance for a wide range of downstream tasks, and show promising results in low resource settings, as the learned speech representations are robust and can be transferred to different tasks across different datasets. In future work, we will investigate and deploy Mockingjay representations on more downstream SLP tasks, including ASR, voice conversion, and speech translation.
\vfill
\pagebreak
\bibliographystyle{IEEEbib}
|
1,314,259,994,971 | arxiv | \section{Introduction}
Recent advances in the scattering amplitude-based approach to the Post-Minkowskian expansion of classical general relativity
~\cite{Damour:2016gwp,Damour:2017zjx,Damour:2019lcq,Bjerrum-Bohr:2018xdl,Cheung:2018wkq,Kosower:2018adc,Bern:2019nnu,Antonelli:2019ytb,Cristofoli:2019neg,Kalin:2019rwq,Bjerrum-Bohr:2019kec,Cristofoli:2020uzm,Parra-Martinez:2020dzs,DiVecchia:2020ymx,Damour:2020tta,DiVecchia:2021ndb,Bern:2021dqo,DiVecchia:2021bdo,Bjerrum-Bohr:2021vuf,Bjerrum-Bohr:2021din,Bini:2021gat,Bautista:2021wfy,Cristofoli:2021vyo,Herrmann:2021lqe,Herrmann:2021tct,Mougiakakos:2021ckm,Jakobsen:2021smu,Damgaard:2021ipf,Brandhuber:2021eyq} have demonstrated that this new approach holds the promise
of significantly changing the efficiency of computations in general relativity.
The input from scattering amplitude calculations is increasing fast. At this point, full third-order Post-Minkowskian amplitude calculations of the scattering of
two massive objects are now available~\cite{Bern:2019nnu,DiVecchia:2020ymx,Damour:2020tta,DiVecchia:2021ndb,Bjerrum-Bohr:2021din,Brandhuber:2021eyq},
and the first results for fourth Post-Minkowskian order have already appeared~\cite{Bern:2021dqo}.
This amplitude-based approach generically computes one observable:
the scattering angle in what we can call the hyperbolic regime of the two-body problem in gravity. Although of interest in themselves, eventually
these results should be used to predict gravitational waveforms and other observables associated with two massive objects bound to each other. One
strategy for going from the scattering regime to the bound-state regime is based on the Effective One-Body (EOB) formalism
\cite{Buonanno:1998gg,Buonanno:2000ef}, suitably adapted from Post-Newtonian to Post-Minkowskian formulations
\cite{Damour:2016gwp,Damour:2017zjx,Damour:2019lcq}. So hugely
successful based on Post-Newtonian computations, it seems timely to revisit
this EOB approach and explore both its flexibility and its power of prediction.
The aim of this paper is to gather known results up to third Post-Minkowskian order in Newton's constant $G_N$ and include them in the most compact
manner in a Post-Minkowskian version of the EOB formalism. The choice
of isotropic coordinates is crucial for simplicity. Interestingly, once
in isotropic coordinates, we find that the simplest approach is to not expand around the probe limit of the two-body problem, which would
correspond to motion in the background metric of a Schwarzschild black hole. The way to achieve this is to enlarge the notion of the effective
metric so that it becomes energy dependent. This possibility appears
to be intuitively appealing and understandable for the gravitational scattering
of two massive objects which, due to the non-linearities of general relativity beyond the probe limit, create backreactions that depend on
energy and momentum. Although the effective metric itself depends on the energy, we can still impose the standard quadratic mass-shell condition: we will find the
correct map that describes the gravitational scattering of two massive objects such that
the scattering angle deduced from that metric coincides with the one computed from the Post-Minkowskian expansion of the full theory.
Choosing an angular momentum map that differs from the one conventionally
used~\cite{Buonanno:1998gg,Buonanno:2000ef} connects most straightforwardly
to the scattering amplitude-based approach to general relativity, and
we indeed end up describing the reduced
problem in terms of a massive object in an effective metric that only reduces to the Schwarzschild metric
in the probe limit. Moreover, as we will demonstrate, the motion is entirely described by this metric without, at least up to the present order,
introduction of correction terms of non-metric kind. By a canonical
transformation, we also recover the condition based on the standard angular
momentum map, without the need to include non-metric corrections. Expanding our metric around the Schwarzschild metric can rephrase
the solution in terms of the combination of a Schwarzschild metric plus additional non-metric terms, finding complete agreement with
the solution given in that form by Damour in ref. \cite{Damour:2019lcq}.
\section{Post-Minkowskian Kinematics and the Effective Metric}
While the EOB formalism is a standard tool for the gravitational wave physics community~\cite{Buonanno:1998gg,Buonanno:2000ef,Damour:2000we,Damour:2001tu,Buonanno:2005xu}, it is not widely known in the particle physics community.
Since the aim of this paper is to explore some of the consequences of
calculating classical general relativity observables with modern
scattering-amplitude methods, we begin this
section with an elementary introduction to the EOB formalism, phrased in a manner that may be more accessible to particle physicists.
We begin by considering free-particle kinematics in Minkowskian space. The aim is to describe the dynamics of two masses $m_1$ and $m_2$ moving with relative
velocity
\begin{equation}
v ~\equiv~ |\vec{v}| ~=~ |\vec{v}_1 - \vec{v}_2|
\end{equation}
in terms of a reduced mass
\begin{equation}
\mu ~\equiv~ \frac{m_1m_2}{m_1+m_2}
\end{equation}
moving with the same velocity $v$. It is convenient to introduce the total mass $M \equiv m_1 + m_2$ so that $\mu = m_1m_2/M$.
In terms of the original relativistic kinematics, the Lorentz contraction factor is
\begin{equation}
\gamma ~=~ \frac{E^2 - m_1^2 - m_2^2}{2m_1m_2} ~=~ \frac{p_1\cdot p_2}{m_1m_2}
\label{gamma}
\end{equation}
with $p_i$ being the two momenta and where $E$ is the total
energy. Solving eq.~(\ref{gamma}) for $E$ in this frame, we have
the relation
\begin{equation}\label{e:E}
E = M\sqrt{1 + 2\nu(\gamma - 1)} ~,
\end{equation}
where
\begin{equation}
\nu ~\equiv~ \frac{m_1m_2}{(m_1+m_2)^2} ~=~ \frac{\mu}{M} ~.
\end{equation}
Denoting by ${\cal{E}}_{\rm eff} =\mu\gamma$ the energy of the reduced mass $\mu$, this leads to the relation
\begin{equation}
H=E= M\sqrt{1 + 2\nu\left(\frac{{\cal{E}}_{\rm eff}}{\mu} - 1\right)} ~.
\label{Emap}
\end{equation}
This is the {\em energy map.}
\smallskip
To relate the corresponding magnitude of the three-momentum $p_{\rm eff} = |\vec{p}_{\rm eff}|$ of the
reduced mass to the center-of-mass momentum $p_{\infty}$
of the two masses, we use free relativistic kinematics with $\vec{p}_{\rm eff} = \mu\gamma\vec{v}$ and
$\gamma = 1/\sqrt{1-{\vec v}^2}$. This gives
\begin{equation}
\left (p_{\rm eff}\over\mu\right)^2~=~{
(E^2-(m_1+m_2)^2)(E^2-(m_1-m_2)^2)\over 4m_1^2m_2^2}
\end{equation}
which is easily compared to the center-of-mass momentum $p_\infty$
\begin{equation}
p_{\infty}^2 ={(E^2-(m_1+m_2)^2)(E^2-(m_1-m_2)^2)\over 4E^2} ~,
\end{equation}
yielding
\begin{equation}
\frac{ p_{\rm eff}}{\mu} ~=~ \frac{p_\infty E}{m_1m_2} ~.
\label{pmap}
\end{equation}
This is the {\em momentum map}.
\smallskip
Finally, we wish to relate the angular momentum $J_{\rm eff}$ of the reduced mass to the angular momentum $J$ of the two-particle system. We first
choose to do
this by insisting that the impact parameter $b$ remains fixed. This is in contradistinction to the conventionally used prescription
of~\cite{Buonanno:1998gg} where, instead, angular momentum is kept fixed. We find our chosen relation more
convenient for the following analysis because it more directly connects with the expression for the scattering angle we obtain from the
two-body problem. The possibility of fixing $b$ instead of $J$ has been mentioned in ref.~\cite{Vines:2017hyw} but not pursued
there (see also ref.~\cite{Vines:2018gqi} for a related discussion). Fixing $b$, we get
\begin{equation}
b ~=~ \frac{J}{p_{\infty}} ~=~ \frac{J_{\rm eff}}{p_{\rm eff}}
\implies J_{\rm eff} ~=~ J\frac{p_{\rm eff}}{p_{\infty}}=J\,{E\over M}~.
\label{Jmap}
\end{equation}
This is our {\em angular momentum map}. We shall later show how to obtain the same results based on the conventional angular momentum
map where, instead, one equates $J$ with $J_{\rm eff}$. This will involve a canonical transformation, thus leaving physics invariant.
\section{The effective metric}
\label{sec:effective-metric}
So far, we have not considered interactions. One important lesson from
the scattering-amplitude approach to gravitational scattering in
general relativity is that at least up to, and
including third Post-Minkowskian order, there exists, in isotropic coordinates, a very simple relationship between center-of-mass momentum $p$ and the
effective classical potential $V_{\rm eff}(r,p)$ of the form~\cite{Bjerrum-Bohr:2019kec,Kalin:2019rwq,Bern:2019nnu}
\begin{equation}
p^2 = p_{\infty}^2 -V_{\rm eff}(r,E)
\label{kinematics}
\end{equation}
where, in $D=4$ dimensions,
\begin{equation}
V_{\rm eff}(r,E) = -\sum_{n=1}^{\infty} f_n \left(G_NM\over r\right)^n.
\end{equation}
The coefficients $f_i$ are deduced from the scattering angle
\begin{equation}
\chi = G_N\chi_1 + G_N^2\chi_2 + G_N^3\chi_3 + O(G_N^4)
\end{equation}
extracted from scattering-amplitude calculations order-by-order in the coupling $G_N$ as shown.
Up to third Post-Minkowskian order, the $f_i$-coefficients
extracted from the amplitude computations read~\cite{Damgaard:2021ipf}
\begin{eqnarray}\label{e:f1PM}
&f_1 =& 2(2\gamma^2 -1) {\mu^2M\over E},\\
\label{e:f2PM} &f_2 =& \frac{3 \left(5 \gamma ^2-1\right)}{2
} {\mu^2M\over E},\\
\label{e:f3PM} &f_3 =&-\mu^2\left(-{3\over2} {\left(2 \gamma ^2-1\right) \left(5
\gamma ^2-1\right)\over\gamma^2-1}
+2 {12 \gamma ^4-10 \gamma
^2+1\over \gamma^2-1} {E\over M} \right)\\
\nonumber &{\displaystyle -{2\over3}{\mu^2\nu M\over E }}&
\Bigg(2\gamma (14 \gamma ^2+25)
-\frac{ \left(1-2 \gamma
^2\right)^2}{\left(\gamma
^2-1\right)^2}(8-5 \gamma
^2)\sqrt{\gamma^2 - 1}
+\left(\frac{6 \left(4 \gamma ^4-12 \gamma ^2-3\right)}{\sqrt{\gamma ^2-1}}-{ (6 \gamma ^3-9 \gamma ) \left(1-2 \gamma
^2\right)^2\over \left(\gamma
^2-1\right)^2}\right)\arccosh
(\gamma )\Bigg),
\end{eqnarray}
including all classical terms that contribute to this order. At fourth Post-Minkowskian order, radiation must be taken into account and it is not yet obvious
how this may affect, perturbatively in $G_N$, the order-by-order determination of the coefficients $f_i$.
Our aim now is to provide an effective one-body metric $g_{\mu\nu}^{\rm eff}$ for the reduced-mass problem that reproduces the scattering angle computed from
the expression of eq.~(\ref{angle}). Even if we specify isotropic coordinates it will quickly become clear that such an effective metric $g_{\mu\nu}^{\rm eff}$ is not unique
and part of our present purpose is therefore to explore the most optimal choice.
A general parametrization of $g_{\mu\nu}^{\rm eff}$ can be provided by
\begin{equation}
ds_{\rm eff}^2 = A(r)dt^2 - B(r)\left(dr^2 +r^2(d\theta^2+\sin^2\theta d\varphi^2)\right)
\end{equation}
where $A(r)$ and $B(r)$ are so far undetermined functions of $r$.
Because of the large set of coordinate transformations that are permissible within the choice of isotropic
coordinates, we parameterize the solutions employing the {\em Ansatz}
\begin{equation}\label{e:AB}
A(r)=\left(1-h(r)\over 1+h(r)\right)^2; \qquad B(r)=\left(1+h(r)\right)^4.
\end{equation}
In the limit $\nu \to 0$, we expect this effective metric to approach the Schwarzschild metric
which in isotropic coordinates corresponds to
\begin{equation}
h(r) \to \frac{G_NM}{2r}.
\end{equation}
One standard method for computing the scattering angle in such an
external metric is to determine the principal function $\mathcal S$ of the associated Hamilton-Jacobi equation
\begin{equation}\label{e:HJ}
g^{\alpha\beta}_{\rm eff}\partial_{\alpha}\mathcal S\partial_{\beta}\mathcal S = \mu^2.
\end{equation}
Because of conservation of the energy $ {\cal E}_{\rm eff}$ and
angular momentum $ J_{\rm eff}$, and considering the motion in the
orbital equatorial plane $\theta=\pi/2$, we use the standard separated {\em ansatz}
\begin{equation}
{\mathcal S}(r,t,\varphi) = {\cal E}_{\rm eff}t + J_{\rm eff}\varphi + W(r) ~,
\end{equation}
to obtain
\begin{equation}
\frac{{\cal E}^2_{\rm eff}}{A(r)} - \frac{J_{\rm eff}^2}{B(r)r^2} - \frac{1}{B(r)}\left(\frac{d W(r)}{d r}\right)^2 = \mu^2 ,
\end{equation}
and hence the scattering angle
\begin{equation}
\frac{\chi}{2} = J_{\rm eff}\int_{r_m}^\infty\frac{dr}{r^2}\frac{1}{\sqrt{\frac{B(r)}{A(r)}{\cal E}_{\rm eff}^2 - \frac{J_{\rm eff}^2}{r^2} - B(r)\mu^2}}-{\pi\over2},
\label{chiEOB}
\end{equation}
where $r_m$ is the distance of the closest radial approach in the scattering. This quantity is not independent and follows from
the other parameters of the expression~(\ref{chiEOB}) through the condition
\begin{equation}
p_r=\sqrt{\frac{B(r)}{A(r)}{\cal E}_{\rm eff}^2 - \frac{J_{\rm eff}^2}{r^2} - B(r)\mu^2} = 0 ~~{\rm at}~~~ r=r_m.
\end{equation}
Insisting on the angular momentum map of eq.~(\ref{Jmap}) and inserting also the momentum map~(\ref{pmap}), we can rewrite this as
\begin{equation}
\frac{\chi}{2} = b
\int_{r_m}^\infty\frac{dr}{r^2}\frac{1}{\sqrt{\frac{B(r)}{A(r)}{{\cal
E}_{\rm eff}^2\over p_{\rm eff}^2} - \frac{b^2}{r^2} -
{B(r)\mu^2\over p_{\rm eff}^2}}}-{\pi\over2}.
\label{angleeff}
\end{equation}
It is important to stress that we are employing momentum and angular momentum maps that were naturally provided at Minkowskian infinity and which
are now taken to hold also in the presence of interactions. To fix,
order-by-order in the coupling $G_N$, the so far unknown functions
$A(r)$ and $B(r)$, we compare with the expression for the scattering angle obtained from
the kinematic relation~(\ref{kinematics}). This provides us with an alternative form of the radial action $W$ and
hence
\begin{equation}
\frac{\chi}{2} = -\int_{\hat{r}_{m}}^{\infty} dr \frac{\partial
p_r}{\partial J} - \frac{\pi}{2},
\end{equation}
where, after using $p_r^2 = p^2-{J^2\over r^2}$ and substituting eq.~(\ref{kinematics}), we obtain
\begin{equation}
\frac{\chi}{2} = b \int_{\hat{r}_{m}}^{\infty} \frac{dr}{r^2} \frac{1}{\sqrt{1 - \frac{b^2}{r^2}- \frac{V_{\rm eff}(r,E)}{p_{\infty}^2}}} - \frac{\pi}{2}.
\label{angle}
\end{equation}
Because the two expressions~(\ref{angleeff}) and~(\ref{angle}) are so
similar in form, we will now impose the strong requirement of the two
integrands being equal. From the equality of the integrands, it follows
that $r_m = \hat{r}_m$ since
the condition $p_r=0$ (which is the zero of the denominator) is the same for the two expressions.
Equality of the integrands is not required but since we
will be able to find systematic solutions to this condition, we impose
it. It translates into
\begin{equation}\label{e:VtoE}
1 - \frac{V_{\rm eff}(r,E)}{p_{\infty}^2}={B(r)\mu^2\over p_{\rm
eff}^2}\left( \frac{{\cal E}_{\rm eff}^2}{\mu^2 A(r)} - 1\right)~.
\end{equation}
This
expression can, after imposing ${\cal E}_{\rm eff}^2 = \mu^2 +
p_{\rm eff}^2= \gamma^2\mu^2$, be written
\begin{equation}
1 - \frac{V_{\rm eff}(r,E)}{p_{\infty}^2}={B(r)\over \gamma^2-1}\left( \frac{\gamma^2}{A(r)} - 1\right).
\label{VtoAB}
\end{equation}
It is clear at this stage that we should not be able to find solutions for the metric functions $A(r)$ and $B(r)$ that are independent
of $\gamma$, and they will, therefore, utilizing the above identification also depend on the effective energy. But if our objective
is to identify a class of metrics that reproduce the scattering angle of the actual two-body problem using an EOB formalism,
there is nothing to prevent us from pursuing this approach. Indeed, the only observable information we have at our disposal from
the amplitude side is the scattering angle, and all remaining dynamics must be extracted from it. So the condition~(\ref{VtoAB})
fulfils our requirement.
Using our parametrization for the metric coefficients in~\eqref{e:AB},
this becomes a polynomial equation of sixth order in $h(r)$
\begin{equation}\label{e:Heq}
\left(h(r)+{\gamma-1\over\gamma+1}\right)\left(h(r)+{\gamma+1\over\gamma-1}\right) (1+h(r))^4
=(1-h(r))^2\left(1+{E^2\over (\gamma^2-1)M^2}{V_{\rm eff}(r,E)\over \nu^2M^2}\right).
\end{equation}
This equation can always be solved in perturbation theory with $h(r)=\sum_{n\geq1} h_n (G M/r)^n$ for any perturbatively expanded
effective potential
$V_{\rm eff}=-\sum_{n\geq1} f_n (G_N M/r)^n$. It is clear that if we
had not used the simplifying ansatz~\eqref{e:AB} we would have, at
each new order in $G_N$, two new metric coefficients to fit for each
new condition from the scattering angle, allowing a large degree of
freedom in the parametrization of the effective metric.
\medskip
It is instructive to analyse in detail the first Post-Minkowskian
approximation.
Solving perturbatively for the coefficients $h_n$ in $h(r)=\sum_{n\geq1} h_n (G M/r)^n$, we obtain
\begin{align}
\label{e:h1} h_1&=\frac{1}{2} {E\over M}~,\\
\label{e:h2} h_2&=-\frac{3 \left(5 \gamma ^2-1\right)}{8\left(2 \gamma
^2-1\right)}\left(1-{M\over E}\right)\left(E\over M\right)^2~,
\end{align}
at the next order we split the expression for $h_3=h_3^{\rm
cons}+h_3^{\rm RR}$ into a conservative part
\begin{multline}\label{e:h3cons}
h_3^{\rm cons}=\Bigg({811 \gamma ^6-224 \gamma ^5-1665
\gamma ^4-288 \gamma ^3+659 \gamma ^2+200 \gamma -45\over 48\left(1-2 \gamma ^2\right)^2
\left(\gamma ^2-1\right)}
-\frac{\gamma \left(14 \gamma ^2+25\right)}{6 (\gamma -1) \left(2 \gamma ^2-1\right)}{M\over E}\Bigg) \left(1-{M\over E}\right)\, \left(E\over M\right)^3\cr
-\frac{(\gamma +1) \left(4 \gamma ^4-12 \gamma ^2-3\right)
}{2\left(\gamma ^2-1\right)^{3\over 2} \left(2 \gamma ^2-1\right)}
\arccosh(\gamma)\left(1-{M^2\over E^2}\right)\, \left(E\over M\right)^3,
\end{multline}
and a radiation-reaction part
\begin{equation}\label{e:h3rr}
h_3^{\rm RR}=(2\gamma^2-1)\left(\frac{\gamma \left(2 \gamma ^2-3\right) \arccosh(\gamma )}{4 (\gamma -1)^3 (\gamma +1)^2}-\frac{(\gamma +1) \left(5 \gamma ^2-8\right)}{12 \left(\gamma ^2-1\right)^{5/2}}\right)\left(1-{M^2\over
E^2}\right)\, \left(E\over M\right)^3 .
\end{equation}
One can argue whether the radiation-reaction terms $h_3^{\rm RR}$ should be
included here. We have kept them because they are needed to produce the correct scattering angle in the high-energy limit.
\bigskip
In the probe limit, $\nu\to0$, the total energy $E$ in~\eqref{e:E} becomes the total
mass $M$. Up to third Post-Minkowskian order,
and including the radiation-reaction contributions, we find that the corrections
$h_2$ and $h_3$ all vanish as $(E-M)$. We thus recover the Schwarzschild solution in
isotropic coordinates since
\begin{equation}
\lim_{\nu\to0} h_1=\frac12; \qquad\lim_{\nu\to0} h_i=0~\textrm{for}~i=2,3.
\end{equation}
Because the $f_i$ coefficients in~\eqref{e:f1PM}--\eqref{e:f3PM} are
proportional to $\mu^2=\nu^2M^2$, the effective potential has an overall factor
of $\nu^2$ and it is convenient to separate it out by defining $V^{\rm probe}_{\rm eff}(r,M)$ through $V_{\rm
eff}(r,E) \equiv \nu^2 V^{\rm probe}_{\rm eff}(r,M)+O(\nu^3)$. Since, furthermore,
$p_\infty^2=M^2\nu^2(\gamma^2-1)+O(\nu^3)$, we of course also recover
the probe potential for the Schwarzschild metric in
isotropic coordinates,
\begin{equation}
V^{\rm probe}_{\rm eff}(r,M)
=
M^2(\gamma^2-1)
-M^2\left(1+{G_NM\over2r}\right)^4\left(\gamma^2\left(1+{G_NM\over2r}\over
1-{G_NM\over2r}\right)^2-1\right)\,.
\end{equation}
The effective energy function in isotropic
coordinates we propose here corresponds to
\begin{equation}\label{e:Eeff}
{\cal E}_{\rm eff}^2 = \left(1-h(r)\over 1+h(r)\right)^2\left[\mu^2 + \frac{J_{\rm eff}^2}{r^2(1 + h(r))^4}
+ \frac{p_r^2}{(1+ h(r))^4}\right]
\end{equation}
which in the probe limit becomes
\begin{equation}\label{e:Eeffprobe}
({\cal E}^{\rm probe}_{\rm eff})^2 = \left(1-{G_NM\over2r}\over 1+{G_NM\over2r}\right)^2\left[\mu^2 + \frac{p^2}{(1+ {G_NM\over2r})^4}\right]
\end{equation}
thus reproducing the Schwarzschild Hamiltonian given in eq.~(77) of~\cite{Jaranowski:1997ky}.
\medskip
So far, we have managed to find a simple effective EOB metric $g_{\mu\nu}^{\rm eff}$ which correctly reproduces
the scattering of two masses up to third Post-Minkowskian order. The main use of an EOB metric
is in the pseudo-elliptic regime of bound orbits where the total energy (minus rest mass) is negative, and we now briefly
consider the use of the metric $g_{\mu\nu}^{\rm eff}$ in this regime.
An obvious first check of the metric would be to confirm that it reproduces the periastron shift of
bound orbits to second order in the Post-Minkowskian expansion. Clearly, to first Post-Minkowskian order, the
motion is Newtonian with a $1/r$ potential and closed orbits. Adding to this the second-order solution for $h(r)$,
\begin{equation}
h(r) = \frac{G_NE}{2r} + \frac{3G_N^2\left(5 \gamma ^2-1\right) E(E-M)}{8\left(2 \gamma ^2-1\right)r^2},
\end{equation}
it is a straightforward exercise to compute the periastron shift $\Delta\Phi$ from the EOB metric to this order in $G_N$.
The result is
\begin{equation}
\Delta\Phi
=
\frac{3\pi G_N^2M^2\mu^2}{2J^2}\left(\frac{E}{M}\right)(5\gamma^2 - 1),
\end{equation}
which agrees with the computation of ref.~\cite{Kalin:2019inp} where it was derived by analytic continuation from
the scattering parameters. In the limit $E \simeq M$ and $\gamma \simeq 1$ it agrees with the classic result
of Robertson for the two-body problem to that order (see chap~8.6 of~\cite{Weinberg:1972kfs}).
\smallskip
Finally, we can see how, conversely, the energy map (\ref{Emap}) emerges in the present setting. We start with our
condition~\eqref{e:VtoE} which imposes the correct scattering angle of the effective theory. We now keep ${\cal E}_{\rm eff}, p_{\rm eff}^2$, and
$\mu$ {\it a priori} unrelated and analyze the condition order-by-order in the coupling $G_N$. To first Post-Minkowskian order it reads:
\begin{equation}
{\mu^2+p_{\rm eff}^2-{\cal E}_{\rm eff}^2\over p_{\rm eff}^2}+
\left({f_1\over p_\infty^2}+{4 h_1\over p_{\rm eff}^2} (\mu^2-2{\cal
E}_{\rm eff}^2)\right) {G_N M\over r}+O(G_N^2)=0~.
\end{equation}
To zeroth order in $G_N$ we obtain the free particle relation ${\cal
E}_{\rm eff}^2=p_{\rm eff}^2+\mu^2$. To order $G_N$ we next get, after making use of the leading-order relation and after inserting the
expressions for $f_1$ from~\eqref{e:f1PM} and $h_1$
from~\eqref{e:h1},
\begin{equation}
\frac{{\cal E}_{\rm eff}}{\mu} = \sqrt{{f_1-4 p_\infty^2 h_1\over
f_1-8 p_\infty^2 h_1}} = \gamma = \frac{E^2 - m_1^2 - m_2^2}{2m_1m_2}
\end{equation}
which is the energy map (\ref{Emap}). From order $G_N^2$ and up this relationship is automatically satisfied by the
condition~\eqref{e:VtoE}.
\section{Comparison with earlier approaches}
It is interesting to observe that the full leading-order metric we deduced above is not of Schwarzschild form but rather
has the total mass $M = m_1 + m_2$ replaced by total energy $E$, with
\begin{equation}
h(r)=\sum_{n\geq1} \hat h_n(\gamma,M/E) \left(G_NE\over r\right)^n,
\end{equation}
so that, to first Post-Minkowskian order,
\begin{align}\label{1PMmetric}
A(r)=\left(1-\frac{G_NE}{2r} \over
1+\frac{G_NE}{2r} \right)^2 +{\cal O}(G_N^2) ;\qquad
B(r)=\left(1+\frac{G_NE}{2r}\right)^4 + {\cal O}(G_N^2) .
\end{align}
While this energy-dependent metric may appear as an intuitively appealing effective metric for the Post-Minkowskian problem
to this order, it seems to contradict the observation that to first order in the Post-Minkowskian
expansion the effective metric can be chosen to be exactly of Schwarzschild form~\cite{Damour:2016gwp}.
The resolution is as follows. Our condition for the effective metric $g_{\mu\nu}^{\rm eff}$ is that it solves the condition
(\ref{e:HJ}). As we have noted above, this leads us to solutions for
the effective metric that are energy dependent.
Instead, the conventional EOB formalism modifies the mass-shell condition in an alternative manner, replacing eq.~(\ref{e:HJ}) by
\begin{equation}\label{e:HJQ}
g^{\alpha\beta}_{\rm eff}\partial_{\alpha}\mathcal S\partial_{\beta}\mathcal S = \mu^2 + Q ~,
\end{equation}
where the function $Q$ absorbs all terms higher than quadratic in the momenta. Both prescriptions correct for the fact
that away from Minkowskian infinity we cannot insist on a purely quadratic equation in ${\cal E}_{\rm eff}$. The analysis
based on eq.~(\ref{e:HJQ}) in isotropic coordinates has first been performed in ref.~\cite{Damour:2019lcq}. Imposing the usual
angular momentum map $J = J_{\rm eff}$ the condition of correct scattering angle must then read, in our notation,
\begin{equation}
p_{\rm eff}^2+ W(R) = \bar B(R) \left({\mathcal E_{\rm eff}^2\over \bar A(R)}-\mu^2 -Q\right)
\end{equation}
where the functions $\bar{A}$ and $\bar{B}$ correspond to the Schwarzschild metric,
\begin{equation}
\bar A(R) =\left(1-{G_N M\over2R}\over 1+{G_N M\over 2R}\right)^2 ;\qquad
\bar B(R) = \left(1+{G_N M\over 2R}\right)^4
\end{equation}
and there is a rescaled three-momentum
\begin{equation}
\mathbf P^2= P_{\infty}^2 + W(R) ~.
\end{equation}
Comparing with the actual kinematical relation eq.~(\ref{kinematics}) of the two-body problem this allows us to identify
\begin{equation}
\mathbf P^2= {p_{\rm eff}^2\over p_{\infty}^2} p^2= \left(E\over
M\right)^2 p^2
\label{momentumPtop}
\end{equation}
and
\begin{equation}
W(R)= -{p_{\rm eff}^2\over p_\infty^2} V_{\rm eff}=-\left(E\over
M\right)^2 V_{\rm eff} (r,E)
\end{equation}
The two isotropic coordinates are related by $R= r\times (M/E)$ and as we see from eq.~(\ref{momentumPtop}) this is part
of the canonical transformation
\begin{equation}
(R, P_R)= \left(r {M\over E} , p_r {E\over M}\right) ~.
\end{equation}
Expanding the potential $W(R)=\sum_{n\geq1} \mu^2 w_n (G_NM/R)^n$ as in ref.~\cite{Damour:2019lcq} in terms of coefficients $w_i$ and after
taking into account the relation between the two radii $r$ and $R$, we find the identification
\begin{equation}
w_i = {f_i\over \mu^2} \left(M\over E\right)^{i-2} ~.
\end{equation}
Plugging in the coefficients $f_i$ one readily recovers the $w_i$ of ref.~\cite{Damour:2019lcq} for $i = 1,2$.
Finally, rewriting the condition for the metric and $Q$ in the form
\begin{equation}
1- {V_{\rm eff}(r,E)\over p_\infty^2}= {\bar B(R)\over p_{\rm eff}^2}
\left({\mathcal E_{\rm eff}^2\over \bar A(R)}-\mu^2-Q\right)
\end{equation}
we can immediately compare with our~\eqref{e:VtoE}. This gives
\begin{equation}\label{e:ToDamour}
\bar B(R)
\left({\mathcal E_{\rm eff}^2\over \bar A(R)}-\mu^2-Q\right)= B(r)
\left({\mathcal E_{\rm eff}^2\over A(r)}-\mu^2\right)
\end{equation}
where
\begin{equation}
Q=\mu^2 \sum_{n\geq2} q_n \left(G_N M\over R\right)^n
\end{equation}
Because both expressions yield the correct scattering angle, we should recover the $Q$-function from
ref.~\cite{Damour:2019lcq}. Indeed, inserting the Schwarzschild metric functions $\bar{A}$ and $\bar{B}$
and converting our $r$-coordinate to $R$ by the above canonical transformation, we obtain
\begin{equation}
h(R)= \sum_{n\geq1} h_n \left(M\over E\right)^n \left(G_N M\over R\right)^n ~ .
\end{equation}
Expanding~\eqref{e:ToDamour} and using that $h_1=E/(2M)$ we get
\begin{equation}
q_2= 4 (2\gamma^2-1) \left(M\over E\right)^2 \times h_2,
\end{equation}
which after using~\eqref{e:h2} reproduces the result given in
eq.~(3.33) of ref.~\cite{Damour:2019lcq}.
Next, expanding~\eqref{e:ToDamour} in $G_N$ and using the fact that $h$ starts at order $G_N$
gives
\begin{equation}
\mathcal E_{\rm eff}^2 \sum_{n\geq0} \sum_{p=0}^{\textrm{min}(n,6)}
{ (n-p+1) 6!\over p! (6-p)!} \left(\left(G_N M\over R\right)^n -
h(r)^n\right) =\mu^2 \sum_{n\geq2} q_n \left(G_N M\over
R\right)^n~ .
\end{equation}
Finally, using
\begin{equation}
h(R)^n = \sum_{m\geq n} \sum_{r_1+\cdots+r_n=m\atop r_i\geq1} \prod_{i=1}^n h_{r_i}
\left(M\over E\right)^m \, \left(G_N M\over R\right)^m,
\end{equation}
we have
\begin{equation}
q_n =\gamma^2\sum_{p=0}^{\textrm{min}(n,6)} { (n-p+1) 6!\over p! (6-p)!} - \sum_{m=1}^n \sum_{p=0}^{\textrm{min}(m,6)} { (m-p+1) 6!\over p! (6-p)!} \sum_{r_1+\cdots+r_m=n\atop r_i\geq1} \prod_{i=1}^m h_{r_i}
\left(M\over E\right)^n
\end{equation}
which shows how to express the $q_i$-coefficients in terms of the $h_i$-coefficients of this paper.
To summarize this part: We have shown the equivalence between our
remodeled EOB formalism in isotropic coordinates and the
conventionally used formalism that separates out all non-quadratic
energy-momentum terms in a function $Q$ which is added to the mass-shell
condition. A canonical transformation distinguishes our formulation,
which keeps the impact parameter $b$ fixed in the angular momentum
map, from the conventional one. This choice of canonical coordinates
allows us to immediately match the kinematical relation from amplitude
computations with the EOB kinematics of the reduced
problem. Additionally, we argued that it is far simpler to
solve for the effective metric directly, without introducing such an
auxiliary function $Q$ that parametrizes the deviations of the
effective metric from the one of Schwarzschild. Expanding our solution
around the Schwarzschild metric, we recover the $Q$-function of the
literature, thus demonstrating the equivalence. The purpose
of our remodeling has been to avoid this adding and subtraction of
terms that are the origin of the $Q$-function. This seems to not be
needed and one can instead work directly with the energy-dependent
metric.
\section{Conclusions}
With a fresh look at the EOB formalism in the light of modern
amplitude calculations for gravity, we have considered a modification
of the conventionally phrased formalism which is not based upon an
expansion around the static Schwarzschild metric. Instead, with a
rather general assumption about the desired form of the effective
one-body metric in isotropic coordinates, we have proposed a formulation where the metric
coefficients are solved order-by-order from the scattering angles as
computed from amplitudes. Crucial for this to come out in such a
simple form has been the use of isotropic coordinates and an angular
momentum map that differs from the one originally proposed. An
interesting consequence is that we remain entirely within a metric
framework, with no corrections terms needed, at least up to third
Post-Minkowskian order. The one principle that we have used to
determine the effective metric is to equate the integrands of, on one
side, the expression for the relativistic kinematics in isotropic
coordinates and, on the other side, the expression based on the
effective metric. In the probe limit we recover the Schwarzschild
metric in isotropic coordinates and at any mass range our effective
metric produces the correct scattering angle up to third
Post-Minkowskian order. We have also verified that the periastron shift
at second Post-Minkowskian order is correctly reproduced. Finally, we
have compared the above proposal with the conventional formalism and pointed
out where differences appear even though both approaches reproduce correctly the
observable quantities.
\section*{Acknowledgments}
We thank Andrea Cristofoli, Thibault Damour, and Justin Vines for very helpful discussions.
The research of P.V. has received funding from the ANR grant
``Amplitudes'' ANR-17-CE31-0001-01, and the ANR grant ``SMAGP''
ANR-20-CE40-0026-01 and is partially supported by Laboratory of Mirror
Symmetry NRU HSE, RF Government grant, ag. No 14.641.31.0001. The
work of P.H.D. was supported in part by DFF grant 0135-00089A.
\section{Introduction}\label{intro}
Neutron scattering science is increasing its instrumental power and consequently improved performance is required of the detection systems. In particular the upcoming neutron facility ESS will need a first suite of instruments fully operational already in 2019~\cite{esstdr}. The peak brightness at the ESS will be higher than that of any of the existing short pulse sources and it will be one order of magnitude higher than that of the world's leading continuous source. The time-integrated brightness at ESS will also be one or two orders of magnitude larger than is the case at leading pulsed sources today. ESS will be a long pulse source, with an average beam power of $5\,MW$ delivered to the target station; fig.~\ref{puls} shows the expected pulse of ESS compared to the pulses of existing spallation sources in the world and to the steady flux available at reactor sources.
\begin{figure}[!ht]
\centering
\resizebox{0.5\textwidth}{!}{\includegraphics{figures/pulse}}
\caption{\footnotesize The ESS long pulse compared to the pulses of existing spallation sources in the world and to the steady flux available at reactor sources~\cite{esstdr}.}\label{puls}
\end{figure}
\\ $\mathrm{^{3}He}$ gaseous detectors and scintillating detectors are the two principal technologies employed in thermal and cold neutron detection in all the facilities at present for any kind of instrument. These technologies are already close to the cutting-edge development which is sufficient to assure the performance of the instruments in neutron facilities at present. The needs of future neutron scattering science, including those of ESS, cannot be met with today's technologies unless further studies and developments are performed~\cite{gebauer1,cooper,rhwessdet}.
\\ Because of the shortage of $\mathrm{^3He}$~\cite{shea,kouzes3,zeitsear}, it is crucial to find an alternative, efficient and cost-effective way to detect thermal and cold neutrons for large area applications ($\sim 50\,m^2$), e.g. chopper spectrometers like IN5~\cite{in5oliv} at ILL.
\\ Even though a limited quantity of $\mathrm{^3He}$ would be available for small area detectors ($\sim 1\,m^2$), the requirements for the future detectors cannot be fulfilled by this technology. The main goal to be achieved for small area applications is to expand the detector performance mostly in terms of counting rate capability and spatial resolution, as well as cost-effectiveness. The development of small area detectors is focused on reflectometry applications because that is where the requirements are most challenging.
\\ In tab.~\ref{tab1} are summarized the features of the state of the art detector technology at existing facilities for chopper spectrometers and reflectometers.
\begin{table}[!ht]
\begin{center}
\footnotesize{
\begin{tabular}{|l l||c|c|c|c|c|c|}
\hline
\hline
Instrument & Facility & active area & spatial res. & efficiency & global rate & local rate \\
& & ($m \times m$) & ($mm \times mm$) & & ($s^{-1}$) & ($s^{-1}cm^{-2}$)\\
\hline
\hline
chopper spectrometers & & & & & & \\
\hline
IN5~\cite{in5oliv} & ILL & $10 \times 3$ & $ 26 \times 26$ & $\sim74\% \,@\, 1.8$\AA & - & - \\
\hline
\hline
reflectometers & & & & & & \\
\hline
FIGARO~\cite{figaro} & ILL & $0.5 \times 0.25$ & $ 2 \times 7.5$ & $\sim63\% \,@\, 2.5$\AA & $3\cdot10^7$ & $23\cdot10^3$ \\
\hline
INTER~\cite{inter} & ISIS & $0.2 \times 0.2$ & $ 1 \times 1$ & - & - & - \\
\hline
\hline
\end{tabular}}
\caption{\footnotesize Detector features on instruments at existing facilities, where figures are publicly available.}
\label{tab1}
\end{center}
\end{table}
\\ Table~\ref{tab2} summarizes the requirements at ESS for large area detectors for chopper spectrometers and for small area detector for reflectometry applications. The flux at sample must be also interpreted as the maximum rate the detector should tolerate. It is not unlikely that the full beam is completely reflected toward the detector in a neutron reflectometry experiment.
\begin{table}[!ht]
\begin{center}
\footnotesize{
\begin{tabular}{|l|c|c|c|c|c|}
\hline
\hline
ESS instrument type & active area & wavelength band & spatial resolution & time resolution & max flux at sample \\
& ($m^2$) & (\AA) & ($mm \times mm$) & ($\mu s$) & ($s^{-1}cm^{-2}$) \\
\hline
\hline
chopper spectrometers~\cite{trexprop} & $50$ & $[0.8,20]$ &$20 \times 20$ & $10$ & $10^7$ \\
\hline
reflectometers~\cite{freiaprop} & $\sim 1$ & $[2,23]$ & $0.5 \times 2$ & $100$ & $10^9$ \\
\hline
\hline
\end{tabular}}
\caption{\footnotesize Summary of generalized detector requirements for chopper spectrometers and reflectometers at ESS.}
\label{tab2}
\end{center}
\end{table}
\\ The Multi-Grid~\cite{jonisorma,mgpat} prototype has been developed in the framework of the collaboration between ILL and ESS in order to address the problem of the $\mathrm{^3He}$ shortage for large area detectors. It is a gaseous detector that contains $30$ $\mathrm{^{10}B_4C}$ layers as neutron converters. Each neutron can be converted in one of these layers that are crossed orthogonally by the neutron beam. Several prototypes using the Multi-Grid design have been built and tested, showing the feasibility of such a design for large area coverage with a suitable neutron detection efficiency.
\\ On the other hand, the Multi-Blade~\cite{buff3,framb} prototype, already introduced at ILL in 2005~\cite{buff1}, is a small area detector which wants to push the limit of spatial resolution beyond that of $\mathrm{^3He}$-based detectors for high flux applications. In particular it has been conceived to be suitable for neutron reflectometry instruments, though potential applications could be broader than that. A detailed study on this prototype has been carried out at ILL showing the improvements in terms of detector features of such a design.
\\ The Multi-Grid and the Multi-Blade prototypes are described and the results are illustrated in the following sections.
\section{Large area applications: the Multi-Grid prototype}\label{mgsect}
Several versions of the Multi-Grid detector have been implemented at ILL in collaboration with ESS in the framework of the CRISP project (http://www.crisp-fp7.eu/). With the construction of these prototypes the reliability and the actual performance of this design have been shown. A new prototype of $3\times0.8\,m^2$ active area is being built to demonstrate the feasibility of a large area detector for neutron chopper spectrometers, e.g. IN5 at ILL~\cite{in5oliv}. The IN5 Time-of-Flight spectrometer is used as a benchmark for performance and geometry requirements. IN5 was chosen for its size of about $\sim 30\,m^2$. It is the largest chopper spectrometer at ILL; finding a replacement for $\mathrm{^3He}$-based detectors over such a surface is the main goal to validate the feasibility of large area detectors based on $\mathrm{^{10}B_4C}$. The Multi-Grid design must be able to cover a sensitive area above $30\,m^2$ with about $2\times2\,cm^2$ spatial resolution. Figure~\ref{figmg3} shows a picture of the vacuum chamber of IN5 equipped with $12\times32$ $\mathrm{^3He}$-tubes and a drawing of the $3\times0.8\,m^2$ area demonstrator nearby the $\mathrm{^3He}$ detectors of the actual instrument.
\begin{figure}[!ht]
\centering
\resizebox{0.7\textwidth}{!}{\includegraphics{figures/mgphotos3}}
\caption{\footnotesize A picture of the vacuum chamber of IN5 at ILL equipped with $\mathrm{^3He}$-tubes (left). A drawing of the Multi-Grid $3\times0.8\,m^2$ area demonstrator nearby the $\mathrm{^3He}$-detectors of the instrument (right).}\label{figmg3}
\end{figure}
\\ A Multi-Grid is a proportional gaseous detector in which the neutron conversion into charged particles is obtained by using solid $\mathrm{^{10}B_4C}$-layers. $\mathrm{^{10}B}$ was chosen as the neutron converter because of its large absorption cross-section for thermal neutrons ($\sigma_{abs}=3838b$), and relatively high energy of its reaction products~\cite{jonisorma}. In the actual prototype the $\mathrm{^{10}B_4C}$ was chosen due to its conductivity, thermal and chemical stability compared to pure $\mathrm{^{10}B}$. Due to the limited efficiency that can be achieved with a single layer~\cite{gregor} (if it is not operated under a grazing angle as it is in the Multi-Blade concept~\cite{framb}), many layers are necessary to reach a suitable detection efficiency. If a single layer is used it must be operated at a grazing angle because the neutron absorption path in the layer travels closer to the surface. Consequently the neutron capture fragments have more chance to escape the layer and thus to contribute to the efficiency. At a normal incidence a single layer is only about $5\%$ efficient at thermal energies.
\\ An optimization of the layer thicknesses, the number of layers necessary, and considerations on the substrate effects are required as well as a minute mechanical study. According to~\cite{fratheo}, in order to get a detection efficiency around $50\%$ while keeping the mechanics reasonably simple, a good choice for the number of layers is around $30$. The optimal thickness of the layers should be fixed at $1\,\mu m$ if we consider a monochromatic neutron beam at $2.5$\AA~\cite{fratheo}. The interesting neutron wavelength range for the scientific case of chopper spectrometer instruments is about from $1$\AA\, to $20$\AA\, which corresponds to energies from $82\,meV$ to $200\,\mu eV$. Further optimization is possible if there are requirements on a particular neutron wavelength distribution or efficiency. It has been shown in~\cite{fratheo} that a reasonable optimization is the one that considers the barycenter of the wanted neutron wavelength distribution, i.e. the $2.5$\AA\, will provide a detector optimized for short wavelengths. Figure~\ref{figmgeff} shows the expected efficiency as a function of wavelength for a detector made up of $2,10,28,30$ and $34$ $\mathrm{^{10}B_4C}$-layers, in the two cases in which all the layers are $1\,\mu m$ or $0.5\,\mu m$ thick. The detector with $0.5\,\mu m$ layers is optimized for long wavelengths. The contribution given by the scattering or absorption in the aluminium substrates ($0.5\,mm$ thick) is also taken into account in the plots in fig.~\ref{figmgeff}, as well as a $2.5\,mm$ entrance Al-window. The decreasing efficiency at longer wavelengths is due to the absorption of neutrons by the detector entrance Al-window. No decrease can be observed if the entrance window is neglected.
\begin{figure}[!ht]
\centering
\resizebox{0.45\textwidth}{!}{\includegraphics{figures/MG1umEffWave}}
\resizebox{0.45\textwidth}{!}{\includegraphics{figures/MG1umEffWave0p5}}
\caption{\footnotesize Expected efficiency as a function of the neutron wavelength for a detector made up of $2,10,28,30$ and $34$ $\mathrm{^{10}B_4C}$-layers. The converter thickness is $1\,\mu m$ for all the layers in the plot on the left and $0.5\,\mu m$ for the plot on the right. The contribution of the Al-scattering from substrates and detector window is also taken into account.}\label{figmgeff}
\end{figure}
\\ Several prototypes made up of $28$ and $30$ layers have been built and tested. A $34$-layer detector of large area is under construction.
\\ A Multi-Grid detector is composed of a gas vessel filled with grids electrically insulated one from another and stacked to make square or rectangular counters~\cite{mgpat}, i.e. tubes. Each grid is made up of a frame in which blades, coated with $\mathrm{^{10}B_4C}$~\cite{carina} on both sides, are inserted (see fig.~\ref{figmg1}). Both the frame and the blades are made of aluminium which has a low scattering and absorption cross-section for neutrons~\cite{bruproceed}.
\begin{figure}[!ht]
\centering
\resizebox{0.7\textwidth}{!}{\includegraphics{figures/mgphotos1}}
\caption{\footnotesize A drawing of a frame where $\mathrm{^{10}B_4C}$-coated blades are inserted (left) and fully assembled grid (frame and blades) of the prototype (right).}\label{figmg1}
\end{figure}
\begin{figure}[!ht]
\centering
\resizebox{0.5\textwidth}{!}{\includegraphics{figures/MGs}}
\caption{\footnotesize The grids are stacked to form rectangular tubes where an anode wires are inserted.}\label{schemMG}
\end{figure}
\\ The grids, stacked one after the other, act as a segmented cathode (see figs.~\ref{schemMG} and~\ref{figmg2}). The readout is performed by the cathode grids and anode wires. Each of these anode wires is placed in the middle of a tube formed by the stacked grids. Individual and charge division readouts have both been implemented. The grids are electrically insulated and each of them is connected to a charge amplifier. Each anode wire is either connected directly to a charge amplifier (individual readout) or is connected to the other wires through resistors. This allows many wires to be read out with only two charge amplifiers connected at the ends of the resistive chain. The position of the neutron interaction point is reconstructed by the coincidence between grid and wire signals.
\\ Figure~\ref{figmg2} shows two of the assembled Multi-Grid prototypes. Both of them used $28$ converter layers. The first, with its $96$ grids, had an active area of $0.08\times2\,m^2$~\cite{bruproceed,buff2,khaplanov}, and the second demonstrator of about $0.3\times0.5\,m^2$. The second prototype~\cite{in6procc} was installed on the Time-of-Flight chopper spectrometer IN6~\cite{in6papold} at ILL placed side by side to the conventional $\mathrm{^3He}$ detectors of the instrument.
\begin{figure}[!ht]
\centering
\resizebox{0.7\textwidth}{!}{\includegraphics{figures/mgphotos2}}
\caption{\footnotesize A $96$-grid prototype of active area $0.08\times2\,m^2$ is shown on the left, during its contruction. A $0.3\times0.5\,m^2$ prototype is shown on the right.}\label{figmg2}
\end{figure}
\\ The detector was tested with both $\mathrm{Ar/CO_2}$ ($90:10$) gas mixture and $\mathrm{CF_4}$~\cite{bruproceed}. Counting curves and Pulse Height Spectra (PHS) have been measured for several different stopping gas pressures in order to confirm that the detector can be operated at either atmospheric pressure or even below.
\\ The neutron detection efficiency was measured on our test beam line at ILL, CT2, with a monochromatic neutron beam of $2.5$\AA. As shown in~\cite{jonisorma}, an $\mathrm{^3He}$-multi-tube detector was used as a reference and an efficiency of $(46.8\pm0.3)\%$ was found for the detector with $28$ converter layers. The position resolution was also measured and was found to be $2\times2\,cm^2$. This resolution is given by the size of the voxel identified by a single grid and a wire (see fig.~\ref{schemMG}). It has been proved that a finer spatial resolution can be obtained by calculating the center of gravity of the induced charge on adjacent grids.
\\ The $96$-grids-detector was scanned in order to quantify the uniformity in the efficiency. It has been already proven~\cite{carina}, that the $\mathrm{^{10}B_4C}$ coatings show about a $13\%$ variation in thickness due to the deposition method, this affects the detector uniformity in the range of $1\%$~\cite{jonisorma}.
\\ An accurate study on the response of the Multi-Grid detector in the presence of $\gamma$-ray background is reported in~\cite{khap}. A high level of discrimination between neutron and photon signals can be reached with $\mathrm{^3He}$ detectors. The sensitivity of a neutron detector to $\gamma$-rays is a very important characteristic, as it defines the best achievable signal-to-noise ratio. In a neutron facility the $\gamma$-ray background is produced by the interaction of the neutrons with the parts of the instrument. Neutron guides, collimators, shielding, beam stoppers and choppers, are only a few of the objects where a neutron can be absorbed giving rise to $\gamma$-ray background. The majority of efficient neutron absorbers, in particular $\mathrm{^{10}B}$, $\mathrm{^{113}Cd}$, $\mathrm{^{115}Cd}$ and $\mathrm{^{157}Gd}$, emit one or more $\gamma$-rays for each neutron captured. The energy of these $\gamma$-rays spans a very wide range that goes from a few tens of $keV$ up to about $10\,MeV$. Considering that the scattering cross sections of many samples tend to be relatively low, the neutron intensity that carries useful information can be many orders of magnitude lower than the flux of $\gamma$-rays~\cite{khap}.
\\ The physical effects and geometric considerations that affect the sensitivity to $\gamma$-rays in gas-based detectors for thermal neutrons have been deeply investigated. It has been found that the $\gamma$-ray rejection does not need to be lower in $\mathrm{^{10}B}$-based detectors than in $\mathrm{^3He}$ tubes. A correctly-chosen energy threshold and operating voltage allow for an equally high $\gamma$-ray rejection as in a $\mathrm{^3He}$ detector. In particular for the Multi-Grid detector, it has been shown that sensitivities below $10^{-6}$ can be reached by setting an energy threshold to $100\,keV$ and operating the Multi-Grid at $900\,V$. At this voltage the equivalent gas gain is below $100$, consequently $\gamma$-rays can be discriminated by their energy while losing only a few percent in the neutron detection efficiency.
\\ With the Multi-Grid demonstrator, installed on IN6 in order to be tested in a real instrument environment, the suitable Time-of-Flight resolution of the $\mathrm{^{10}B}$-based detector has been shown, as well as its better solid angle coverage with respect to $\mathrm{^3He}$ tubes. Considering the dead spaces, i.e. solid angle coverage of the Multi-Grid detector, this is only $3\%$ less efficient than the IN6 detection system at $4.6$\AA \,and $9\%$ more efficient at $4.1$\AA~\cite{in6procc}.
\\ During the tests on IN6 it became clear that not only $\gamma$-ray and neutron background was present: the detector showed a count rate that could not be correlated to either $\gamma$-ray or neutron events. This background was not modulated when the beam was pulsed and was present irrespective of the instrument's shutter state or even the reactor operation. The source of such background was found to be the naturally-occurring concentrations of uranium and thorium in aluminium~\cite{Al1}. The concentration of radioisotopes in materials~\cite{Al2} is a well known topic in the chemical industry and it has also been investigated in the field of the neutron detectors~\cite{Al3}. Two solutions are currently being investigated to eliminate this background. One is to deposit a thin nickel layer at the surface of aluminium in order to stop the alpha particles before they reach the gas~\cite{prax}. The other solution is to use $\mathrm{U/Th}$-free aluminium for which the concentration of uranium and thorium is $2-3$ orders of magnitude lower than in standard aluminium alloys~\cite{hyd}. Both these solutions reduce the background by several orders of magnitude. Respective costs will likely be the deciding factor between these two approaches.
\\ The following step in this project is the construction of the $3\times0.8\,m^2$ large area demonstrator. This detector will demonstrate the feasibility and cost of large-scale production of both the mechanical parts as well as the $\mathrm{^{10}B_4C}$-layers required~\cite{in6procc}.
\section{Small area applications: the Multi-Blade prototype}\label{mbsect}
The Multi-Blade prototype~\cite{framb} is a small area detector for neutron reflectometry applications~\cite{figaro}. Although the amount of $\mathrm{^3He}$ needed for a small area detector will be available in the future, the Multi-Blade wants to push the limit of $\mathrm{^3He}$-based detectors in terms of spatial resolution and counting rate capability. The Multi-Blade is a Multi Wire Proportional Chamber (MWPC) operated at atmospheric pressure (see fig.~\ref{schemMB}). This detector uses $\mathrm{^{10}B_4C}$ converters at grazing angle with respect to the incoming neutron beam. The angled geometry improves both its spatial resolution and its counting rate capability. The use of the $\mathrm{^{10}B_4C}$ conversion layer at grazing angle also increases the detection efficiency.
\begin{figure}[!ht]
\centering
\resizebox{0.4\textwidth}{!}{\includegraphics{figures/MBs}}
\caption{\footnotesize A drawing of the Multi-Blade detector, several identical units (cassettes) are aligned to form a detector. Each module is an independent MWPC.}\label{schemMB}
\end{figure}
\\ The Multi-Blade prototype is conceived to be modular in order to be adaptable to different applications. A significant concern in a modular design is the uniformity of the detector response. Several effects might contribute to degrade the uniformity and they have to be taken into account in the detector concept: overlap between different substrates, coating uniformity, substrate flatness and parallax errors.
\\ Each module of the Multi-Blade, a so-called \emph{cassette}, is an independent MWPC equipped with the neutron converter and a two-dimensional readout system. Wires are placed orthogonally to cathode strips (see fig.~\ref{schemMB}); the interaction point of the neutron is reconstructed by the coincidence between wires and strips.
\\ The fully assembled detector is composed of several cassettes as shown in the schematic in fig.~\ref{figmb}.
\begin{figure}[!ht]
\centering
\resizebox{0.7\textwidth}{!}{\includegraphics{figures/mb}}
\caption{\footnotesize Schematic of the Multi-Blade detector made up of several independent and identical units called \emph{cassettes} (left). A picture of the Multi-Blade prototype composed of $4$ cassettes (right).}\label{figmb}
\end{figure}
\\ Several approaches in the prototype design have been studied: number of converters, read-out system and materials to be used. Two versions of the Multi-Blade prototype have been built focusing on its different issues and features.
Their detection efficiency and uniformity were measured on the test beam line CT2 at ILL. It has been shown in~\cite{framb} that a suitable detection efficiency for reflectometry applications can be achieved with such a detector. The main issue in the Multi-Blade design is the uniformity over its active surface. The cassettes overlap to avoid dead zones, and, in the switching between one cassette to another, a loss in efficiency can occur. Further studies are necessary to optimize the detector uniformity.
\\ The detector is operated at atmospheric pressure. This makes it suitable to be operated in vacuum chambers. Moreover, cost-effective materials can be used inside the detector because outgassing is not an issue. The materials used in the detector can release molecules that give rise to radicals. The presence of radicals in the gas drastically affects the detector functionality and aging. Since the gas is flushed those molecules are continuously evacuated.
\\ The spatial resolution was measured~\cite{framb} to be $0.275\times4\,mm^2$. The reasonable limit in the resolution that can be reached with $\mathrm{^3He}$ detectors is around $1\,mm$. In many areas of soft and hard matter research science, the amount of material to investigate is rather limited. Partly because the fabrication of larger samples is too expensive or not feasible, yet, partly because the interesting features depend on the size. The development of a neutron reflectometer optimized for small samples is under study~\cite{rainbow1,estiaprop}. There is a great interest in expanding the technique of neutron reflectometry beyond static structural measurements of layered structures to kinetic studies~\cite{cubitt2}. In order to perform these studies the neutron wavelength to position encoding is necessary~\cite{rainbow2}, but, due to the practical limits in the actual spatial resolution of $\mathrm{^3He}$-based detectors this concept is probably not practical. Therefore, the development of an area detector with $0.2\,mm$, required in one dimension only, is crucial~\cite{cubitt2}. The Multi-Blade can easily fulfill this requirement. The Multi-Blade concept is a promising alternative, to accomplish the high spatial resolution and the high counting rate capability which is in principle $10$ times higher than conventional $\mathrm{^3He}$-based detectors~\cite{framb}.
\section{Conclusions}\label{conclu}
The Multi-Grid concept has been successfully introduced and tested. It has been shown, with the construction of several prototypes, that the performance of such a design fulfills the needs of the instruments that require large area detectors, i.e. chopper spectrometers. A neutron detection efficiency of about $50\%$ at $2.5\,$\AA\, can be reached with $30$ $\mathrm{^{10}B_4C}$ layers~\cite{jonisorma}. This detector is operated at atmospheric pressure with a continuous gas flow. This makes the detector suitable to be operated in vacuum chambers, and it allows cost-effective materials to be used in its construction.
\\ A $0.3\times0.5\,m^2$ area demonstrator~\cite{in6procc} has been tested in the Time-of-Flight chopper spectrometer IN6 at ILL placed side by side to the conventional $\mathrm{^3He}$ detectors of the instrument. The suitable Time-of-Flight resolution of the $\mathrm{^{10}B}$-based detector as well as its better solid angle coverage with respect to $\mathrm{^3He}$ tubes have been shown. $\mathrm{U/Th}$-free aluminium and nickel plating of aluminium have been investigated as the two solutions to reduce the natural $\alpha$-emission of Al-contaminants. Background is efficiently reduced and the respective costs will be the deciding factor between these two approaches.
\\ Although the Multi-Grid detector efficiency is smaller than for $\mathrm{^3He}$ detectors, it can be considered suitable in the context of the $\mathrm{^3He}$ shortage. In addition to that, it has been shown that the wider solid angle coverage of the Multi-Grid design, i.e. its higher granularity, with respect to the $\mathrm{^3He}$ detectors, adequately compensates this smaller efficiency~\cite{in6procc}.
\\ An accurate study on the $\gamma$-ray sensitivity of such a technology has been performed to validate its reliability in terms of background $\gamma$-ray rejection~\cite{khap}.
\\ A prototype of $3\times0.8\,m^2$ and exploiting $34$ layers is under construction. The expected efficiency is about $56\%$ for $2.5\,$\AA. This prototype will demonstrate the feasibility of a large area detector that can be scaled up to a few tens of square meters.
\\ In order to tackle the problem of the limitations of the $\mathrm{^3He}$ technology in terms of counting rate capability and spatial resolution for small area detectors, the Multi-Blade prototype has been built and tested~\cite{framb}. In particular a high spatial resolution detector based on inclined $\mathrm{^{10}B_4C}$ layers suitable for neutron reflectometry instruments has been developed. This detector has been conceived to be modular in order to be more versatile: it is composed of modules called cassettes. All the issues that can arise from a modular design have been investigated. The detector is operated at atmospheric pressure. This makes it suitable to be operated in vacuum. Moreover, cost effective materials can be used inside the detector because outgassing is not an issue. It has been shown that a proper detection efficiency for reflectometry instruments can be achieved with such a detector. The presented Multi-Blade prototype showed a very high spatial resolution, it was measured to be about $0.3\times4\,mm^2$~\cite{framb}.
\\ Throughout the design, assembly and characterization of our $\mathrm{^{10}B}$-based prototypes, it has been shown that this technology is a viable replacement of the sparse $\mathrm{^3He}$, in terms of performance and cost. This $\mathrm{^{10}B}$ detector technology will certainly form a central part of the landscape for future detectors for neutron scattering instruments.
\begin{acknowledgement}
\textbf{\large Acknowledgements}
\medskip
\\ The work has been supported by the CRISP project (European Commission 7th Framework Programme Grant Agreement 283745) - http://www.crisp-fp7.eu/.
\end{acknowledgement}
|
1,314,259,994,973 | arxiv |
\section{Acknowledgments}
We thank the Center of Computational Innovations at RPI for maintaining the equipment used in this research, including the AiMOS supercomputer supported by the National Science Foundation under Grant No. 1828083.
This research was also supported by the Exascale Computing Project (17-SC-20-SC), a collaborative effort of the U.S. Department of Energy Office of Science and the National Nuclear Security Administration. Sandia National Laboratories is a multimission laboratory managed and operated by National Technology and Engineering Solutions of Sandia, LLC., a wholly owned subsidiary of Honeywell International, Inc., for the U.S. Department of Energy's National Nuclear Security Administration under contract DE-NA-0003525.
\clearpage
\section*{References}
\bibliographystyle{elsarticle-num}
\section{Background}
\subsection{Coloring Problem }
While there exist many definitions of the ``graph coloring problem,'' we specifically consider variants of distance-1 and distance-2 coloring.
Consider graph $G = (V,E)$ with vertex set $V$ and edge set $E$.
\emph{Distance-1 coloring} assigns to each vertex $v \in V$ a color $C(v)$ such that $\forall (u,v) \in E, C(u) \neq C(v)$.
In \emph{distance-2 coloring}, colors are assigned such that
$\forall (u,v),(v,w) \in E, C(u) \neq C(v) \neq C(w)$;
i.e., all vertices within two hops of each other have different colors.
\emph{Partial Distance-2 coloring} is a special case of distance-2 coloring in which
$\forall (u,v),(v,w) \in E, C(u) \neq C(w)$; it is typically applied to bipartite graphs
in which only one set of the vertices is given colors (thus, the designation ``partial'').
Partial distance-2 coloring is used to color sparse Jacobian matrices~\cite{GebremedhinMannePothen}.
When a coloring satisfies one of the above constraints, it is called \emph{proper}.
The goal is to find proper colorings of $G$ such that the total number of different colors used is minimized.
\subsection{Coloring Background}
While minimizing the number of colors is NP-hard, serial coloring algorithms using greedy heuristics have been effective for many applications~\cite{IAB:gebremedhin2000scalable}.
The serial greedy algorithm in Algorithm~\ref{IAB:alg:serialgreed} colors vertices one at a time.
Colors are represented by integers, and the smallest usable color is assigned as a vertex's color.
Most serial and parallel coloring algorithms use some variation of greedy coloring, with algorithmic differences usually involving the processing order of vertices or, in parallel, the handling of conflicts and communication.
\begin{algorithm}
\caption{Serial greedy coloring algorithm}
\label{IAB:alg:serialgreed}
\begin{algorithmic}
\Procedure{SerialGreedy}{Graph $G=(V,E)$}
\State $C(\forall v \in V) \gets 0$ \Comment{Initialize all colors as null}
\ForAll{$v \in V$ in some order}
\State $c \gets$ the \emph{smallest} color not used by a neighbor of $v$
\State $C(v) \gets c$
\EndFor
\EndProcedure
\end{algorithmic}
\end{algorithm}
\emph{Conflicts} in a coloring are edges that violate the color-assignment criterion; for example, in distance-1 coloring, a conflict is an edge with both endpoints sharing the same color.
Colorings that contain conflicts are not proper colorings, and are referred to as \emph{pseudo-colorings}.
Pseudo-colorings arise only in parallel coloring, as conflicts arise only when two vertices are colored concurrently.
A coloring's ``quality'' refers to the number of colors used; higher quality colorings of a graph $G$ use fewer colors, while lower quality colorings of $G$ use more colors.
It has been observed that the order vertices are visited affects the number of colors needed.
Popular vertex orderings include largest-degree-first, smallest-degree-last, and saturation degree~\cite{Besta20}.
These orderings are highly sequential and do not allow much parallelism. However, relaxations of those orderings
can allow some parallelism \cite{Hasenplaugh14}.
\subsection{Parallel Coloring Algorithms}
There are two popular approaches to parallel graph coloring.
The first concurrently finds independent sets of vertices and concurrently colors all of the vertices in each set. This approach was used by
Jones and Plassmann~\cite{IAB:jones1993parallel}.
Osama et al.~\cite{osama19} found independent sets on a single GPU and explored the impact of varying the baseline independent set algorithm.
The second approach, referred to as ``speculate and iterate''~\cite{IAB:gebremedhin2000scalable,IAB:ccatalyurek2012graph}, colors as many vertices as possible in parallel and then iteratively fixes conflicts in the resulting pseudo-coloring until no conflicts remain.
Gebremedhin et al.~\cite{IAB:gebremedhin2000scalable}, {\c{C}}ataly{\"{u}}rek et al.~\cite{IAB:ccatalyurek2012graph} and Rokos et al.~\cite{IAB:rokos2015fast} present shared-memory implementations based on the speculate and iterate approach.
Deveci et al.~\cite{IAB:deveci2016parallel} present implementations based on the speculate and iterate approach that are scalable on a single GPU.
Distributed-memory algorithms such as those in~\cite{IAB:bozdaug2008framework,IAB:sariyuce2012scalable} use the speculate and iterate approach.
Grosset et al.~\cite{IAB:grosset2011evaluating} present a hybrid speculate and iterate approach that splits computations between the CPU and a single GPU,
but does not operate on multiple GPUs in a distributed memory context.
Sallinen et al.~\cite{Sallinen16} demonstrated how to color very large, dynamic graphs efficiently.
Besta et al. \cite{Besta20} developed shared memory coloring algorithms and analyzed their performance.
They compared to both Jones-Plassman and speculative methods, but only on multicore CPU.
Bozda{\u{g}} et al.~\cite{IAB:bozdaug2008framework} showed that, in distributed memory, the speculative approach is more scalable than methods based on the independent set approach of Jones and Plassmann.
Therefore, we choose a speculative and iterative approach with our algorithms.
\subsection{Distributed Coloring}
In a typical distributed memory setting, an input graph is split into subgraphs that are assigned to separate processes.
A process's \emph{local graph} $G_l = \{V_l+V_g, E_l+E_g\}$ is the subgraph assigned to the process.
Its vertex set $V_l$ contains \emph{local vertices}, and a process is said to \emph{own} its local vertices. The intersection of all processes' $V_l$
is empty, and the union equals $V$.
The local graph also has non-local vertex set $V_g$, with such non-local vertices commonly referred to as \emph{ghost vertices}; these vertices are copies of
vertices owned by other processes.
To ensure a proper coloring, each process needs to store color state information for both local vertices and ghost vertices; typically, ghost vertices are treated as read-only.
The local graph contains edge set $E_l$, edges between local vertices, and $E_g$, edges containing at least one ghost vertex as an endpoint.
Bozda{\u{g}} et al.~\cite{IAB:bozdaug2008framework} also defines two subsets of local vertices: \emph{boundary vertices} and \emph{interior vertices}.
Boundary vertices are locally owned vertices that share an edge with at least one ghost; interior vertices are locally owned vertices that do not neighbor ghosts.
For processes to communicate colors associated with their local vertices, each vertex has a unique global identifier (GID).
\section{Future work}
We have presented new multi-GPU distributed memory implementations of distance-1, distance-2 and
partial distance-2 graph coloring. These methods enable parallel graph coloring for graphs too large
to fit into a single GPU's memory; weak-scaling results demonstrate coloring of a graph with
12.8 billion vertices and 76.7 billion edges in less than two seconds.
We introduced a new recoloring heuristic based on vertex degrees
that reduces the amount of recoloring needed in parallel coloring methods.
We showed that our approaches are scalable to 128 GPUs and produce colorings with quality
similar to or better than Zoltan's distributed memory coloring algorithms.
Because our coloring algorithms use the Kokkos and KokkosKernels library for
on-node performance portability, our MPI+X methods can also run on distributed-memory
computers with multicore (CPU-based) nodes.
In this work, we focused on GPU architectures; exploring multicore performance
is future work.
We are currently integrating this code into the Zoltan2 package of Trilinos.
Our goal is to deliver a complete suite of MPI+X algorithms for distance-1, distance-2, and partial distance-2 coloring in Zoltan2.
We will modify PD2 to allow it to color only vertices of interest to the application as Zoltan does.
We also will investigate further optimizations to increase performance.
There are optimizations present in Zoltan's implementation that are not directly applicable to our implementation, but can inform optimizations that
reduce the overall recoloring workload and minimize communication.
These changes could increase performance for D1, D2, and PD2,
as well as make D2 and PD2 more scalable on skewed graphs.
\section{Experimental Setup}
We performed scaling experiments on the AiMOS supercomputer housed at Rensselaer Polytechnic Institute. The system has 268 nodes, each equipped with two IBM Power 9 processors clocked at 3.15~GHz, 4x NVIDIA Tesla V100 GPUs with 16~GB of memory connected via NVLink, 512~GB of RAM, and 1.6~TB Samsung NVMe Flash memory. Inter-node communication uses a Mellanox Infiniband interconnect. We compile with xlC 16.1.1 and use Spectrum MPI with GPU-Direct communication disabled.
The graphs we used to test D1 and D2 are listed in Table~\ref{IAB:tab:graphs}. Most of the graphs are from the SuiteSparse Matrix Collection
~\cite{IAB:Davis2011UFS}.
The maximum degree $\delta_{max}$ can be considered an upper bound for the number of colors used, as, by Brooks' theorem, any connected,
undirected graph that is neither complete nor an odd cycle can be colored using $\delta_{max}$ colors~\cite{IAB:brooks1941colouring}.
We selected many of the same graphs used by Deveci et al. to allow for direct performance comparisons.
We include many graphs from Partial Differential Equation (PDE) problems because they are representative of graphs used with Automatic Differentiation~\cite{IAB:gebremedhin2020introduction}, which is a target application for graph coloring algorithms.
We also include social network graphs and a web crawl to demonstrate scaling of our methods on irregular real-world datasets.
We preprocessed all graphs to remove multi-edges and self-loops, and
we used subroutines from HPCGraph~\cite{slota_ipdps2016} for efficient I/O.
We compare our implementation against distributed distance-1 and distance-2 coloring in the Zoltan~\cite{IAB:devine2009getting} package of Trilinos.
Zoltan's implementations are based directly on Bozda{\u{g}} et al.~\cite{IAB:bozdaug2008framework}.
Zoltan's distributed algorithm for distance-2 coloring requires only a single ghost layer, and to reduce conflicts, the boundary vertices are colored in small batches.
For our results, we ran Zoltan and our approaches with four MPI ranks per node on AiMOS, and used the same partitioning method across all of our comparisons.
Our methods D1, D1-2GL, and D2 were run with four GPUs and
four MPI ranks (one per GPU) per node.
Zoltan uses only MPI parallelism; it does not use GPU or multicore parallelism.
For consistency, we use four MPI ranks per node with Zoltan, and use the same number of nodes
for experiments with Zoltan and our methods.
We used Zoltan's default coloring parameters; we did not experiment with
options for vertex visit ordering, boundary coloring batch size, etc.
We omit direct comparison to single-node GPU coloring codes such as CuSPARSE~\cite{naumov2015parallel}, as we use subroutines for on-node coloring from Deveci et al.~\cite{IAB:deveci2016parallel}. Deveci et al. have already performed a comprehensive comparison between their coloring methods and those in CuSPARSE, reporting an average speedup of 50\% across a similar set of test instances. As such, we are confident that our on-node GPU coloring is representative of the current state-of-the-art.
\section{Introduction}
We present new multi-GPU, distributed memory implementations of distance-1,
distance-2, and partial distance-2 graph coloring.
\emph{Distance-1 graph coloring} assigns \emph{colors} (i.e., labels) to all vertices in a graph such that no two neighboring vertices have the same color.
Similarly, \emph{distance-2 coloring} assigns colors such that no vertices within \emph{two hops}, also called a ``two-hop neighborhood,'' have the same color.
\emph{Partial distance-2 coloring} is a special case of distance-2 coloring, in which only one set of a bipartite graph's vertices are colored.
Usually, these problems are formulated as NP-hard optimization problems, where the number of colors used to fully color a graph is minimized.
Serial heuristic algorithms have traditionally been used to solve these problems, one of the most notable being the DSatur algorithm of Br{\'e}laz~\cite{brelaz1979new}.
More recently, parallel algorithms~\cite{IAB:deveci2016parallel,IAB:bozdaug2008framework} have been proposed; such algorithms usually require multiple \emph{rounds} to correct for improper \emph{speculative} colorings produced in multi-threaded or distributed environments.
There are many useful applications of graph coloring.
Most commonly, it is employed to find concurrency in parallel scientific computations~\cite{IAB:deveci2016parallel, IAB:allwright1995comparison}; all data sharing a color can be updated in parallel without incurring race conditions.
Other applications use coloring as a preprocessing step to speed up the computation of Jacobian and Hessian matrices~\cite{IAB:gebremedhin2013colpack} and to identify short circuits in printed circuit designs~\cite{IAB:garey1976application}.
Despite the intractability of minimizing the number of colors for non-trivial graphs, such applications benefit from good heuristic algorithms that produce small numbers of colors.
For instance, Deveci et al.~\cite{IAB:deveci2016parallel} show that a smaller number of colors used by a coloring-based preconditioner reduces the runtime of a conjugate gradient solver by 33\%.
In particular, this work is motivated by the use of graph coloring as a preprocessing step for distributed scientific computations such as automatic differentiation~\cite{IAB:gebremedhin2020introduction}.
For such applications, assembling the associated graphs on a single node to run a sequential coloring algorithm may not be feasible~\cite{IAB:bozdaug2008framework}.
As such, we focus on running our algorithms on the parallel architectures used by the underlying applications.
These architectures typically are highly distributed, with multiple CPUs and/or GPUs per node.
Therefore, we specifically consider coloring algorithms that can use the ``MPI+X'' paradigm, where the Message Passing Interface (MPI) library is used in distributed memory and ``X'' is multicore CPU or GPU acceleration.
\subsection{Contributions}
We present and examine two MPI+X implementations of distance-1 coloring as well as one MPI+X implementation of distance-2 coloring.
In order to run on a wide variety of architectures, we use the Kokkos performance portability framework~\cite{IAB:edwards2014kokkos,kokkoskernels} for on-node parallelism and Trilinos~\cite{IAB:heroux2005overview} for distributed MPI-based parallelism.
The combination of Kokkos and MPI allows our algorithms to run on multiple multicore CPUs or multiple GPUs in a system.
For this paper, we focus on the performance of our algorithms in MPI+GPU environments.
For distance-1 coloring of real-world networks, our algorithms achieve up to 2.38x speedup on 128 GPUs compared to a single GPU, and only a 2.23\% increase in the number of colors on average.
For distance-2 coloring, our algorithm achieves up to 33x speedup and, on average, a 7.5\% increase in the number of colors.
We also demonstrate good weak scaling behavior up to 128 GPUs for graphs with up to 12.8 billion vertices and 76.7 billion edges.
\section{Methods}
We present three hybrid MPI+GPU algorithms, called Distance-1 (D1), Distance-1 Two Ghost Layer (D1-2GL) and Distance-2 (D2). D1 and D1-2GL solve the distance-1 coloring problem, and D2 does distance-2 coloring.
We apply a variation of our D2 coloring to do partial D2-coloring (PD2).
We leverage Trilinos~\cite{IAB:heroux2005overview} for distributed MPI-based
parallelism and Kokkos~\cite{IAB:edwards2014kokkos} for on-node parallelism. KokkosKernels~\cite{kokkoskernels} provides baseline implementations of distance-1 and distance-2 coloring algorithms that we use and modify for our local coloring and recoloring subroutines.
Our three proposed algorithms follow the same basic framework, which builds upon that of Bozda{\u{g}} et al.~\cite{IAB:bozdaug2008framework}.
Bozda{\u{g}} et al. observe that interior vertices can be properly colored independently on each process without creating conflicts or requiring communication.
They propose first coloring interior vertices, and then coloring boundary vertices in small batches over multiple rounds involving communication between processes.
This approach can reduce the occurrence of conflicts, which in turn reduces the amount of communication necessary to properly color the boundary.
In our approach, we color all \emph{local} vertices first.
Then, after communicating boundary vertices'
colors, we fix all conflicts. Several
rounds of conflict resolution and communication may be needed to resolve all
conflicts.
We found that this approach was generally faster than the batched boundary
coloring, and it allowed us to use existing parallel coloring routines in KokkosKernels without substantial modification.
\begin{algorithm}[!htb]
\caption{Distributed-Memory Speculative Coloring}
\label{IAB:alg:overview}
\begin{algorithmic}
\Procedure{Parallel-Color} {\newline \hspace*{0.5pc} Local Graph $G_l=\{V_l+V_g,E_l+E_g\}$,GID}
\State colors $\gets$ Color($G_l$, colors) \Comment{Initially color local graph}
\State Communicate colors of boundary vertices
\State conflicts $\gets$ Detect-Conflicts($G_l$, colors, GID)
\State Allreduce(conflicts, SUM) \Comment{Global sum conflicts}
\While{conflicts $>$ 0}
\State $\mathit{gc} \gets$ current colors of all ghosts
\State colors = Color($G_l$, colors) \Comment{Recolor conflicted}
\State \Comment{vertices}
\State Replace ghost colors with $\mathit{gc}$
\State Communicate updated boundary colors
\State conflicts $\gets$ Detect-Conflicts($G_l$, colors, GID)
\State Allreduce(conflicts, SUM) \Comment{Global sum conflicts}
\EndWhile
\State \textbf{return} colors
\EndProcedure
\end{algorithmic}
\end{algorithm}
Algorithm~\ref{IAB:alg:overview} demonstrates the general approach for our three speculative distributed algorithms.
First, each process colors all local vertices with a shared-memory algorithm.
Then, each process communicates its boundary vertices' colors to processes with corresponding ghosts.
Processes detect conflicts in a globally consistent way and remove the colors of conflicted vertices.
Finally, processes locally recolor all uncolored vertices, communicate updates, detect conflicts, and repeat until no conflicts are found.
\subsection{Distributed Boundaries}
\begin{figure}
\includegraphics[scale=0.2]{boundary-vertex-diagram.png}
\caption{Definition of boundary vertex sets for different coloring instances}
\label{IAB:boundary-verts}
\end{figure}
Figure~\ref{IAB:boundary-verts} shows the sets of boundary vertices for distance-1 and distance-2 formulations of graph coloring.
A process' distance-1 boundary vertices are its owned vertices that have edge neighbors owned by other processes.
Its distance-2 boundary vertices are its owned vertices whose neighbors have neighbors owned by other processes.
These sets allow us to optimize our distributed conflict detection, as only vertices in the boundary may conflict with a vertex on another process.
\subsection{Distance-1 Coloring (D1)}
Our Distance-1 method begins by independently coloring all owned vertices on each process using the GPU-enabled algorithms by Deveci et al.~\cite{IAB:deveci2016parallel}
VB\_BIT and EB\_BIT in KokkosKernels~\cite{kokkoskernels}.
VB\_BIT uses vertex-based parallelism; each vertex is colored by a single thread. VB\_BIT uses compact bit-based representations of colors to make it performant on GPUs.
EB\_BIT uses edge-based parallelism; a thread colors the endpoints of a single edge. EB\_BIT also uses the compact color representation to reduce memory usage on GPUs.
For graphs with skewed degree distribution (e.g., social networks), edge-based parallelism typically yields better workload balance between GPU threads.
We observed that for graphs with a sufficiently large maximum degree, edge-based EB\_BIT outperformed vertex-based VB\_BIT on Tesla V100 GPUs.
Therefore, we use a simple heuristic based on maximum degree: we use EB\_BIT for graphs with maximum degree greater than 6000; otherwise, we use VB\_BIT.
\begin{algorithm}[!htb]
\algrenewcommand\algorithmicindent{1.0em}
\caption{Distance-1 conflict detection}
\label{IAB:alg:conflictres}
\begin{algorithmic}
\Procedure{Detect-Conflicts-D1}{\newline \hspace*{0.5pc} Local Graph $G_l=\{V_l+V_g,E_l+E_g\}$, colors, GID}
\State conflicts $\gets$ 0
\ForAll{$v \in V_g$} \textbf{in parallel}
\ForAll{$\langle v, u\rangle \in (E_g)$}
\State conflicts $\gets$ conflicts $+$ Check-Conflicts($v,u,\ldots$)
\If{colors[$v$] $=$ 0}
\State \textbf{break}
\EndIf
\EndFor
\EndFor
\State \textbf{return} conflicts
\EndProcedure
\end{algorithmic}
\end{algorithm}
\begin{algorithm}[!htb]
\caption{Algorithm to identify and resolve conflicts}
\label{IAB:alg:conflictcheck}
\begin{algorithmic}
\Procedure{Check-Conflicts}{$v$, $u$, colors, GID, recolorDegrees}
\If{colors[$v$] $=$ colors[$u$]}
\If{recolorDegrees and degree($v$) $<$ degree($u$)}
\State colors[$v$] $\gets$ 0
\ElsIf{recolorDegrees and degree($u$) $<$ degree($v$)}
\State colors[$u$] $\gets$ 0
\ElsIf{rand(GID[$v$]) $>$ rand(GID[$u$])}
\State colors[$v$] $\gets$ 0
\ElsIf{rand(GID[$u$]) $>$ rand(GID[$v$])}
\State colors[$u$] $\gets$ 0
\ElsIf{GID[$v$] $>$ GID[$u$]}
\State colors[$v$] $\gets$ 0
\Else
\State colors[$u$] $\gets$ 0
\EndIf
\State \textbf{return} 1
\EndIf
\State \textbf{return} 0
\EndProcedure
\end{algorithmic}
\end{algorithm}
Algorithm~\ref{IAB:alg:conflictres} shows the conflict detection component of Algorithm~\ref{IAB:alg:overview}.
This algorithm runs on each process using its local graph $G_l$.
It detects conflicts across processor boundaries and uncolors vertices to
resolve the conflicts before recoloring.
After the initial coloring, only boundary vertices can be in conflict with one another\footnote{As suggested by Bozda{\u{g}} et al., we considered reordering local vertices to group all boundary vertices together for ease of processing. This optimization did not show benefit in our implementation, as reordering tended to be slower than coloring of the entire local graph.}.
We perform a full exchange of boundary vertices' colors using collective communication functions implemented in the Zoltan2 package of Trilinos~\cite{IAB:heroux2005overview}.
After the initial all-to-all boundary exchange, we only communicate the colors of boundary vertices that have been recolored.
After each process receives its ghosts' colors, it detects conflicts by checking each owned vertex's color against the colors of its neighbors.
The conflict detection is done in parallel over the owned vertices using Kokkos.
The overall time of conflict detection is small enough that any imbalance resulting from our use of vertex-based parallelism is insignificant relative to end-to-end times for the D1 algorithm.
Once we have identified all conflicts, we again use VB\_BIT or EB\_BIT to recolor the determined set of conflicting vertices.
We modified KokkosKernels' coloring implementations to accept a ``partial'' coloring and the full local graph, including ghosts.
(Our initial coloring phase did not need ghost information.)
We also modified VB\_BIT to accept a list of vertices to be recolored.
Such a modification was not feasible for EB\_BIT.
Before we detect conflicts and recolor vertices, we save a copy of the ghosts' colors ($\mathit{gc}$ in Algorithm~\ref{IAB:alg:overview}).
Then we give color zero to all vertices that will be recolored; our coloring functions interpret color zero as uncolored.
To prevent the coloring functions from resolving conflicts without respecting our conflict resolution rules (thus preventing convergence of our parallel coloring), we allow a process to temporarily recolor some ghosts,
even though the process does not have enough color information to correctly recolor them. The ghosts' colors are then
restored to their original values in order to keep ghosts' colors consistent with their owning process.
Then, we communicate only recolored owned vertices, ensuring that recoloring changes only owned vertices.
\subsection{Distributed Recoloring Using Vertex Degrees}
When a conflict is found, only one vertex involved in the conflict needs to be recolored.
Since conflicts happen on edges between two processes' vertices, both processes must agree on which vertex will be recolored.
We propose a new algorithm for selecting vertices to be recolored in the conflict phase, based on prioritizing by vertex degrees.
This idea was inspired by the effectiveness of largest-first and smallest-last ordering in the serial greedy algorithm.
To the best of our knowledge, prioritizing the distributed recoloring of lower degree vertices is a novel approach to distributed coloring conflict resolution.
In this approach,
shown in Algorithm~\ref{IAB:alg:conflictcheck}, when recolorDegrees is true, our conflict detection prioritizes recoloring the lower degree vertex involved in a distributed conflict.
For vertices with equal degree, we adopt the random conflict resolution scheme of Bozda{\u{g}} et al.
in which
the conflicted vertex with the higher random number generated from its global identifier (GID) is chosen for recoloring.
The idea behind our recolorDegrees heuristic is that recoloring vertices with large degrees will likely result in giving those vertices a higher color, while recoloring vertices with a smaller degree may be able to use a smaller color for that vertex.
Additionally, recoloring vertices with fewer neighbors means that it is less likely that we recolor neighboring vertices concurrently which can reduce the number of conflicts that arise during distributed recoloring.
We show that this approach generally decreases runtime for distance-1 coloring, and reduces the number of colors used.
In our experiments, recolorDegrees reduces our color usage by 8.9\% and runtime by roughly 7\% for D1 on average.
It achieves a maximum speedup of 45\%, and a maximum color reduction of 39\% over using D1 without recolorDegrees.
We compute the vertex degrees only once. Possible variations include using a ``dynamic''
degree based on how many neighbors have been colored or the ``saturation degree'' (how many colors the colored neighbors have been assigned). We do not investigate those variations here.
\subsection{Two Ghost Layers Coloring (D1-2GL)}
Our second algorithm for distance-1 coloring, D1-2GL, follows the D1 method, but adds another ghost vertex ``layer'' to the subgraphs on each process.
In D1, a process' subgraph does not include neighbors of ghost vertices unless those neighbors are already owned by the process.
In D1-2GL, we include all neighbors of ghost vertices (the two-hop neighborhood of local vertices) in each process's subgraph, giving us ``two ghost layers.''
To the best of our knowledge, this approach has not been explored before with respect to graph coloring.
This method can reduce the total amount of communication relative to D1 for certain graphs by reducing the total number of recoloring rounds needed.
In particular, for mesh or otherwise regular graphs, the second ghost layer is primarily made up of interior vertices on other processes.
Interior vertices are never recolored, so the colors of the vertices in the second ghost layer are fixed. Each process can then directly resolve more conflicts in a consistent way, thus requiring fewer rounds of recoloring.
Fewer recoloring rounds results in fewer collective communications.
However, in D1-2GL, each communication is more expensive than in D1, because a larger boundary from each process is communicated.
Also, in irregular graphs, the second ghost layer often does not have mostly interior vertices.
The relative proportion of interior vertices in the second layer also gets smaller as the number of processes increases.
For the extra ghost layer to pay off, it must reduce the number of rounds of communications enough to make up for the increased cost of each communication.
To construct the second ghost layer on each process, processes exchange the adjacency lists of their boundary vertices; this step is needed only once.
After the ghosts' connectivity information is added, we use the same coloring approach as in D1.
We optimize our conflict detection for both distance-1 implementations by looking through only the ghost vertices' adjacencies ($E_g$), as they neighbor all local boundary vertices.
Our local coloring algorithms require our local graphs to have undirected edges to ghost vertices, so this optimization is trivial for both D1 and D1-2GL.
\begin{table*}[!t]
\centering
\caption{Summary of D1 and D2 input graphs. $\delta_{avg}$ refers to average degree and $\delta_{max}$ refers to maximum degree. Values listed are after preprocessing to remove multi-edges and self-loops. k = thousand, M = million, B = billion.}
\begin{tabular}{|r|r|r|r|r|r|r|}
\hline
Graph & Class & \#Vertices & \#Edges & $\delta_{avg}$ & $\delta_{max}$ & Memory (GB)\\
\hline
ldoor & PDE Problem & 0.9 M & 21 M & 45 & 77 & 0.32 \\
Audikw\_1 & PDE Problem & 0.9 M & 39 M & 81 & 345 & 0.59 \\
Bump\_2911 & PDE Problem & 2.9 M & 63 M & 43 & 194 & 0.96 \\
Queen\_4147 & PDE Problem & 4.1 M & 163 M & 78 & 89 & 2.5 \\
soc-LiveJournal1& Social Network & 4.8 M & 43 M & 18 & 20 k & 0.67 \\
hollywood-2009 & Social Network & 1.1 M & 57 M & 99 & 12 k & 0.86 \\
twitter7 & Social Network & 42 M & 1.4 B & 35 & 2.9 M & 21 \\
com-Friendster & Social Network & 66 M & 1.8 B & 55 & 5.2 k & 27 \\
europe\_osm & Road Network & 51 M & 54 M & 2.1 & 13 & 1.2 \\
indochina-2004 & Web Graph & 7.4 M & 194 M & 26 & 256 k & 2.9 \\
MOLIERE\_2016 & Document Mining Network & 30 M & 3.3 B & 80 & 2.1 M & 49 \\
rgg\_n\_2\_24\_s0 & Synthetic Graph & 17 M & 133 M & 15 & 40 & 2.1\\
kron\_g500-logn21 & Synthetic Graph & 2.0 M & 182 M & 87 & 8.7 & 2.7\\
mycielskian19 & Synthetic Graph & 393 k & 452 M & 2.3 k & 196 k & 6.7\\
mycielskian20 & Synthetic Graph & 786 k & 1.4 B & 3.4 k & 393 k & 21\\
\hline
hexahedral & Weak Scaling Tests & 12.5 M -- 12.8 B & 75 M -- 76.7 B & 6 & 6 & 1.2 GB -- 1.1 TB\\
\hline
\end{tabular}\\
\label{IAB:tab:graphs}
\end{table*}
\subsection{Distance-2 Coloring (D2)}
Our distance-2 coloring algorithm, D2, builds upon both D1 and D1-2GL.
As with distance-1 coloring, we use algorithms from Deveci et al. in KokkosKernels for local distance-2 coloring.
Specifically, we use NB\_BIT, which is a ``net-based'' distance-2 coloring algorithm that uses the approach described by Ta{\c{s}} et al.~\cite{IAB:tacs2017greed}.
Instead of checking for distance-2 conflicts only between a single vertex and its two-hop neighborhood, the net-based approach detects distance-2 conflicts among the immediate neighbors of a vertex.
Our D2 approach also utilizes a second ghost layer to give each process the full two-hop neighborhood of its boundary vertices.
This enables each process to directly check for distance-2 conflicts with local adjacency information.
To find a distance-2 conflict for a given vertex, its entire two-hop neighborhood must be checked for potential conflicting colors.
\begin{algorithm}
\algrenewcommand\algorithmicindent{1.0em}
\caption{Distance-2 conflict detection}
\label{IAB:alg:d2con}
\begin{algorithmic}
\Procedure{Detect-D2-Conflicts}{\newline \hspace*{0.5pc} Local Graph $G_l=\{V_l+V_g,E_l+E_g\}$, $V_b$, colors, GID, doPartialColoring}
\State conflicts $\gets$ 0
\ForAll{$v \in V_b$} \textbf{in parallel}
\ForAll{$\langle v, u\rangle \in (E_l+E_g)$}
\If{not doPartialColoring}
\State conflicts $\gets$ conflicts $+$ Check-Conflicts($v,u,\ldots$)
\If{colors[$v$] $=$ 0}
\State \textbf{break}
\EndIf
\EndIf
\ForAll{$\langle u, x\rangle \in (E_l+E_g)$}
\State \Comment{$u$ is one hop and $x$ is two hops from $v$}
\State conflicts $\gets$ conflicts $+$ Check-Conflicts($v,x,\ldots$)
\If{colors[$v$] $=$ 0}
\State \textbf{break}
\EndIf
\EndFor
\If{colors[$v$] $=$ 0}
\State \textbf{break}
\EndIf
\EndFor
\EndFor
\State \textbf{return} conflicts
\EndProcedure
\end{algorithmic}
\end{algorithm}
Algorithm~\ref{IAB:alg:d2con} shows conflict detection in D2 for each process.
We again use vertex-based parallelism while detecting conflicts; each thread examines the entire two-hop neighborhood of a vertex $v$.
The input argument $V_b$ is the set of distance-2 boundary vertices (as in Figure~\ref{IAB:boundary-verts}), which we precompute.
As with distance-1 conflict detection, we identify all local conflicts and use a random number generator to ensure that vertices to be recolored are chosen consistently across processes.
The iterative recoloring method of D1 then also works for D2 --- we recolor all conflicts, replace the old ghost colors, and then communicate local changes.
\subsection{Partial Distance-2 Coloring (PD2)}\label{IAB:method:PD2}
We have also implemented an algorithm, PD2, that solves the partial distance-2 coloring problem.
Partial distance-2 coloring is similar to distance-2 coloring, but it detects and resolves only two-hop conflicts.
Typically, partial distance-2 coloring is used on non-symmetric graphs. A bipartite graph
$B(V_s, V_t, E_B)$ is constructed from $G(V,E)$ with an undirected edge
$\langle v_s \in V_s, v_t \in V_t \rangle$ $\in E_B$
for each directed edge $\langle v_s, v_t \rangle \in E$; colors are needed only for vertices in $V_s$.
Partial distance-2 coloring colors only one set of the vertices in the bipartite graph,
which is why it is a partial coloring.
In algorithm~\ref{IAB:alg:d2con}, when doPartialColoring is false, the algorithm detects all distance-2 conflicts.
When doPartialColoring is true, it only detects two-hop conflicts for the partial coloring.
Currently, our PD2 implementation must color all vertices in the bipartite representation of the graph;
applications can ignore colors for vertices in $V_t$.
Removing this limitation is a subject for future work.
\subsection{Partitioning}
We assume that target applications partition and distribute their input graphs in some way before calling these coloring algorithms. In our experiments, we used XtraPuLP v0.3~\cite{slota2017partitioning} to partition our graphs.
Determining optimal partitions for coloring is not our goal in this work.
Rather, we have chosen a partitioning strategy representative of that used in many
applications. We partition graphs by balancing the number of edges per-process and minimizing a global edge-cut metric.
This approach effectively balances per-process workload and helps minimize global communication requirements.
\section{Results}
For our experiments, we compare overall performance for D1 and D2 on up to 128 ranks versus Zoltan.
Our performance metrics include execution time, parallel scaling, and number of colors used.
We do not include the partitioning time for XtraPuLP; we assume target applications will partition and distribute their graphs.
Each of the results reported represents an average of five runs.
\subsection{Distance-1 Performance}
We summarize the performance of our algorithms relative to Zoltan
using the performance profiles in Figure~\ref{IAB:distance1prof}.
Performance profiles plot the proportion of problems an algorithm can solve for a given relative cost.
The relative cost is obtained by dividing each approach's execution time (or colors used) by the best approach's execution time (or colors used) for a given problem.
In these plots, the line that is higher represents the best performing algorithm.
The further to the right that an algorithm's profile is, the worse it is relative to the best algorithm.
D1-baseline does not consider vertex degree when doing distributed recoloring (e.g., recolorDegrees is false in Algorithm~\ref{IAB:alg:conflictcheck}).
D1-recolor-degree represents our novel approach that recolors distributed conflicts based on vertex degree (e.g., recolorDegrees is true in Algorithm~\ref{IAB:alg:conflictcheck}).
\begin{figure}[h]
\centering
\caption{Performance profiles comparing D1-baseline and D1-recolor-degree on 128 Tesla V100 GPUs with Zoltan's distance-1 coloring on 128 Power9 cores in terms of (a) execution time and (b) number of colors computed for the graphs listed in Table~\ref{IAB:tab:graphs}.}
\label{IAB:distance1prof}
\begin{subfigure}[b]{0.25\textwidth}
\centering
\includegraphics[width=\textwidth]{plots/distance-1-runtime-profile-recolor-new.png}
\caption{Runtime performance profile}
\label{IAB:d1runtime}
\end{subfigure}%
\begin{subfigure}[b]{0.25\textwidth}
\centering
\includegraphics[width=\textwidth]{plots/distance-1-color-profile-recolor-new.png}
\caption{Color performance profile}
\label{IAB:d1color}
\end{subfigure}
\end{figure}
We ran D1-baseline, D1-recolor-degree and Zoltan with 128 MPI ranks to color the 15 SuiteSparse graphs in Table~\ref{IAB:tab:graphs}.
D1-baseline and D1-recolor-degree used MPI plus 128 Tesla V100 GPUs, while Zoltan used MPI on 128 Power9 CPU cores across 32 nodes (four MPI ranks per node).
Some skewed graphs (e.g., hollywood-2009) did not run on 128 ranks on Zoltan or D1-baseline; in those cases we use the largest run that completed for both approaches.
Figure~\ref{IAB:d1runtime} shows that D1-recolor-degree outperforms both Zoltan and D1-baseline in terms of execution time in these experiments.
D1-baseline and D1-recolor-degree are very similar in terms of runtime performance, but D1-recolor-degree is the fastest approach for 60\% of the graphs,
D1-baseline is fastest on 26\%, and Zoltan is fastest on 13\%.
Zoltan is faster than our approaches on two of the smallest graphs, Audikw\_1, and ldoor.
D1-baseline is faster than D1-recolor-degree on four graphs which are more varied in application and structure: Bump\_2911, com-Friendster, rgg\_n\_2\_24\_s0, and twitter7.
There are four graphs for which D1-baseline and D1-recolor-degree runtime performance differ substantially: Audikw\_1 (D1-baseline is 32\% faster), ldoor (D1-recolor-degree is 42\% faster), mycielskian19 (D1-recolor-degree is 45\% faster), and mycielskian20 (D1-recolor-degree is 38\% faster).
D1-baseline has at most a 10x speedup over Zoltan (with the mycielskian20 graph) and at worst a 1.95x slowdown relative to Zoltan (with ldoor), while
D1-recolor-degree achieves at most a 14x speedup over Zoltan (on mycielskian20), and at worst a 2x slowdown (on Audikw\_1).
Figure~\ref{IAB:d1color} shows that Zoltan outperforms D1-baseline in terms of color usage, but D1-recolor-degree is much more competitive.
Both Zoltan and D1-recolor-degree use the fewest colors in 53\% of experiments; Zoltan and D1-recolor-degree tie on a single graph.
D1-baseline uses the fewest number of colors on a single graph, for which it ties D1-recolor-degree.
D1-recolor-degree uses more colors than D1-baseline for two graphs (indochina-2004 and twitter7); in both graphs, D1-baseline uses roughly 1\% fewer colors.
On average, D1-recolor-degree uses 8.9\% fewer colors than D1-baseline,
and in the best case, it reduces color usage 39\% relative to D1-baseline (mycielskian19).
On average, D1-recolor-degree uses 4\% fewer colors than Zoltan.
In the worst case, D1-recolor-degree uses 51\% more colors than Zoltan (twitter7); in the best case,
D1-recolor-degree uses 53\% fewer colors than Zoltan (mycielskian20).
Because the performance of D1-recolor-degree is generally better than that of D1-baseline,
all further distance-1 coloring results use D1-recolor-degree, and we refer to D1-recolor-degree as D1 going forward.
\subsection{Distance-1 Strong Scaling}
Figure~\ref{IAB:realstrong} shows strong scaling times for Queen\_4147 and com-Friendster.
These graphs are selected for presentation because they are the largest graphs of their respective problem domains.
Data points that are absent were the result of out-of-memory issues or execution times (including partitioning)
that were longer than our single job allocation limits.
D1 scales better on the com-Friendster graph than on Queen\_4147, as
the GPUs can be more fully utilized with the much larger com-Friendster graph.
For Queen\_4147, D1 on 128 GPUs shows a speedup of around 2.38x over a single GPU.
D1 uses 12\% fewer colors than Zoltan in the 128 rank run on Queen\_4147, as well as running 1.75x faster than Zoltan on that graph.
For com-Friendster, D1 is roughly 4.6x faster than Zoltan in the 128 rank run, and only uses 0.6\% more colors than Zoltan.
\begin{figure}[h]
\centering
\caption{Zoltan and D1 strong scaling on select (a) PDE and (b) Social Network graphs.}
\label{IAB:realstrong}
\begin{subfigure}[b]{0.25\textwidth}
\centering
\includegraphics[scale=0.5]{plots/Queen_4147-scaling-total.pdf}
\caption{Queen\_4147}
\label{IAB:queenhybridzoltan}
\end{subfigure}%
\begin{subfigure}[b]{0.22\textwidth}
\centering
\includegraphics[scale=0.5]{plots/com-Friendster-scaling-total.pdf}
\caption{com-Friendster}
\label{IAB:friendsterhybridzoltan}
\end{subfigure}
\end{figure}
For graph processing in general, it is often difficult to demonstrate good strong scaling relative to single node runs. From the Graph500.org benchmark (June 2020 BFS results)~\cite{graph500}, the relative per-node performance difference in the metric of ``edges processed per second'' between the fastest multi-node results and fastest single node results are well over 100x.
For coloring on GPUs, graphs that can fit into a single GPU do not provide sufficient work parallelism for large numbers of GPUs, and multi-GPU execution incurs communication overheads and additional required rounds for speculative coloring.
However, on roughly half of the graphs that fit on a single GPU, D1 with 128 GPUs achieves an average speedup of 1.9x over a single GPU.
D1 achieves a maximum speedup of 2.43x on the mycielskian20 graph.
For the other half of the graphs, D1 does not show a speedup over a single GPU.
On small or highly skewed graphs that fit on a single GPU, speedup is limited, due to the communication overheads and work imbalances that result from distribution even with relatively good partitioning.
Distributed coloring is valuable even for these small problems, however, as parallel applications
using coloring typically have distributed data that would be expensive to gather into one GPU for single-GPU coloring.
On average over all the graphs, D1 uses 38\% more colors than the single GPU run, while Zoltan uses 53.6\% more colors than the single GPU run.
Such large color usage increases are mostly due to the mycielskian19 and mycielskian20 graphs.
These graphs were generated to have known minimum number of colors (chromatic numbers) of 19 and 20 respectively, and our single GPU runs use 19 and 21 colors to color those graphs.
Both D1 and Zoltan have trouble coloring these graphs in distributed memory, but our D1 implementation colors these graphs in fewer colors than Zoltan.
Without these two outliers, the average color increase from the single GPU run is only 2.23\% for D1, and Zoltan decreases color usage by 0.1\% on average.
Zoltan's higher coloring quality is due to its inherently lower concurrency.
\begin{figure}[h]
\centering
\caption{D1 communication time (Comm) and computation time (Comp) from 1 to 128 GPUs.}
\label{IAB:strongbreakdown}
\begin{subfigure}[b]{0.25\textwidth}
\centering
\includegraphics[scale=0.5]{plots/Queen_4147-comm-comp.pdf}
\caption{Queen\_4147}
\label{IAB:queenbreakdown}
\end{subfigure}%
\begin{subfigure}[b]{0.23\textwidth}
\centering
\includegraphics[scale=0.5]{plots/com-Friendster-comm-comp.pdf}
\caption{com-Friendster}
\label{IAB:friendsterbreakdown}
\end{subfigure}
\hspace*{\fill}
\end{figure}
Figure~\ref{IAB:strongbreakdown} shows the total communication and computation time associated with each run.
For both graphs, the dominant scaling factor is computation.
Specifically, the computational overhead associated with recoloring vertices in distributed memory is the dominant scaling factor.
However, strong scaling is good on both graphs,
despite the fact that adding more ranks to a problem also increases the number of vertices that need to be recolored.
Figure~\ref{IAB:friendsterbreakdown} shows that D1 scales to more ranks on com-Friendster, primarily because of the graph's larger size.
\subsection{Distance-1 Weak Scaling}
The greatest benefit of our approach is its ability to efficiently process massive-scale graphs.
We demonstrate this benefit with a weak-scaling study conducted with uniform 3D hexahedral meshes.
The meshes were partitioned with block partitioning along a single axis, resulting in the mesh being distributed in ``slabs.''
Larger meshes were generated by doubling the number of elements in a single dimension to keep the per-process communication and computational workload constant.
Each distinct per-process workload increases the boundary by a factor of two, which correspondingly increases communication and recoloring overhead for distributed runs.
We run with up to 100 million vertices per GPU, yielding a graph of 12.8 billion vertices and 76.7 billion edges in our largest tests; \textbf{this graph was colored in less than two seconds}.
\begin{figure}[h]
\centering
\caption{Weak scaling of D1 on 3D mesh graphs. Tests use 12.5, 25, 50, and 100 million vertices per GPU.}
\includegraphics[scale=0.6]{plots/mesh-hybrid-weak-scaling.pdf}
\label{IAB:meshhybridweak}
\end{figure}
Figure~\ref{IAB:meshhybridweak} shows that the single rank runs for each workload are similar,
indicating that communication and recoloring overhead are the dominant scaling factors for this study.
In increasing the boundary size by a factor of two, we do not necessarily increase the number of distributed conflicts by two,
especially in such a regular graph.
The smaller workloads all have similar and relatively small recoloring workloads, which is why they show more consistent weak scaling than
the 100 Million vertex per rank experiment.
That particular experiment does substantially more recoloring than the others, resulting in its increase in runtime as the number of ranks increases.
We have found that for extremely regular meshes like these, the number of vertices on process boundaries impacts the recoloring workloads for D1.
\subsection{D1-2GL Performance}
In general, D1-2GL reduces the number of collective communications used in the distributed distance-1 coloring. Figure~\ref{fig:2GLrounds} compares the number of communication rounds for D1-baseline and D1-2GL on the Queen\_4147 input for 2 to 128 MPI ranks, averaged over five runs.
With 128 ranks on this graph, the D1-2GL method reduces the number of rounds by 25\% on average, giving a
speedup of 1.18x.
D1-2GL provides speedups over D1-baseline with the smaller graphs: 1.17x with Audikw\_1 and 1.2x with ldoor.
Unfortunately, due to the increased cost of each communication round, D1-2GL does not generally achieve a total execution time speedup over D1-baseline on AiMOS.
Additionally, second ghost layer vertices may be recolored if they are boundary vertices on another processor; this occurs often in dense inputs and incurs further recoloring rounds.
However, in distributed systems with much higher latency costs, D1-2GL could be beneficial.
\begin{figure}[h]
\centering
\caption{Number of communication rounds for D1-baseline and D1-2GL on Queen\_4147 from 2 to 128 ranks.}
\includegraphics[scale=0.6]{plots/Queen_4147-rounds.pdf}
\label{fig:2GLrounds}
\end{figure}
\subsection{Distance-2 Performance}
We compare our D2 method to Zoltan's distance-2 coloring using eight graphs from Table~\ref{IAB:tab:graphs}: Bump\_2911, Queen\_4147, hollywood-2009, europe\_osm, rgg\_n\_2\_24\_s0, ldoor, Audikw\_1, and soc-LiveJournal1.
We use the same experimental setup as with the distance-1 performance comparison.
Figure~\ref{IAB:d2runtime} shows that D2 compares well against Zoltan in terms of execution time, with D2 outperforming Zoltan on all but two graphs.
In the best case, we see an 8.5x speedup over Zoltan on the Queen\_4147 graph.
\begin{figure}[h]
\centering
\caption{Performance profiles comparing D2 on 128 Tesla V100 GPUs with Zoltan's distance-2 coloring on 128 Power9 cores in terms of (a) execution time and (b) number of colors computed for a subset of graphs listed in Table~\ref{IAB:tab:graphs}.}
\label{IAB:distance2prof}
\begin{subfigure}[b]{0.25\textwidth}
\centering
\includegraphics[width=\textwidth]{plots/distance-2-runtime-profile-new.png}
\caption{Runtime performance profile}
\label{IAB:d2runtime}
\end{subfigure}%
\begin{subfigure}[b]{0.25\textwidth}
\centering
\includegraphics[width=\textwidth]{plots/distance-2-color-profile-new.png}
\caption{Color performance profile}
\label{IAB:d2color}
\end{subfigure}
\end{figure}
Figure~\ref{IAB:d2color} shows that D2 has similar color usage as Zoltan.
D2 and Zoltan each produce fewer colors in half of the experiments.
In all but one case in which Zoltan uses fewer colors, D2 uses no more than 10\% more colors.
Interestingly, the number of colors used by D2 on the soc-LiveJournal1 graph is unchanged with one and 128 GPUs.
Zoltan outperforms D2 with respect to runtime on skewed graphs because Zoltan has distance-2 optimizations which reduce communication overhead
and minimize the chance for distributed conflicts.
\subsection{Distance-2 Strong Scaling}
Figures~\ref{IAB:Bumpstrong} and~\ref{IAB:queenstrong} show the strong scaling behavior of D2 and Zoltan on Bump\_2911 and Queen\_4147.
Bump\_2911 shows that D2 scales better initially than Zoltan, and with 128 ranks, D2 is 2.9x faster than Zoltan, using 0.7\% more colors.
Queen\_4147 shows better scaling for D2 as well; with 128 ranks, D2 is 8.5x faster than Zoltan and uses 10\% fewer colors.
\begin{figure}[h]
\centering
\caption{D2 and Zoltan strong scaling for distance-2 coloring.}
\label{IAB:distance2strong}
\begin{subfigure}[b]{0.25\textwidth}
\centering
\includegraphics[scale=0.5]{plots/Bump_2911-d2-total.pdf}
\caption{Bump\_2911}
\label{IAB:Bumpstrong}
\end{subfigure}%
\begin{subfigure}[b]{0.22\textwidth}
\centering
\includegraphics[scale=0.5]{plots/Queen_4147-d2-total.pdf}
\caption{Queen\_4147}
\label{IAB:queenstrong}
\end{subfigure}
\end{figure}
On average over the eight graphs, D2 exhibits 4.29x speedup on 128 GPUs over a single GPU, and uses 7.5\% more colors than single GPU runs.
Speedup is greater with D2 than D1 because distance-2 coloring is more computationally intensive, and thus has a larger work-to-overhead ratio.
\begin{figure}[h]
\centering
\caption{D2 communication time (comm) and computation time (comp) from 1 to 128 GPUs.}
\label{IAB:distance2breakdown}
\begin{subfigure}[b]{0.25\textwidth}
\centering
\includegraphics[scale=0.5]{plots/Bump_2911-d2-comm-comp.pdf}
\caption{Bump\_2911}
\label{IAB:bumpbreakdown}
\end{subfigure}%
\begin{subfigure}[b]{0.22\textwidth}
\centering
\includegraphics[scale=0.5]{plots/Queen_4147-d2-comm-comp.pdf}
\caption{Queen\_4147}
\label{IAB:d2queenbreakdown}
\end{subfigure}
\end{figure}
Figures~\ref{IAB:bumpbreakdown} and~\ref{IAB:d2queenbreakdown} show the communication and computation breakdown of D2 on Bump\_2911 and Queen\_4147.
Bump\_2911 shows computation and communication scaling for up to 128 ranks, while color usage increases by only 0.6\%.
In general, the relative increase in color usage from a single rank for distance-2 coloring is less than for distance-1 coloring. The number of colors used for distance-2 coloring is greater than for distance-1; therefore, a similar absolute increase in color count results in a lower proportional increase.
\subsection{Distance-2 Weak Scaling}
Figure~\ref{IAB:meshd2weak} demonstrates the weak scaling behavior for D2.
The same hexahedral mesh graphs were used as in the D1 weak scaling experiments.
In general, D2 has fairly consistent weak scaling.
The runtimes across workloads with a single rank increase by more than a factor of two because the number of edges in each mesh increases by more than a factor of two, and the complexity of the distance-2 coloring algorithm for local colorings depends on the number of edges.
Weak scaling to large process counts is good for all workloads.
\begin{figure}[h]
\centering
\caption{Distance-2 weak scaling of D2 on 3D mesh graphs.}
\includegraphics[scale=0.6]{plots/mesh-d2-weak-scaling.pdf}
\label{IAB:meshd2weak}
\end{figure}
\subsection{Partial Distance-2 Strong Scaling}
\begin{table}[!t]
\small
\centering
\caption{Summary of the graphs used for PD2 tests. Statistics are for the bipartite representation of the graph (Section ~\ref{IAB:method:PD2}). $\delta_{avg}$ is average degree and $\delta_{max}$ is maximum degree. Numeric values listed are after preprocessing to remove multi-edges and self-loops. k = thousand, M = million}
\begin{tabular}{|r|r|r|r|r|r|r|}
\hline
Graph & Class & \#Vtx & \#Edges & $\delta_{avg}$ & $\delta_{max}$ \\
\hline
Hamrle3 &Circuit Sim. &2.9 M &5.5 M & 3.5 & 18 \\
patents &Patent Citations &7.5 M &14.9 M & 1.9 & 1k \\
\hline
\end{tabular}\\
\label{IAB:tab:pd2graphs}
\end{table}
Table~\ref{IAB:tab:pd2graphs} shows the graphs that we used to compare our PD2 implementation against Zoltan.
Partial distance-2 coloring is typically used on non-symmetric and bipartite graphs; the graphs in Table~\ref{IAB:tab:pd2graphs} are representative of application use cases.
We report metrics for the bipartite representation of the graph (as described in Section~\ref{IAB:method:PD2}).
Partial distance-2 colorings typically are needed for only a subset of the vertices in a graph, but
our PD2 implementation colors all vertices in the graph. We compare to Zoltan, which colors only vertices that would be colored in typical partial distance-2 coloring.
Thus, in general, PD2 is coloring roughly twice as many vertices as Zoltan.
\begin{figure}[h]
\centering
\caption{PD2 strong scaling for partial distance-2 coloring.}
\label{IAB:partialdistance2strong}
\begin{subfigure}[b]{0.25\textwidth}
\centering
\includegraphics[scale=0.5]{plots/patents-pd2-scaling-total.pdf}
\caption{patents}
\label{IAB:patentsstrong}
\end{subfigure}%
\begin{subfigure}[b]{0.22\textwidth}
\centering
\includegraphics[scale=0.5]{plots/Hamrle3-pd2-scaling-total.pdf}
\caption{Hamrle3}
\label{IAB:hamrle3strong}
\end{subfigure}
\end{figure}
Figure~\ref{IAB:partialdistance2strong} shows the strong scaling behavior of PD2.
The experiment with Hamrle3 on four ranks benefits from a particularly good partition that results in less recoloring for both PD2 and Zoltan relative to other process configurations.
For the patents graph, PD2 has a particularly heavy recoloring workload for four ranks, resulting in a large increase in runtime from two to four ranks.
Even though PD2 is coloring more vertices than Zoltan in these tests, PD2 achieves roughly 2x speedup on 128 ranks with Hamrle3.
With patents, Zoltan is faster than PD2; this result can be attributed partially to Zoltan's optimized recoloring scheme that reduces the number of conflicts introduced while recoloring distributed conflicts.
PD2 achieves a 1.73x speedup over a single GPU with the patents graph, while it did not show any speedup from a single GPU with Hamrle3.
Figure~\ref{IAB:patentsstrong} shows that, with the patents graph, Zoltan is faster on one core than a single GPU.
This speedup is attributed to Zoltan's coloring fewer vertices than PD2;
when Zoltan colors the same number of vertices as PD2, their single rank runtimes are equal.
Investigating the cause of this result is a subject for future research.
For these two graphs, PD2 uses a very similar number of colors as Zoltan.
PD2 uses at most 10\% more colors in the distributed runs.
This difference is typically only one to five colors more than Zoltan.
\begin{figure}[h]
\centering
\caption{PD2 communication time (comm) and computation time (comp) from 1 to 128 GPUs}
\label{IAB:partialdistance2breakdown}
\begin{subfigure}[b]{0.25\textwidth}
\centering
\includegraphics[scale=0.5]{plots/patents-pd2-comm-comp.pdf}
\caption{patents}
\label{IAB:patentsbreakdown}
\end{subfigure}%
\begin{subfigure}[b]{0.22\textwidth}
\centering
\includegraphics[scale=0.5]{plots/Hamrle3-pd2-comm-comp.pdf}
\caption{Hamrle3}
\label{IAB:hamrle3breakdown}
\end{subfigure}
\end{figure}
Figure~\ref{IAB:partialdistance2breakdown} shows that computation is the main factor in the scaling behavior of PD2.
In distributed runs, the largest factor of the runtime is the computation overhead involved in recoloring distributed conflicts.
Figure~\ref{IAB:hamrle3breakdown} shows an unexpected decrease in computation for the Hamrle3 graph for four ranks,
which is due to a decrease in the recoloring workload.
PD2's recoloring workload is approximately 25,000 vertices per rank in most experiments, but the four-rank experiment has a recoloring workload of 9,000 vertices per rank.
Figure~\ref{IAB:patentsbreakdown} shows that the four-rank PD2 run has a much longer computation time than expected; this is due to the total distributed recoloring workload increasing by a factor of six.
Additionally, the 64-rank run with the patents graph shows slightly less computational scaling than expected, due to an increase in recoloring rounds.
Increasing recoloring rounds serializes recoloring computation and incurs more rounds of communication, resulting in a runtime increase.
Optimizing recoloring to reduce subsequent conflicts and reduce the number of recoloring rounds necessary in D2 and PD2 are subjects for future research.
\section{Coloring Library(ies)}
ColPack~\cite{gebremedhin2013colpack} is a graph coloring library that supports multiple
different instances of the graph coloring problem. Among the instances handled by ColPack are:
Distance-1, Distance-2, Star, Acyclic, Restricted Star, Triangular, Partial Distance-2 and
Star Bicoloring. ColPack supports multiple different vertex orderings as well, which enables
the user to tailor the coloring to their data, if possible. They propose a generic coloring framework
that is able to support all of the different coloring algorithms. For parallel algorithms, they note
that other publications' distributed memory algorithms are
used~\cite{bozdaug2008framework,bozdaug2010distributed}, and that multithreaded versions are
the subject of current and future research. The end goal for this package is to be able to support
sparse derivative computations on a wide range of architectures.
\section{Shared Memory Implementations}
Singhal, Peri, and Kalyanasundaram~\cite{singhal2017practical} present a study of multiple
shared-memory implementations. They translate distributed memory algorithms faithfully to shared memory,
and find that such translations are often underperforming. In particular, their shared memory imitation
of a distributed memory implementation of the Jones-Plassmann algorithm took 18 hours to color the
LiveJournal graph from the SNAP dataset. They show that algorithms that are developed using
shared-memory constructs such as fine-grained resource locks perform much better, and in their
test case these algorithms outperformed the sequential implementation. They achieved around a 2x speedup
over the sequential algorithm, while keeping the coloring quality high. They only provide results on
the LiveJournal graph. Their conclusion is that using fine-grained locking for synchronization gives
the best results.
Rokos, Gorman and Kelly~\cite{rokos2015fast} build off work presented in
{\c{C}}ataly{\"u}rek et al.~\cite{ccatalyurek2012graph}. {\c{C}}ataly{\"u}rek et al.\ presented
both a Cray XMT-reliant dataflow algorithm and a {\it speculation and iteration} algorithm.
In this context, a {\it speculation and iteration} algorithm is one that colors vertices in parallel
in such a way that may result in two neighboring vertices sharing the same color.
In order to obtain a valid coloring, these algorithms must iterate to resolve coloring conflicts caused
by this ``optimistic'' parallel coloring scheme.
Another popular approach to parallel graph coloring finds maximal independent sets and colors vertices
in the same set in parallel. \cite{ccatalyurek2012graph} motivate their optimistic direction by stating that
approaches based on finding maximal independent sets are generally
less performant than approaches that optimistically color vertices, detect conflicts, and recolor.
The approach presented in \cite{ccatalyurek2012graph} is fairly straightforward, using a First-Fit
coloring to speculatively color a thread's vertices, then detecting and fixing conflicts in parallel.
While this approach shows good strong scaling, Rokos et. al found that if the {\it speculate}
phase is combined with the {\it iterate} phase, which are distinct in \cite{ccatalyurek2012graph}, a
significant speedup is seen, with no penalty to the quality of the coloring. \cite{rokos2015fast} reports
difficulties in running these algorithms on GPUs, and attributes it to the more uniform synchronicity of
GPU threads as opposed to the more unpredictable scheduling of CPU threads. Thus, these algorithms may
not be trivially portable to a GPGPU context.
\subsection{Distance-2 and Partial Distance-2}
Ta{\c{s}}, Kaya, and Saule~\cite{tacs2017greed} present an algorithm for Bipartite Graph Partial Coloring
(Partial Distance-2 Coloring) that is 4.71x faster than ColPack's implementation. Their idea is to look at the
Bipartite Graph Coloring problem as a Hypergraph coloring problem. They refer to the vertices of the hypergraph as
``pins'', and they call the hyperedges ``nets''. They point out that the literature heavily favors ``pin-based''
colorings, and opt to go with a ``net-centric'' approach, since the nets of the hypergraph define the neighborhoods
of interest.
They also use the same
ideas \todo{(which ideas? how did they achieve the speed-up over colpack?)} in a full Distance-2 coloring algorithm. Additionally they propose cost-free heuristics that
attempt to give a more balanced coloring. As opposed to traditional Partial Distance-2 colorings that
adapt Distance-2 coloring algorithms to do Partial Distance-2, Ta{\c{s}} et. al pursue a
net-centric approach \todo{in which ... explain net-centric}. A drawback of net-centric coloring conflict detection is that each net must be
fully traversed at every iteration. In practice, vertex-centric approaches are more costly than
net-centric approaches in early iterations, so they propose a somewhat adaptive approach. They show
that the best runtime performance is gained by doing net-based coloring and conflict detection in
the first few iterations, and then transitioning to a vertex-centric approach. They report an 11.38x
speedup from ColPack's sequential Partial Distance-2 algorithm, while only using 8\% more colors.
Additionally their fastest implementation is 4.12x faster than ColPack's parallel Partial Distance-2
coloring.
\section{Distributed Memory Implementations}
Gebremedhin and Manne~\cite{gebremedhin2000scalable} propose a parallel framework for graph coloring.
Essentially, their method is to speculatively color the graph, and detect coloring conflicts in parallel. Then, conflicts are resolved sequentially.
\todo{Need to describe the parallelism -- each processor colors a
non-overlapping subgraph independently, conflicts arise at boundaries between
processors' subgraphs, etc.}
An addition that they provide is based on Culberson's
Iterated Greedy Coloring Heuristic\todo{\cite{Culberson}}, which means there are two speculative coloring phases before
detecting conflicts. \todo{What is different in the second phase?} This is done because according to Culberson, the coloring obtained is guaranteed to
be at least as good, but often uses fewer colors. In this coloring step, vertices are ordered by
decreasing color class from the initial speculative coloring.
\todo{Are you saying that the first coloring visits vertices arbitrarily,
and the second coloring visits them according to the color assigned in the first
coloring? Also, is ``decreasing'' with respect to the color number, or the
number of vertices with that color, or something else?}
Bozda{\u{g}} et al.~\cite{bozdaug2008framework} propose a framework that is specific to
distributed-memory architectures. They provide their source code in the Zoltan package of Trilinos.
They also opt for a {\it speculate and iterate} approach. Their approach assumes that the input graph
is partitioned reasonably among the processors, and they define interior vertices as vertices that have
all neighbors on the same processor, while boundary vertices are vertices with at least one remote
neighbor. The interior vertices can be colored independently, as they will not participate in any
conflicts. The coloring of boundary vertices proceeds in rounds. Each round has a tentative coloring
phase, and a conflict detection phase. The tentative coloring uses supersteps in a manner akin to a BSP
computation. \todo{I don't understand the previous sentence; what happens in each superstep? Also, I think of BSP as a paradigm that could be used in supersteps, not a particular computation.} Updated color information is sent in bulk at the end of this phase. Conflict detection \todo{detection? or resolution?} is
implemented in a way that allows each processor to make the same decision without communication. \todo{For the previous sentence, can you be more specific in the same number of words? Something like, ``Using random number
generation, processors can independently assign consistent colors to conflicted
vertices without communication.''} The
process concludes when there are no more vertices to recolor. \cite{bozdaug2008framework} explores
many minor tweaks to this approach to get the best performance.
\subsection{Distance-2 Colorings}
Bozda{\u{g}} et. al.~\cite{bozdaug2005parallel,bozdaug2010distributed} present a Distance-2 version of
the framework presented in~\cite{bozdaug2008framework}. This approach
(it is the same idea across both papers \todo{don't need to say that}) is also present in the Zoltan package of Trilinos. Instead of extending the extra vertex data
that is stored on each processor to include not only distance-1 neighborhoods but also distance-2
neighborhoods, Bozda{\u{g}} et. al use the processor that owns the intermediate vertex in a 3-vertex path
to detect distance-2 coloring conflicts. The approach is very similar to \cite{bozdaug2008framework},
except for the fact that the conflict detection phase now uses supersteps as well. Instead of using
unscheduled communication \todo{what is unscheduled communication? is it used in their previous paper?}, each processor computes a coloring schedule, so each processor knows which
neighboring processors need what coloring information at which point in time.
\todo{you might consider rephrasing the previous sentence; it is awkward.}
This means that conflict
detection only needs to worry about vertices in the current scheduling step. Additionally, processors
must participate until there are no conflicts globally, as they may be involved with an indirect
conflict. \todo{is this global participation unique to this paper or is it
true in the previous papers? It isn't clear to me which details in this
paragraph are different from the d-1 implementation.}
\section{GPU-based Implementations}
Naumov, Castonguay and Cohen~\cite{naumov2015parallel} conduct a study of two GPU-based coloring
algorithms, the Cohen-Castonguay (CC) algorithm, and the Jones-Plassmann-Luby (JPL) algorithm, \todo{in the context of a ??? application}.
Both of these algorithms are based on finding maximal independent sets. They show that the JPL algorithm
results in a smaller set of colors, and thus more application parallelism, but it is significantly
slower than the CC algorithm. Additionally, because their application presumably allows for a slightly
incomplete coloring, they do a runtime study for incomplete colorings, but find that the existing
discrepancies \todo{what discrepancies?} hold for coloring 80\% and 90\% of the graph.
Pham and Fan~\cite{pham2018efficient} propose two new algorithms and compare them to three other existing
algorithms. They propose a ``Counting-based Jones-Plassmann'' (CJP) algorithm, along with a
``Conflict Coloring'' (CC) \todo{already used CC in paragraph above} algorithm. These two algorithms encompass both {\it speculate and iterate} and
maximal independent set approaches to graph coloring. They compare to the algorithm presented in
Deveci et. al~\cite{deveci2016parallel}, the {\it csrcolor} routine from the CUSPARSE library, and
ColPack's sequential algorithm. Both the CC and CJP algorithms are faster than the other three, with
CC being the fastest of the group. \todo{remove next four words} Predictably, this means that generally CC uses more colors than CJP,\todo{; instead of ,}
however {\it csrcolor} used the most colors. \todo{how did deveci compare?} Notably, the CJP and CC algorithms only used 1.3x and 1.5x
the colors of the sequential ColPack algorithm.
Che et. al.~\cite{che2015graph} look into load-balancing an implementation of the Jones-Plassmann
algorithm for GPUs. They use a work-stealing approach presented by Cederman and
Tsigas~\cite{cederman2012dynamic}, and modify Jones-Plassmann's random number generation to factor in
vertex degrees, so that larger degree vertices would be processed first, and then work units would be
relatively uniform. Che et. al propose doing a vertex-based Jones-Plassmann for the first few iterations,
then transitioning to traditional Jones-Plassmann. They do not attempt to answer the question of when to
switch from one approach to the other, and leave that as an open question.
\section{Hybrid Implementations}
Grosset et. al.~\cite{grosset2011evaluating} propose a CPU+GPU framework that uses a
{\it speculate and iterate} approach. They do initial graph partitioning \todo{for distributed memory? or into chunks that fit in GPU memory?} on the CPU, run coloring
on the GPU, and once the number of conflicting vertices is small enough, they resolve the remaining
conflicts on the CPU. They used four \todo{you list five in the next sentence -- or is ``SDO and LDO'' one method?} vertex ordering heuristics, including two new heuristics \todo{which are the new ones?}. They
used First Fit, SDO \todo{SDO = ??} and LDO \todo{LDO = ??}, MAX OUT, and MIN OUT. First fit is the traditional greedy heuristic, where
the color assigned is the smallest color, and the vertex ordering is not specified. SDO and LDO
allocates the smallest color(?) \todo{fix the (?)} to the vertex with the highest number of colored neighbors, and breaks
ties \todo{ties among what?} using the highest degree vertex. MAX OUT allocates the smallest color to the vertex that has the
most remote neighbors \todo{what is a remote neighbor?}, again breaking ties with the degree. MIN OUT allocates the smallest color to the
vertex that has the fewest remote neighbors, breaking ties with the degree.
Test cases used in this paper
were pretty small, it would be interesting to see behavior on larger graphs (perhaps infeasible due to
lack of MPI+GPU implementation?). \todo{fix previous sentence: run-on sentence}
Grosset et. al report that the best performance/quality tradeoff is
afforded by the parallel SDO and LDO. This approach results in less \todo{fewer} colors than the sequential First fit,
but it is also slower overall.
Sariy{\"u}ce, Saule, and {\c{C}}ataly{\"u}rek~\cite{sariyuce2012scalable} motivates the use of the
framework present in Zoltan, saying that dataflow algorithms rely on niche hardware support that is not
prevelant in HPC systems. They also note that most distributed-memory machines are only
distributed-memory at the highest logical level, and have some form of shared memory architecture at a
lower level. They propose a hybrid MPI + OpenMP implementation that builds on the Zoltan graph coloring
framework. They show that their hybrid approach has better scalability than an MPI-only approach, the reason
being that they are able to use architecture-appropriate parallelization at every
logical level. Specifically, they are able to used shared memory on compute nodes, and they can use
MPI to pass messages between compute nodes. Important things to note are that thread affinity and
scheduling have an important and often unpredictable effect on performance. Additionally, only
distance-1 coloring was implemented.
\section{Balanced Coloring}
Lu, et al~\cite{lu2015balanced} present algorithms for balanced coloring. They introduce both
{\it ab initio} and {\it guided} algorithms, and show how to parallelize the guided coloring algorithms.
{\it Ab initio} algorithms attempt to build balanced colorings from the start, as opposed to guided
algorithms, which start with an unbalanced coloring (typically First-Fit for the Culberson property \todo{Culberson property == ??}), and
then attempt to balance the colorings by either recoloring using a balance-aware algorithm, or
shuffling the vertices from over-full coloring classes to under-full coloring classes. They propose and
analyze different parallelization schemes for both approaches, and find that a parallel guided shuffling algorithm
they call ``Scheduled Reverse'' has the best performance-quality tradeoff. This approach uses a greedy First-Fit
coloring initially, and then identifies arbitrary subsets of vertices from over-full color classes. When shuffling
vertices, it tries to fill under-full bins in decreasing order of color index, which takes advantage of the
incidence property \todo{incidence property == ??} afforded by the greedy First-Fit heuristic.
\todo{Deveci's paper is cited but there is no summary for it.}
%
1,314,259,994,974 | arxiv | \section{\label{Section1}Introduction}
Neutron stars are good testing grounds for predictions of theories beyond the standard model, since they are compact enough to provide conditions necessary for exotic physics to occur \cite{Lattimer2012485,Ozel2016401,Vidana2018,Lavallaz2010}. Furthermore, they are a staple in the studies of nuclear physics, quantum chromodynamics (QCD), and general relativity (GR) \cite{Glendenning2000,Shapiro2004,Camenzind2007}.
One area of research that is currently very active in theoretical and observational astrophysics is neutron star interiors, especially with the advent of gravitational and electromagnetic wave observations among neutron star mergers \cite{Lattimer2012485,Ozel2016401,Vidana2018,Lavallaz2010}. The description of static, nonrotating neutron stars is achieved by solving the Tolman-Oppenheimer-Volkoff (TOV) equations of GR \cite{Tolman1939,Oppenheimer1939374,Caroll2004}, which are completed by an equation of state (EoS) \cite{Lattimer2012485,Ozel2016401,Haensel2007,Haensel2005,Potekhin2013}. This yields the mass-radius relations for neutron stars which can be analyzed \cite{Silbar2004892}. Realistic models of neutron stars utilize nuclear field theory in the context of the relativistic mean field theory (rMFT) in obtaining the EoS for nuclear structure, particularly at the core of the star \cite{Chin197424,Serot1997}. Moreover, several semi-empirical approaches have also been developed to describe the overall structure of the neutron star, by including its outer layers, such as the crust and/or the atmosphere \cite{Haensel2005,Potekhin2013}.
Another factor that we can consider in the studies of neutron stars are the observations and measurements of the mass-energy density of the universe which shows that majority of its mass-energy content does not come from matter that is well-described by the standard model; about $25\%$ is of the form now known as dark matter (DM) \cite{Calabrese2017,Kisslinger2019}. Strong evidence for the existence of DM using galactic rotation curves was provided by Vera Rubin, Kent Ford and Ken Freeman in the 1960s and 1970s \cite{Rubin1970a,Rubin1970b}. A favored dark matter candidate is the weakly interacting massive particle (WIMP), which is predicted by supersymmetric extensions to the standard model, and at the same time supported by N-body cosmological simulations \cite{Andreas2008,Springer2005}. Reviews on DM can be found in Ref. \cite{Kisslinger2019,Young2017,Arun2017}.
The effects of DM on neutron star structure, and other properties such as tidal deformability have been investigated in the literature, using different assumptions on the nature of the DM involved \cite{Goldman1989,Kovaris2010,Panotopoulos2017,Ellis2018,Rezaei2018,Das2019,Kain2021}. Some of these used the relativistic mean field theory (rMFT) in quantum hadrodynamics (QHD) \cite{Panotopoulos2017,Das2019}. In particular, the DM particle is assumed to be fermionic, captured and trapped inside the neutron star \cite{Panotopoulos2017,Das2019,Cline2015}. The result of this approach is that DM softens the nuclear equation of state, yielding neutron stars of lower masses than neutron stars without DM \cite{Panotopoulos2017,Das2019}. This effect of reducing neutron star masses is also supported by studies assuming that there is a DM core, together with a nuclear EoS in the middle of the star \cite{Ellis2018}.
A nuclear EoS, however is only dominant at the core of the neutron star, with densities $\rho > \rho_c \sim 10^{14}$ g/cm$^3$, while an actual neutron star can have a crust or atmosphere \cite{Haensel2005,Potekhin2013}. The neutron star can then be thought of as having a crust, with density $\rho$, surrounding the core, beginning with density $\rho_c$, such that $\rho <\rho_c$ \cite{Haensel2005,Potekhin2013}. In Ref. \cite{Das2019}, the DM-admixed nuclear EoS was added with a Baym-Pethick-Sutherland (BPS) crust \cite{Baym1971} by using a polytropic formula. In this paper we extend these studies by admixing DM at the nuclear core, and by adding three types of crust on top of the core: an ideal neutron gas (ING), the Friedman-Pandharipande-Skyrme (FPS) crust, and finally, the Skyrme Lyon (SLy) crust.
In this paper, we deal with the QHD model, the $\sigma-\omega$ or the Walecka model \cite{Chin197424} and include the Higgs fields up to order $h^2$. In the Standard Model, the Higgs fields are small fluctuations about the vacuum and higher orders of $h$ can be ignored. Given the simplicity of the Walecka model, we are able to extract the implications of putting a crust on top of the core of the star. We then extend the analysis of Ref. \cite{Panotopoulos2017} by investigating instabilities in the DM-admixed EoS, and we fix these instabilities by replacing these unstable regions, which happen to be at the low density-end of the EoS with that of crust EoS, notably first with an ING EoS, and then with the FPS EoS, and finally the SLy EoS; the latter two can be represented by semi-analytical models that describe the neutron star crust realistically \cite{Haensel2005}. The effects of these modifications to the DM-admixed EoS are then compared and studied.
We summarize the structure of this paper as follows. In Section \ref{Section2}, we discuss the modification of the Walecka model with DM. Section \ref{Section3} then deals with adding the ING crust, the FPS crust, and the SLy crust to the DM-admixed EoS. The consequences of these modifications to the neutron star structure are discussed in Section \ref{Section4}. Finally, we conclude by giving some recommendations in Section \ref{Section5}. In this paper, we work with natural units $\hbar = c = 1$ unless otherwise explicitly stated.
\section{\label{Section2}The Walecka Model Equation of State with DM}
The simplest QHD model is the $\sigma-\omega$ or Walecka model \cite{Glendenning2000,Chin197424}. It is a model describing nucleon-nucleon interaction that is mediated by exchanging $\sigma$ and $\omega$ mesons. The fields in this model are based on four particles: the nucleons (neutrons and protons) $\psi$, the scalar meson $\sigma$, and the omega vector mesons $\omega^{\mu}$, with a Lagrangian density given by
\begin{equation}
\begin{split}
\mathcal{L}_{\text{had}} =&\bar{\psi} \left[i \gamma_{\mu} \left(\partial^{\mu} + ig_{\omega}\omega^{\mu} \right)-\left(m_n - g_{\sigma}\sigma \right) \right]\psi\\
&+ \frac{1}{2} \left(\partial_{\mu}\sigma \partial^{\mu}\sigma -m_{\sigma}^2 \sigma^2 \right) - \frac{1}{4} \omega_{\mu \nu}\omega^{\mu \nu} + \frac{1}{2}m_{\omega}^2\omega_{\mu}\omega^{\mu}, \label{eq. 1}
\end{split}
\end{equation}
where $\omega^{\mu\nu} = \partial^{\mu}\omega^{\nu} - \partial^{\nu}\omega^{\mu}$, $m_n \approx 1$ GeV is the mass of the nucleon (or neutron), $m_{\sigma} = 520$ MeV is the mass of the $\sigma$ meson, $m_{\omega} = 783$ MeV is the mass of the $\omega$ meson, and the dimensionless coupling constants are $g_{\omega}^2 = 190.4$ for the $\omega$ meson coupled to the four-current $\bar{\psi}\gamma^{\mu}\psi$ and $g_{\sigma}^2 = 109.6$ for the $\sigma$ meson coupled with the baryon scalar density $\bar{\psi}\psi$ \cite{Serot1997,Panotopoulos2017}.
Let us now consider a DM particle with mass $M_{\chi} = 200$ GeV which would be the lightest supersymmetric neutralino \cite{Martin2010}. The fermionic DM Lagrangian density is given by
\begin{equation}
\begin{split}
\mathcal{L}_{\text{DM}} =& \bar{\chi}\left[i\gamma^{\mu}\partial_{\mu} - M_{\chi} + yh \right]\chi \\
&+ \frac{1}{2}\partial_{\mu}h\partial^{\mu}h - \frac{1}{2}M_h^2h^2 + f \frac{m_n}{v}\bar{\psi}h\psi, \label{eq. 2}
\end{split}
\end{equation}
where we have the Higgs boson $h$ with mass $M_h = 125$ GeV, a DM-Higgs Yukawa coupling $y$, and a nucleon-Higgs Yukawa coupling $fm_n/v$, where $v = 246$ GeV is the Higgs vacuum expectation value, and $f=0.3$ parametrizes the Higgs-nucleon coupling \cite{Martin2010,Murakami2001,Panotopoulos2017,Das2019}. Very stringent constraints on the DM-nucleon interaction for DM masses above $6$ GeV are given by recent DM direct detection experiments \cite{Akerib2017,Cui2017,Aprile2018}. We then consider a negligible DM-nucleon coupling and do not include this term in Eq. (\ref{eq. 2}) \cite{Gresham2019,Nelson2019}. The total Lagrangian density for the DM-admixed system is then
\begin{equation}
\mathcal{L} = \mathcal{L}_{\text{had}} + \mathcal{L}_{\text{DM}}. \label{eq. 3}
\end{equation}
In rMFT, the system is assumed to be uniform in its ground state, and the fields in the Lagrangian are replaced by their mean values \cite{Glendenning2000}, that is, $\sigma \rightarrow \langle \sigma \rangle$, $\omega_{\mu} \rightarrow \langle \omega_{\mu}\rangle$, and $h \rightarrow \langle h\rangle$. The equations of motion then become
\begin{equation}
\begin{split}
m_{\sigma}^2 \langle \sigma\rangle &= g_{\sigma}\langle \bar{\psi}\psi\rangle\\
m_{\omega}^2 \langle \omega_{\mu} \rangle &= g_{\omega}\langle \bar{\psi}\gamma_{\mu}\psi\rangle\\
\left[\gamma_{\mu}(i\partial^{\mu} - g_{\omega}\langle \omega^{\mu}\rangle) - m_n^*\right]\psi(x) &= 0\\
M_h^2\langle h \rangle &= y \langle \bar{\chi}\chi\rangle + f\frac{m_n}{v}\langle \bar{\psi}\psi\rangle\\
\left[i\gamma^{\mu}\partial_{\mu} - M_{\chi}^* \right]\chi(x)&=0, \label{eq. 4}
\end{split}
\end{equation}
where the effective masses are given by
\begin{equation}
\begin{split}
M_{\chi}^* \equiv& M_\chi -y\langle h\rangle\\
\quad m_n^* \equiv& m_n - g_{\sigma}\langle\sigma\rangle - f\frac{m_n}{v}\langle h\rangle. \label{eq. 5}
\end{split}
\end{equation}
Defining the following dimensionless quantities to increase the efficiency of our numerical calculations:
\begin{equation}
\tilde{p} = \frac{p}{m_n}; \quad \varphi = \frac{p_F}{m_n}; \quad \phi = \frac{p_F^{DM}}{m_n}, \label{eq. 6}
\end{equation}
\begin{equation}
\tilde{\sigma} = \frac{g_{\sigma}\langle\sigma\rangle}{m_n}; \quad \tilde{\omega}_0=\frac{g_{\omega}\langle\omega_0\rangle}{m_n}; \quad \tilde{h} = \frac{Y\langle h\rangle}{m_n}; \quad Y = \frac{fm_n}{v}, \label{eq. 7}
\end{equation}
\begin{equation}
\tilde{\epsilon} = \frac{\epsilon}{\epsilon_0}; \quad \tilde{P} = \frac{P}{\epsilon_0}; \quad \text{where} \,\, \epsilon_0 = \frac{m_n^4}{3\pi^2}, \label{eq. 8}
\end{equation}
where $p$ is the particle momentum, $p_F$ is the nucleon Fermi momentum, $p_F^{DM}$ is the DM Fermi momentum, $\epsilon$ is the energy density, and $P$ is the pressure, then the mean fields for the DM-admixed Walecka model become
\begin{equation}
\tilde{\sigma} = \frac{g_{\sigma}^2m_n^2}{m_{\sigma}^2\pi^2}\int_0^{\varphi}d\tilde{p}\tilde{p}^2\frac{1-\tilde{\sigma}-\tilde{h}}{\sqrt{\tilde{p}^2 + (1-\tilde{\sigma}-\tilde{h})^2}}, \label{eq. 9}
\end{equation}
\begin{equation}
\tilde{\omega}_0= \frac{g_{\omega}^2m_n^2}{m_{\omega}^2\pi^2}\frac{\varphi^3}{3}, \label{eq. 10}
\end{equation}
\begin{equation}
\begin{split}
\tilde{h} =& \frac{yYm_n^2}{M_h^2\pi^2}\int_0^{\phi}d\tilde{p}\tilde{p}^2\frac{\left(\frac{M_{\chi}}{m_n}-\frac{y}{Y}\tilde{h}\right)}{\sqrt{\tilde{p}^2+\left(\frac{M_{\chi}}{m_n}-\frac{y}{Y}\tilde{h}\right)^2}}\\
&+ \frac{Y^2m_n^2}{M_h^2\pi^2}\int_0^{\varphi}d\tilde{p}\tilde{p}^2\frac{(1-\tilde{\sigma}-\tilde{h})}{\sqrt{\tilde{p}^2+(1-\tilde{\sigma}-\tilde{h})^2}}. \label{eq. 11}
\end{split}
\end{equation}
The dimensionless, parametric, DM-admixed Walecka (or $\sigma$-$\omega$-DM) EoS of the form $\epsilon(P)$ is then written as
\begin{equation}
\begin{split}
\tilde{\epsilon} =& \frac{1}{\epsilon_0}\Bigg[\frac{1}{2}\left(\frac{m_{\sigma}m_n}{g_{\sigma}}\right)^2\tilde{\sigma}^2+ \frac{1}{2}\left(\frac{m_{\omega}m_n}{g_{\omega}}\right)^2\tilde{\omega}_0^2\\
&+ \frac{1}{2}\left(\frac{M_hm_n}{Y}\right)^2\tilde{h}^2 + \frac{m_n^4}{\pi^2}\int_0^{\varphi}d\tilde{p}\tilde{p}^2 \sqrt{\tilde{p}^2 + (1-\tilde{\sigma}-\tilde{h})^2}\\
&+ \frac{m_n^4}{\pi^2}\int_0^{\phi}d\tilde{p}\tilde{p}^2 \sqrt{\tilde{p}^2 + \left(\frac{M_{\chi}}{m_n}-\frac{y}{Y}\tilde{h}\right)^2}\Bigg], \label{eq. 12}
\end{split}
\end{equation}
\begin{equation}
\begin{split}
\tilde{P} =& \frac{1}{\epsilon_0}\Bigg[-\frac{1}{2}\left(\frac{m_{\sigma}m_n}{g_{\sigma}}\right)^2\tilde{\sigma}^2 + \frac{1}{2}\left(\frac{m_{\omega}m_n}{g_{\omega}}\right)^2\tilde{\omega}_0^2\\
&- \frac{1}{2}\left(\frac{M_hm_n}{Y}\right)^2\tilde{h}^2 + \frac{m_n^4}{3\pi^2}\int_0^{\varphi}d\tilde{p}\frac{\tilde{p}^4}{\sqrt{\tilde{p}^2 + (1-\tilde{\sigma}-\tilde{h})^2}}\\
&+ \frac{m_n^4}{3\pi^2}\int_0^{\phi}d\tilde{p}\frac{\tilde{p}^4}{\sqrt{\tilde{p}^2 + \left(\frac{M_{\chi}}{m_n}-\frac{y}{Y}\tilde{h}\right)^2}} \Bigg]. \label{eq. 13}
\end{split}
\end{equation}
To numerically solve the EoS, we first solve simultaneously for the mean fields Eqs. (\ref{eq. 9})-(\ref{eq. 11}), for a range of hardron Fermi momenta $p_F$, and for a given DM Fermi momentum $p_F^{DM}$, before substituting these to the EoS. We take the values of the DM Fermi momenta to be $p_F^{DM} = 0.02 \,\text{GeV}, \, 0.04 \,\text{GeV}, \, 0.06 \,\text{GeV}$, in accordance with existing literature \cite{Das2019}, which also evades constraints from DM search experiments.
Figure \ref{figure DM P(e)2 Equations of State} shows the $\sigma$-$\omega$-DM EoS plots for different values of the DM Fermi momentum. The effect of DM indeed is to ``soften" the EoS, that is, to shift the EoS towards higher energy density values at given pressure values, albeit very slightly.
\begin{figure}[htb!]
\centering
\includegraphics[scale=0.43]{figure_DM_Pe_2_Equations_of_State}
\caption{\label{figure DM P(e)2 Equations of State}$\sigma$-$\omega$-DM $P(\epsilon)$ EoS. This plot confirms the results of Ref. \cite{Panotopoulos2017}.}
\end{figure}
The EoS can also be visualized in different, equivalent forms, and these are shown in Figure \ref{figure DM P(V) Equations of State}. We now analyze the EoS based on Figure \ref{figure DM P(V) Equations of State}. Notice that the $\sigma$-$\omega$-DM EoS has negative values of pressure, corresponding to unstable regions in the low-pressure regime. We see that the original $\sigma$-$\omega$ EoS is akin to the van der Waals EoS \cite{Schroeder2000}. The EoS is unstable because for a system in thermodynamic equilibrium to be stable, the pressure should not increase with the volume. One can get rid of the instabilities in the original Walecka EoS (corresponding to $p_F^{DM} = 0$) via Maxwell construction \cite{Schroeder2000}, as done in Ref. \cite{Chin197424}, by finding a critical pressure at which the ``well" and ``hill" regions have equal areas. However, the same procedure cannot be done for nonzero $p_F^{DM}$, since the wells and hills, even at a critical pressure of zero, cannot have equal areas. We therefore seek for another way of fixing these unstable regions.
\begin{figure}[htb!]
\centering
\includegraphics[scale=0.43]{figure_DM_PV_Equations_of_State}
\caption{\label{figure DM P(V) Equations of State}$\sigma$-$\omega$-DM $P(V)$ EoS. The plot labeled with $p_F^{DM} = 0$ GeV corresponds to the unmodified Walecka EoS.}
\end{figure}
\section{\label{Section3}The DM-Admixed EoS with Crust}
A remedy for the instability problem presented in Section \ref{Section2} is to replace the unstable regions in the EoS. This can be done by replacing the low-pressure (low-density) regions with another EoS that better describes it, similar to an atmosphere or crust. The underlying assumption for this is that our DM is trapped only inside the core of the neutron star; this means that the crust contains a negligible amount of DM particles.
We first deal with the DM-admixed Walecka EoS with the addition of an ideal neutron gas (ING) crust, a simple model for a neutron star atmosphere. The EoS $\epsilon_{\text{ING}}(P_{\text{ING}})$ of the ING EoS, in dimensionless form, is given by
\begin{equation}
\tilde{\epsilon}_{\text{ING}} = 3\int_0^{\varphi} \tilde{p}^2\sqrt{1+\tilde{p}^2}d\tilde{p}, \label{eq. 14}
\end{equation}
\begin{equation}
\tilde{P}_{\text{ING}} = 3\int_0^{\varphi} \tilde{p}^2\left(\sqrt{1+\varphi^2} - \sqrt{1+\tilde{p}^2} \right)d\tilde{p}. \label{eq. 15}
\end{equation}
\begin{figure}[htb!]
\centering
\includegraphics[scale=0.43]{figure_DM_Dimensionless_Equations_of_State}
\caption{\label{figure DM Dimensionless Equations of State}$\sigma$-$\omega$-DM Dimensionless Equation of State}
\end{figure}
\begin{figure}[htb!]
\centering
\includegraphics[scale=0.43]{figure_dimensionless_ING-sigma-omega-DM_EoS}
\caption{\label{figure dimensionless ING-sigma-omega-DM EoS}ING-$\sigma$-$\omega$-DM Dimensionless Equation of State}
\end{figure}
We now plot the ING EoS, superimposed on the $\sigma$-$\omega$-DM EoS. The result is shown in Figure \ref{figure DM Dimensionless Equations of State}. We see that the ideal neutron gas EoS crosses all the $\sigma-\omega$ EoS early on at very low pressures. Since the nuclear EoS is only dominant in the star's core, at high densities, we replace the unstable region of the $\sigma$-$\omega$-DM EoS at low pressure and energy density with the ING EoS up until the point of intersection between the two equations of state, to ensure continuity in the EoS. The resulting ING-$\sigma$-$\omega$-DM EoS is shown in Figure \ref{figure dimensionless ING-sigma-omega-DM EoS}. We can then use this resulting EoS to model our neutron star.
The more realistic equations of state that can model the neutron star crust are the FPS and SLy EoS. Both models are comparable in their modelling of the crust EoS, with the primary difference being their modelling of the crust-core interface. The FPS EoS model takes into account exotic nuclear shapes near the interface, while the SLy EoS models the interface as a small phase transition \cite{Haensel2007, Haensel2005}. The semi-analytical representations of FPS and SLy EoS can model all the regions of the neutron star interior \cite{Haensel2005}. In this study, we use the FPS and SLy EoS to model the crust, as our nuclear core is DM-admixed, which was not considered in Ref. \cite{Haensel2007, Haensel2005}. The parametrization for nonrotating stars \cite{Haensel2005} is given by
\begin{equation}
\begin{split}
\log P =&\frac{a_1 + a_2 \log \epsilon + a_3 (\log \epsilon)^3}{1 + a_4\log \epsilon}f_0(a_5(\log \epsilon - a_6))\\
& + (a_7 + a_8\log \epsilon)f_0(a_9(a_{10}- \log \epsilon))\\
&+(a_{11} + a_{12}\log \epsilon)f_0(a_{13}(a_{14}-\log \epsilon))\\
&+ (a_{15} + a_{16}\log \epsilon)f_0(a_{17} (a_{18} -\log\epsilon)), \label{eq. 16}
\end{split}
\end{equation}
where the units for $P$ and $\epsilon$ are in $\text{dyne/cm}^2$ and $\text{g/cm}^3$, respectively, the $a_i$ are fitting constants, and the function $f_0(x)$ is defined as
\begin{equation}
f_0(x) = \frac{1}{e^x +1}. \label{eq. 17}
\end{equation}
The values of $a_i$, taken from Ref. \cite{Haensel2005}, are given in Table \ref{Table1}.
\begin{table}[hbt!]
\caption{\label{Table1}Fitting Parameters for the FPS and SLy EoS \cite{Haensel2005}}
\begin{ruledtabular}
\begin{tabular}{c|c|c|c|c|c}
i & $a_i$ FPS & $a_i$ SLy & i & $a_i$ FPS & $a_i$ SLy\\
\hline
1 & 6.22 & 6.22 & 10 & 11.8421 & 11.4950\\
2 & 6.121 & 6.121 & 11 & -22.003 & -22.775\\
3 & 0.006004 & 0.005925 & 12 & 1.5552 & 1.5707\\
4 & 0.16345 & 0.16326 & 13 & 9.3 & 4.3\\
5 & 6.50 & 6.48 & 14 & 14.9 & 14.08\\
6 & 11.8440 & 11.4971 & 15 & 23.73 & 27.80\\
7 & 17.24 & 19.105 & 16 & -1.508 & -1.653\\
8 & 1.065 & 0.8938 & 17 & 1.79 & 1.50\\
9 & 6.54 & 6.54 & 18 & 15.13 & 14.67
\end{tabular}
\end{ruledtabular}
\end{table}
Similar to the procedure done in the case of the ING EoS, we replace the unstable regions of the $\sigma$-$\omega$-DM EoS with the FPS or SLy EoS, up to the points of intersection of the EoS. The resulting plots are shown in Figure \ref{figure dimensionless FPS-sigma-omega-DM EoS} for the FPS-$\sigma$-$\omega$-DM EoS and Figure \ref{figure dimensionless SLy-sigma-omega-DM EoS} for the SLy-$\sigma$-$\omega$-DM EoS.
\begin{figure}[htb!]
\centering
\includegraphics[scale=0.43]{figure_dimensionless_FPS-sigma-omega-DM_EoS}
\caption{\label{figure dimensionless FPS-sigma-omega-DM EoS}FPS-$\sigma$-$\omega$-DM Dimensionless Equation of State}
\end{figure}
\begin{figure}[htb!]
\centering
\includegraphics[scale=0.43]{figure_dimensionless_SLy-sigma-omega-DM_EoS}
\caption{\label{figure dimensionless SLy-sigma-omega-DM EoS}SLy-$\sigma$-$\omega$-DM Dimensionless Equation of State}
\end{figure}
We observe from our ``crust'' equations of state (ING, FPS, and SLy) that the ING EoS is the softest among the three, and so intersects with the $\sigma$-$\omega$-DM EoS at lower values of pressure than the FPS and SLy EoS do. Meanwhile, the FPS and SLy EoS produce comparable EoS when combined with the $\sigma$-$\omega$-DM EoS. In the next section, we will compare the mass-radius relations for the neutron stars obtained from all of these EoS.
\section{\label{Section4}The Structure Equations and Mass-Radius Relations}
Using the dimensionless quantities for $\epsilon$ and $P$ defined in Eq. (8) as well as the following:
\begin{equation}
\tilde{M} = \frac{M}{M_{\odot}}, \quad \tilde{r} = \frac{r}{R_0}, \quad R_0 = GM_{\odot}, \quad \Omega = \frac{4\pi\epsilon_0}{M_{\odot}}R_0^3,
\end{equation}
where $M$ is the mass, $r$ is the distance from the center of the star, and $M_{\odot}$ is the solar mass, we can write the TOV equations in dimensionless form as
\begin{equation}
\frac{d\tilde{P}}{d\tilde{r}} = \frac{-\left[\tilde{\epsilon}+\tilde{P}
\right]\left[\tilde{M}+\Omega\tilde{r}^3\tilde{P}\right]}{\tilde{r}^2 - 2\tilde{M}\tilde{r}},
\end{equation}
\begin{equation}
\frac{d\tilde{M}}{d\tilde{r}} = \Omega\tilde{r}^2\tilde{\epsilon},
\end{equation}
with the conditions
\begin{equation}
\begin{split}
\tilde{P}(\tilde{r}=0)=\tilde{P}_c, \quad \tilde{P}(r=R_{\star}) = 0\\
\tilde{M}(\tilde{r}=0)=0, \quad \tilde{M}(r=R_{\star}) = M_{\star},
\end{split}
\end{equation}
where $P_c$ is the central pressure and $R_{\star}$ is the stellar radius. The TOV equations describe static, spherically symmetric, nonrotating stars in GR \cite{Glendenning2000,Camenzind2007,Caroll2004}. The modified EoS from Section \ref{Section3} are fed into the TOV equations and solved numerically using the forward Euler method over a range of central pressures $\tilde{P}_c$. The initial condition is such that the pressure is greatest at the center of the star, and reaches zero at the star's edge, defining the stellar radius at $r=R_{\star}$. Meanwhile, the equation for the mass is cumulative, such that it reaches the stellar mass $M = M_{\star}$ at $R_{\star}$. For a range of central pressures, we can then form a parametric relation between $M_{\star}$ and $R_{\star}$, known as the mass-radius relation of the star. We now investigate in this section the effects of three modified EoS that we obtained in Section \ref{Section3} to the mass-radius relations of neutron stars.
\begin{figure}[htb!]
\centering
\includegraphics[scale=0.43]{figure_MR_relation_sigma-omega-DM}
\caption{\label{figure MR relation sigma-omega-DM}Mass-Radius Relation, $\sigma$-$\omega$-DM EoS}
\end{figure}
As a reference, we also obtain the mass-radius relations for the $\sigma$-$\omega$-DM model for different values of $p_F^{DM}$, without any crust, which is similar to the results of Ref. \cite{Panotopoulos2017}. These are shown in Figure \ref{figure MR relation sigma-omega-DM}. Meanwhile, the mass-radius relations for the ING-$\sigma$-$\omega$-DM EoS are shown in Figure \ref{figure MR relation ING-sigma-omega-DM}. Note that the addition of the ING EoS produces significant changes to the mass-radius relations, by increasing the radii of the neutron star corresponding to the mass. However, the effect of DM remains generally the same: to ``shrink'' the neutron star, by producing stars of lower masses and smaller radii as the value of $p_F^{DM}$ gets larger. We also note that the limiting/maximum mass of the neutron star increases by small amounts for the ING-$\sigma$-$\omega$-DM EoS. It is also interesting to see that for some constant radius (starting at around $15$ km), the masses of the neutron stars are all the same for different $p_F^{DM}$. For a constant given $p_F^{DM}$, the mass then decreases with increasing radius (starting at around $15$ km, or $0.4 M_{\odot}$).
\begin{figure}[htb!]
\centering
\includegraphics[scale=0.43]{figure_MR_relation_ING-sigma-omega-DM}
\caption{\label{figure MR relation ING-sigma-omega-DM}Mass-Radius Relation, ING-$\sigma$-$\omega$-DM EoS}
\end{figure}
The mass-radius relations for the FPS-$\sigma$-$\omega$-DM EoS are shown in Figure \ref{figure MR relation FPS-sigma-omega-DM}. The changes to the mass-radius relations from the neutron star without crust are also substantial, and different from that of the ING-$\sigma$-$\omega$-DM EoS, especially for greater values of $p_F^{DM}$. Each plot for every nonzero $p_F^{DM}$ for the FPS-crusted mass-radius relations in Figure \ref{figure MR relation FPS-sigma-omega-DM} intersects that of the original $\sigma$-$\omega$-DM EoS ($p_F^{DM} = 0$). The stellar masses on the right side of the intersection point are increased from that without DM as a function of increasing $p_F^{DM}$, while the masses decrease in the left of the intersection point as a function of increasing $p_F^{DM}$. We also note that in the absence of DM, that is, at $p_F^{DM} = 0$, the mass-radius relation for both the FPS- and ING- crusted neutron stars yield more or less the same maximum masses.
\begin{figure}[htb!]
\centering
\includegraphics[scale=0.43]{figure_MR_relation_FPS-sigma-omega-DM}
\caption{\label{figure MR relation FPS-sigma-omega-DM}Mass-Radius Relation, FPS-$\sigma$-$\omega$-DM EoS}
\end{figure}
Finally, Figure \ref{figure MR relation SLy-sigma-omega-DM} shows the mass-radius relations for the SLy-$\sigma$-$\omega$-DM EoS. Because the FPS and SLy EoS are quite similar in nature, the mass-radius relations with the SLy crust are comparable with those containing the FPS crust. The changes to the mass-radius relations are also significant for large values of $p_F^{DM}$.
\begin{figure}[htb!]
\centering
\includegraphics[scale=0.43]{figure_MR_relation_SLy-sigma-omega-DM}
\caption{\label{figure MR relation SLy-sigma-omega-DM}Mass-Radius Relation, SLy-$\sigma$-$\omega$-DM EoS}
\end{figure}
From the mass-radius relations, we can then obtain the maximum or limiting masses and corresponding limiting radii for the neutron star for the different EoS, and also for varying values of $p_F^{DM}$. The results are summarized in Table \ref{Table2}.
\begin{table*}[hbt!]
\caption{\label{Table2} Maximum Masses and Limiting Radii among various EoS}
\begin{ruledtabular}
\begin{tabular}{c|cc|cc|cc|cc}
& \multicolumn{2}{c|}{\bf{$\sigma$-$\omega$-DM}} & \multicolumn{2}{c|}{\bf{ING-$\sigma$-$\omega$-DM}} & \multicolumn{2}{c|}{\bf{FPS-$\sigma$-$\omega$-DM}} &\multicolumn{2}{c}{\bf{SLy-$\sigma$-$\omega$-DM}} \\
$p_F^{DM}$ (GeV) & $M_{\text{lim}}/M_{\odot}$ & $R_{\text{lim}}$ (km) & $M_{\text{lim}}/M_{\odot}$ & $R_{\text{lim}}$ (km) & $M_{\text{lim}}/M_{\odot}$ & $R_{\text{lim}}$ (km) & $M_{\text{lim}}/M_{\odot}$ & $R_{\text{lim}}$ (km)\\
\hline
0 & 2.827 & 13.039 & 2.829 & 13.454 & 2.827 & 13.512 & 2.827 & 13.533\\
0.02 & 2.785 & 12.836 & 2.786 & 13.279 & 2.785 & 13.567 & 2.785 & 13.593\\
0.04 & 2.541 & 11.490 & 2.544 & 11.970 & 2.542 & 12.532 & 2.543 & 12.657\\
0.06 & 2.124 & 9.447 & 2.128 & 10.006 & 2.138 & 10.899 & 2.155 & 11.220
\end{tabular}
\end{ruledtabular}
\end{table*}
Another way to analyze the mass-radius relations is to plot the stellar mass $M_{\star}$ as a function of the DM Fermi momentum $p_F^{DM}$, for some constant stellar radius $R_{\star}$. The result for the SLy-crusted star is shown in Figure \ref{figure MvsPf SLy-sigma-omega-DM}. Each line in Figure \ref{figure MvsPf SLy-sigma-omega-DM} corresponds to one fixed radius $R_{\star}$ corresponding to different masses $M_{\star}/M_{\odot}$ as the $p_F^{DM}$ increases. The radii are separated by an interval of $0.1$ km, and each line changes shape for every value of $R_{\star}$. Lines that start from the left at $M_{\star}/M_{\odot} \lesssim 1.0$ correspond to different radii greater than $16$ km. Lines that start at $M_{\star}/M_{\odot} \gtrsim 1.5$ correspond to different radii lower than $15$ km. The different lines of constant radius tend to approach a value of $M_{\text{con}}/M_{\odot} \simeq 1.3$, as $p_F^{DM}$ increases, and this mass corresponds to radius $R_{\star} \simeq 15 \,\, \text{to} \,\, 16$ km, and central pressure $\tilde{P}_c \simeq 0.01 \,\, \text{to} \,\, 0.03$. From this, we can speculate that a DM-admixed compact object, which may not necessarily be a neutron star, could potentially exist, with size (mass and radius) that is conducive to a wide range of values of the DM Fermi momentum $p_F^{DM}$. We emphasize that the DM in the star, as previously mentioned, does not interact with the nucleons, and only interacts with the Higgs particle. This star may be comprised mostly of DM and Higgs particles but not yet detectable by current observational means.
The same behavior is observed for the FPS-crusted star, albeit with slightly different values of $M_{\text{con}}$ and corresponding $R_{\star}$. Finally, for the ING-crusted star, the masses seemingly converge at $M_{\text{con}}/M_{\odot} \lesssim 0.4$ with increasing $p_F^{DM}$; this is readily observed in Figure \ref{figure MR relation ING-sigma-omega-DM}. This mass is well below that of neutron stars but may possibly indicate a compact object which accommodates a wide range of $p_F^{DM}$ whose radius is around $15$ km and larger.
\begin{figure}[htb!]
\centering
\includegraphics[scale=0.43]{figure_MvsPf_SLy-sigma-omega-DM}
\caption{\label{figure MvsPf SLy-sigma-omega-DM}$M_{\star}/M_{\odot}$ as a function of $p_F^{DM}$, SLy-$\sigma$-$\omega$-DM EoS}
\end{figure}
\section{\label{Section5}Conclusions}
In this work, we extended the investigation of DM-admixed neutron stars by confining the DM in the star's core and by adding a crust on the core. This simulates a neutron star with a crust, and with a core dominated by the nuclear equation of state, which, in this case, was the Walecka model added with DM, which was obtained via relativistic mean field theory.
Three types of crust were considered: the ideal neutron gas, FPS, and SLy crusts. These crust equations of state were used to replace the instabilities in the $\sigma$-$\omega$-DM EoS, corresponding to negative values of the pressure at the lower density regime. The resulting mass-radius relations are markedly different from neutron stars without crust \cite{Panotopoulos2017}. DM effects are primarily responsible for decreasing the star mass, while the main effect of the crust is to increase the star radius. We also note that, with or without the crust, both the maximum mass and limiting radius of the neutron star progressively decreases as the DM Fermi momentum $p_F^{DM}$ increases in value (see Table \ref{Table2}). We also speculate the possibility of a compact object, containing a DM core and a crust, existing with a mass and radius that accommodate a wide range of values for $p_F^{DM}$.
One can then extend this study by using more complicated models for the nuclear equation of state, to address the limitations of the Walecka model. The effects of DM may also be investigated on the star's crust, taking into account the relative amounts of DM that must be present in either the core or the crust. Observations of neutron stars and neutron star mergers may also give constraints on the parameters of DM and the models used in this study.
\section*{\label{Acknowledgments}Acknowledgments}
A. G. Abac wishes to acknowledge the support of the Department of Science of Technology - Accelerated Science and Technology Human Resource Development Program during the course of this study.
%
%
\nocite{*}
\section{Introduction}
Metals occur naturally in the earth's crust, and their contents in the environment influence the ecologies in many habitats~\cite{morais2012heavy}. Among various metals, heavy atoms and their ions with high atomic weights and large densities are found to be toxic to the human body even when present in trace amounts in various environmental matrices~\cite{jaishankar2014toxicity}. This has led to growing public health concerns about heavy metal pollution. Non-biodegradable characteristics of these elements have the capability of causing detrimental effects to the entire biodiversity~\cite{tovar2018heavy, nagajyoti2010heavy}. The high solubility of heavy ions leads to contamination of natural resources such as water and soil, which as a consequence gets accumulated in organisms and enters the food chain leading to a process of biomagnification~\cite{ali2019trophic}. Excessive exposure of Zn can lead to brain, respiratory and gastrointestinal syndrome~\cite{plum2010essential}. Cd species can cause skeletal damage as a secondary response to kidney damage or direct action on the bone cells, whereas Hg species being carcinogenic cause adverse effects on the development of human brain~\cite{mahurpawar2015effects}. Hematopoietic, renal, reproductive, and central nervous systems are vulnerable towards the dangers caused by exposure to the high level of Pb species~\cite{assi2016detrimental}. {The primary sources of these elements are various industrial activities, natural resources, agriculture, and untreated disposal of domestic waste~\cite{nagajyoti2010heavy, wuana2011heavy}.} Therefore, accurate and accessible detection of these toxic elements is necessary to ensure environmental quality control and early warning capabilities to avoid public safety adversity.
Detection of these elements with various conventional materials like clay, its minerals, zeolites, activated carbon, fullerenes, biomaterials, etc., has been done previously~\cite{uddin2017review, hong2019heavy, burakov2018adsorption}. Further, nanomaterials show great technological advances in a wide range of applications due to extraordinary properties as compared to their bulk counterparts~\cite{poole2003introduction}. The rapid growth of nanomaterials for various applications has seen a boost after the discovery of graphene. Many breakthroughs in the research of graphene have been observed in the last decade due to its large surface-to-volume ratio, thin structure, and interface interactions. Graphene and graphene-based nanostructures render unique mechanical, electrical, optical, and thermal properties~\cite{papageorgiou2017mechanical, phiri2018comparative, fan2014thermal, falkovsky2008optical} that have significantly made this material as one of the most studied two-dimensional (2D) material in condensed matter physics contributing in various applications like electrochemical devices, solar cells, plasmonic, purifiers, sensors etc.~\cite{kavan2013application, nguyen2016promising, grigorenko2012graphene, dervin20162d, mao2014nanocarbon}. Besides this, one dimensional (1D) allotrope of carbon; single-walled carbon nanotubes (SWCNT) with diameter less than 50 nanometers (nm) having different configurations exhibit similar properties as that of single-layer graphene~\cite{torres2017mesoscale}.
It has been observed both experimentally and theoretically that adsorption technology can monitor trace amounts of heavy metals. Chemical adsorption of adsorbate on a graphene-based system can modify its properties, providing a non-reversible binding of the atom or molecule to the surface. Therefore, physical adsorption is always preferred due to its reversible nature. Graphene and carbon nanotubes (CNT) have been extensively explored for physical adsorption of some of the heavy ions, dye molecules, and hydrogen molecules~\cite{shtepliuk2017interaction, yusuf2015applications, niemann2008nanomaterials, henwood2007ab} for sensor applications. Even now, the interaction studies for physisorption of heavy elements with graphene and CNT have been done theoretically and calculations are performed using Density functional theory (DFT)~\cite{ou2015physisorption, mashhadzadeh2018dft, petrushenko2019hydrogen, lazic2005role, silvestrelli2012adsorption}. Abdesalam \textit{et al.}~\cite{abdelsalam2019first} and Shtepliuk \textit{et al.}~\cite{shtepliuk2017interaction} studied the adsorption of toxic heavy elements on graphene-based system. However, a study by Oyetade \textit{et al.}~\cite{oyetade2017experimental} showed nitrogen-functionalized carbon nanotubes as a good reusable adsorbent for the removal of Pb$^{+2}$ and Zn$^{+2}$ from wastewater.
Other studies for physical adsorption of microparticles with the material given by generalized {Lifshitz} theory have been conducted using \textit{ab initio} calculations~\cite{jiang1984dispersion, rauber1982substrate, zaremba1976van}. The theory explains the interactions of atoms or molecules with material walls in both retarded and non retarded regimes giving rise to Casimir-Polder and van der Waals (vdW) forces~\cite{klimchitskaya2020casimir, bordag2006lifshitz, bordag2009advances}. These forces find diverse applications in circuit technology, adsorption, quantum reflections, and Bose condensation~\cite{bordag2009advances, lin2004impact, bezerra2008lifshitz, zaremba1976van, tao2014physical}. {Lifshitz} theory gave a generalization of both these interaction forces in which the strength of the attractive forces is expressed in terms of dispersion $C_3$ coefficient~\cite{caride2005dependences}. Dispersion coefficients have been calculated for a number of material walls, including metals, semiconductors, insulators, and dielectrics, by taking the optical properties into account~\cite{caride2005dependences, arora2014van, derevianko1999high, lach2010noble, dutt2020van, blagov2007van, kaur2016dispersion}. These dispersion coefficients were also measured experimentally using atomic force microscopy (AFM) and spectroscopy techniques~\cite{fichet2007exploring, lepoutre2009dispersive, lonij2011can, schneeweiss2012dispersion}. Such studies were reported for applications in hydrogen sensing, storage and designing an up-gradation technology for batteries~\cite{blagov2005van, blagov2007van, bordag2006lifshitz}.
In the present work, {we particularly focus on} the interaction of heavy elements with carbon-based systems - graphene and CNT which are considered as two-dimensional free-electron gas. Reflection coefficients of these materials are important contributors to the calculation of the dispersion coefficients. {Out of the models proposed in the literature for the evaluation of reflection coefficients, the Dirac model approach is preferred due to its providing results in close agreement with experiment~\cite{klimchitskaya2015comparison}.} Previously, studies conducted were based on this approach for the interaction of alkali atoms, alkaline ions, noble gas molecules, hydrogen atom, and hydrogen molecule with graphene and CNT wall~\cite{kaur2015dispersion, bordag2006lifshitz}. Accurate values of the polarizability of microparticles at imaginary frequencies are necessary to compute $C_3$ coefficients between the microparticle and the material wall given by generalized Lifshitz theory. In this paper, we have calculated the $C_3$ dispersion coefficients for interaction of microparticle with graphene and CNT wall along with the evaluation of static and dynamic polarizabilities of heavy ions and atoms at imaginary frequencies using the sum-over-states approach. There are a few studies that have reported only static polarizabilities. Most of these have used non-relativistic methods but for such heavy elements it is necessary to adopt a relativistic approach as we have done in the present work for the reliable calculations of atomic properties.
The outline of the paper is as follows. In Sec~\ref{Sec II}, we give a brief overview of the theory. Sec.~\ref{Sec III} contains the evaluated values of static dipole polarizability of heavy ions and atoms. The dynamic dipole polarizabilities for ions and atoms are also presented in the same section. In addition to this, the dispersion coefficients between considered ions or atoms and materials have been discussed. We have also compared the results of dispersion coefficients for graphene and CNT. The dependency of the gap parameter on interaction coefficients is also discussed in this section. Atomic units (a.u.) have been used throughout the paper unless stated otherwise.
\section{Theory}\label{Sec II}
\subsection{Dispersion coefficient}
Generalized Lifshitz formula for the non-retarded vdW interaction energy of atoms or molecules with graphene and CNT wall using proximity force approximation (PFA) can be written in terms of dispersion coefficients $C_3$ for a separation distance $a$ in the following form ~\cite{churkin2011dispersion}
\begin{equation}
E(a)= -\frac{C_3 (a)}{a^3}.
\end{equation}
The $C_3$ coefficient due to interaction between graphene and microparticle is expressed in the terms of reflection coefficients $r_{TM}$ and $r_{TE}$ as follows ~\cite{churkin2011dispersion, arora2014coefficients}
\begin{eqnarray}
& & C_3(a) = \frac{1}{16\pi}\int_{0}^\infty\alpha(\iota\xi) d\xi \int_{2a\xi\alpha_{fs}}^\infty y^2e^{-y}dy
\nonumber \\
& & \times \left[2r_{TM} - (r_{TM} + r_{TE})\frac{4a^2\xi^2\alpha_{fs}^2}{y^2} \right],
\nonumber\\
& &
\label{Eq.2}
\end{eqnarray}
whereas this coefficient for CNT of radius $R$ becomes radius dependent and can be expressed as:
\begin{eqnarray}\label{Eq.3}
& & C_3(a,R) = \frac{1}{16\pi}\sqrt{\frac{R}{R+a}}\int_{0}^\infty\alpha(\iota\xi) d\xi \int_{2a\xi\alpha_{fs}}^\infty ye^{-y} dy
\nonumber \\
& & \times \left(y-\frac{a}{2(R+a)}\right)
\times \left[2r_{TM} - (r_{TM} + r_{TE})\frac{4a^2\xi^2\alpha_{fs}^2}{y^2}\right].
\nonumber \\
& &
\end{eqnarray}
In both the above expressions, $\alpha$ is the dynamic dipole polarizability of the ion or atom over imaginary frequencies $\iota\xi$ and $\alpha_{fs}$ is the fine structure constant~\citep{churkin2011dispersion}. $y$ is a dimensionless variable given by $y=2aq$, where $a$ is the separation distance and $q=\sqrt{k^2 + \xi^2}$, dependent on wave vector $k$~\cite{churkin2011dispersion}. For the evaluation of these reflection coefficients, two models have been proposed in the literature for graphene and CNT. These two are Dirac~\cite{geim2009graphene, bordag2009casimir, neto2009electronic} and hydrodynamic models~\cite{bordag2001new, bordag2006lifshitz}. In hydrodynamic model, graphene is taken as infinitesimally thin positively charged sheet with a continuous fluid of mass and negative charge densities. The dispersion relation for quasiparticles in graphene is quadratic with respect to the momentum. However, this model does not take into account some properties of graphene which are important at low energies and due to this reason it overestimates the vdW interactions. In the Dirac model, the quasiparticles in graphene are considered to be Dirac Fermions moving with Fermi velocity and follow linear dispersion law. This model has provided results in accord with experimental values~\cite{klimchitskaya2015comparison}. In this work, Dirac model has been implemented for determination of dispersion coefficients. Under this framework, the explicit forms of two components of the reflection coefficients are given by~\cite{bordag2009casimir}
\begin{equation}\label{Eq.4}
r_{TM}= \frac{\alpha_{fs} q \phi(\tilde{q})} {2 \tilde{q}^2 + \alpha_{fs} q \phi(\tilde{q})},
\end{equation}
\begin{equation}\label{Eq.5}
r_{TE}= \frac{\alpha_{fs} q \phi(\tilde{q})} {2 \tilde{q} + \alpha_{fs} q \phi(\tilde{q})},
\end{equation}
where $q = \sqrt{k^2 +\xi^2/c^2}$, $\tilde{q}$ is the function of Fermi velocity $v_f$ of massless Fermions and $\phi$ is the polarization tensor. The expressions of these two parameters can be given as~\cite{bordag2009casimir}
\begin{equation}\label{Eq.6}
\tilde{q}=\sqrt{\frac{\alpha_{fs}^2v_f^2y^2}{4a^2}+(1-\alpha_{fs}^2v_f^2)\alpha_{fs}^2\xi^2},
\end{equation}
\begin{equation}\label{Eq.7}
\phi(\tilde{q})=4\left(\alpha_{fs}\Delta+\frac{\tilde{q}^2-4\alpha_{fs}^2\Delta^2}{2\tilde{q}}\arctan\left(\frac{\tilde{q}}{2\alpha_{fs}\Delta}\right)\right),
\end{equation}
where $\Delta$ is the gap parameter~\cite{neto2009electronic} whose value lies in the range $0 < \Delta < 0.1$ eV. Since the value of the gap parameter is still not known, we take its value as 0.01 throughout the paper unless stated otherwise.
\subsection{Dipole polarizability}
The dipole polarizability for an atomic system in the ground state $n$ with a closed core and valence electron(s) can be evaluated by calculating two components of the polarizability as follows~\cite{arora2012multipolar}
\begin{equation}\label{Eq.8}
\alpha(\iota\omega)= \alpha_{val}(\iota\omega) + \alpha_c(\iota\omega),
\end{equation}
where the subscripts $val$ and $c$ refer to the polarizability contributions from the valence and core orbitals, respectively. The dominant contribution to the polarizability is from the valence part, which can be further expressed in terms of Main and Tail contributions. The Main term of $\alpha_{val}$ contains the contributions due to the low lying allowed transitions from the ground state, whereas the Tail term has the contributions of the transitions from the ground to higher states.
The Main term of valence contribution can be estimated as follows
\begin{eqnarray}\label{Eq.9}
& & \alpha_{val}^{Main}(\iota\omega)=\frac{2}{3(2J_n+1)}
\nonumber \\
& & \times \sum_{m > N_c,m\neq n}^I \frac{(E_m-E_n)|\langle\psi_n||\textbf{D}||\psi_m\rangle|^2}{(E_m-E_n)^2+\omega^2}.
\nonumber \\
& &
\end{eqnarray}
In the above equation, $J_n$ is the total angular momentum quantum number of the ground state of the considered atom/ion. The sum is restricted to intermediate $m$ states above $N_c$ and up to $I$, where $N_c$ represents the core orbitals and $I$ refers to the bound states up to which we have determined the reduced matrix elements $\langle\psi_n||D||\psi_m\rangle$ in our calculations. We use the relativistic all-order method and the multiconfigurational Dirac-Fock (MCDF) approximation for ions and atoms, respectively, to compute the matrix elements used for the Main term calculation. In order to do reliable calculations and avoid any uncertainties, we use the experimental excitation energy values $E_i$ of the corresponding states for the Main term, taken from the National Institute of Standards and Technology (NIST) database~\cite{NIST_ASD}.
Similarly, the Tail term is evaluated using the following equation
\begin{equation}\label{Eq.10}
\alpha_{val}^{Tail}(\iota\omega)=\frac{2}{3(2J_n+1)}\sum_{m > I} \frac{(\epsilon_m-\epsilon_n)|\langle\psi_n||{\bf D}||\psi_m\rangle_{DHF}|^2}{(\epsilon_m-\epsilon_n)^2+\omega^2},
\end{equation}
where $\langle\psi_n||{\bf D}||\psi_m\rangle_{DHF}$ are the E1 reduced matrix elements obtained using DHF method. $\textbf{D}$ is the dipole operator defined as $\textbf{D}= -e\sum_j\textbf{r}_j$ with $\textbf{r}_j$ being position of a $j$th electron, and the sum $m>I$ corresponds to the excited states whose matrix elements are not accounted in the Main term. The energies calculated using DHF method are referred by $\epsilon_i$.
The calculations of the core polarizabilities of both ions and atoms are carried out in the DHF method using the following expression,
\begin{equation}\label{Eq.11}
\alpha_{c}(\iota\omega)=\frac{2}{3(2J_n+1)}\sum_{a}^{N_c}\sum_{m}^{I} \frac{(\epsilon_m-\epsilon_a)|\langle\psi_a||\textbf{D}||\psi_m\rangle_{DHF}|^2}{(\epsilon_m-\epsilon_a)^2+\omega^2},
\end{equation}
where $a$ refers to the core orbitals while $m$ includes valence or empty orbitals.
The evaluation of the core correlation using the above expression does not exclude contributions from excitations from core to the occupied valence shell which are forbidden by the Pauli's exclusion principle. Hence half of this contribution has to be subtracted in the case of ions.
Likewise for atoms, twice of this contribution has to be excluded from the core polarizability contribution due to fully filled valence $ns$ for Zn ($n$=4), Cd ($n$=4), Hg ($n$=5) and $np$ for Pb ($n$=6) orbitals.
These contributions are referred as the valence-core ($\alpha_{vc}$) in our calculations.
One can calculate the static values of polarizability by substituting $\omega= 0$ in Eqs.(~\ref{Eq.9} -~\ref{Eq.11}).
\subsection{Matrix elements}
In order to calculate polarizability of the monovalent ions and divalent atoms, reliable values of the matrix elements have to be calculated. In the present work, wave functions for ions and atoms are calculated using different relativistic methods. For ions, we consider relativistic all order method confined to the single and double excitation (SD) approximation ~\cite{safronova2008all, blundell1991relativistic}. The exact wave function of the state with the closed core and single valence electron $v$ is represented as
\begin{eqnarray}
& &|\psi_v\rangle_{SD} = \left[1+ \sum_{ma}\rho_{ma}a_m^\dagger a_a +\frac{1}{2}\sum_{mlab}\rho_{mlab} a_m^\dagger a_l^\dagger a_b a_a\right.
\nonumber \\
& & \left.+ \sum_{m\neq v} \rho_{mv} a_m^\dagger a_v + \sum_{mla} \rho_{mlva}a_m^\dagger a_l^\dagger a_a a_v\right]|\phi_v\rangle.
\nonumber\\
& &
\end{eqnarray}
Here $|\phi_v\rangle$, is the mean field wavefunction constructed as $ |\phi_v\rangle = a_v^\dagger|0_c\rangle$ with $|0_c\rangle$ representing the DHF wave function of closed core and $a^\dagger$, $a$ represents creation and annihilation operators respectively whereas excitation coefficients are denoted by $\rho$. $\rho_{ma}$, $\rho_{mv}$, $\rho_{mlab}$ and $\rho_{mlva}$ being the single core, single valence, double core and double valence excitation coefficients respectively. To obtain the DHF wave functions and matrix elements for each transition, we use a set of 50 B-splines of order k = 11 for each angular momentum. The basis set orbitals are constrained to a large spherical cavity of a radius R = 220 a.u.
The required wavefunctions for divalent systems are obtained from GRASP2K code which uses MCDF approach~\cite{jonsson2013new}. In MCDF, the atomic state wavefunction (ASF) in their initial/final state can be written as the linear combination of several configurational state functions (CSFs), having the same parity and
total angular momentum, e.g.,
\begin{equation}\label{Eq.13}
|\psi_v\rangle_{MCDF} = \sum_{x=1}^N a_x|\phi_x\rangle,
\end{equation}
where $x$ refers to the number of CSFs and $a_x$ is the mixing coefficient. It is important to mention that the calculation of ASFs is done by including Breit and quantum electrodynamic corrections. In order to increase the accuracy of the ASF, we consider the maximum number of CSFs in the linear contribution and, finally, retain only those which have the value of mixing coefficient greater than $10^{-3}$ . This method was used for divalent alkaline earth atoms in Ref.~\cite{shukla2020two}.
After obtaining wave functions for the aforementioned
ions and atoms, we determine the dipole-allowed (E1) matrix
element for a transition. The E1 matrix elements between the states $|\psi_v\rangle$ and $|\psi_k\rangle$ is evaluated using the following expression~\cite{PhysRevA.40.2233}
\begin{equation}\label{14}
D_{vk} = \frac{\langle\psi_v|D|\psi_k\rangle}{\sqrt{\langle\psi_v|\psi_v\rangle \langle\psi_k|\psi_k\rangle}}.
\end{equation}
For practical purposes, we calculate the E1 matrix elements of some low-lying transitions, which contribute dominantly to the Main term of the valence contribution, using the above described method.
Tail contribution from high lying transitions calculated using DHF method is given for ions only.
Due to some computational constraints and the sake of simplicity, the Tail contribution in the case of atoms has been neglected.
\section{Results and Discussion}\label{Sec III}
\subsection{Dipole polarizabilities at imaginary frequencies}
\subsubsection{Static dipole polarizability of ions}
In Table~\ref{table1}, we present the static dipole polarizability values of Zn$^+$, Cd$^+$, Hg$^+$, and Pb$^+$ heavy ions. Using Eqs.~\ref{Eq.9} and~\ref{Eq.10}, the Main and Tail terms of the valence contribution of polarizability are computed at zero frequency and given explicitly in Table~\ref{table1}. We provide the breakdown of polarizability values from every dominant transition required for the calculation of the Main term of valence contribution. Values of E1 matrix elements included in the Main term of Zn$^+$, Cd$^+$, and Hg$^+$ have been calculated in the present work while E1 matrix elements for Pb$^+$ ion have been taken from Ref.~\cite{safronova2005excitation, sahoo2005electric} which were calculated by the same method as ours. The core contribution is also tabulated in the same table which has been evaluated using Eq.~\ref{Eq.11}. While $\alpha_{vc}$ contribution for Zn$^+$, Cd$^+$, and Hg$^+$ ions is almost negligible, it is notable for Pb$^+$ and affects the total polarizability value. A similar case is observed for the Tail term, in which a significant value is observed for Pb$^+$.
In the same table, the static polarizability values of the ions are compared with the experimental and other theoretical values to benchmark the validity of our values using the considered method. The polarizability values of Zn$^+$, Cd$^+$ and Hg$^+$ ions match well with the values calculated by coupled-cluster single double with triple excitations (CCSD(T)) method by Ilia\v{s} \textit{et al.}~\cite{iliavs1999ionization}. Our polarizability value of Zn$^+$ ion deviates from the experimental value by 16\%, but it is in close agreement with other theoretical works. In a recent work, Li \textit{et al.} calculated the ground state polarizability for Cd$^+$ using the DHF approximation, third-order many-body theory, and singles and doubles approximated coupled-cluster method~\cite{li2018relativistic}. The only difference in the value calculated by us and Ref.~\cite{li2018relativistic} is that they have included the 4$d^9$5$s$5$p$ configurations. The static polarizability value of Pb$^+$ in Ref.~\cite{gould2016c} was calculated using time dependent DFT (TDDFT) without including relativistic effects and fixed core approximation. {The incorporation of relativistic effects for heavy elements is required for accurate polarizability values~\cite{iliavs1999ionization}, thus the values obtained in the present work using all order method are expected to be closer to the actual values.} In a number of other studies, the present method has provided accurate values of dipole polarizability for other monovalent atoms and ions~\cite{arora2007magic} hence we can say that static dipole polarizability value of Pb$^+$ ion is also legitimate if calculated by all-order SD method. Unfortunately, we did not find any experimental measurements for static polarizability values of Cd$^+$, Hg$^+$ and Pb$^+$ ion with which we can compare our calculated results.
\subsubsection{Static dipole polarizability of atoms}
In Table~\ref{table2}, we give the static polarizability results for Zn, Cd, Hg and Pb atoms. The breakdown of contribution of Main term from each transition is tabulated in the same table. E1 matrix elements have been obtained from the GRASP2K code required for calculation of Main term of considered atoms. The core contribution for the considered atoms is the same as that for the respective ions. The focus is given on calculation of valence-core $\alpha_{vc}$ correlation for atoms. Since excitations from the core to the occupied valence shell, which is completely filled in the considered atoms, are not allowed, exactly twice the $\alpha_{vc}$ contribution calculated in the case of ions with one valence electron has been excluded in the case of atoms. We have not included the Tail term in the case of atoms. However, we anticipate very small Tail value from considered atoms except for Pb.
In the same table, we also present a comparison of total value of static polarizability of the atoms calculated by us with experimental and other theoretical works. The static dipole polarizability values for Zn and Hg atoms given by Ye \textit{et al.}~\cite{ye2008dipole} using configuration interaction with a semiempirical core-polarization model potential method is found to be slightly larger than those calculated by us. Our polarizability values for Zn and Hg differ from the experimental values by $\sim$9\% and $\sim$15\% respectively. Static polarizability values of Cd atom calculated by us agree well with experimental results. For Pb, previous theoretical results have given underestimated values as compared to experimental values~\cite{pershina2008prediction, gould2016c}. The recent experimental value of Pb atom is 56 a.u. with an uncertainty of about $\pm18.2$ a.u. which is within uncertainty limits when compared to our result. However, we propose to include more transitions for more accurate polarizability for this atom.
\begin{table*}
\caption{\label{table1} State polarizability along with contributions from various E1 reduced matrix elements to the static polarizabilities {(a.u.)} of ground state of Zn$^+$, Cd$^+$, Hg$^+$ and Pb$^+$. Main, Tail, core and valence-core contributions are given as well. The numbers in square brackets for contribution from each transition in Main term represent powers of 10. The final results are compared with the previously estimated and available experimental values.}
\begin{center}
\begin{tabular}{|p{1.9cm}p{1.2cm}p{1.2cm}|p{1.9cm}p{1.2cm}p{1.2cm}|p{1.9cm}p{1.2cm}p{1.2cm}|p{1.9cm}p{1.1cm}p{1.2cm}|}
\hline
\hline
\multicolumn{3}{|c|}{Zn+} & \multicolumn{3}{c|}{Cd+} & \multicolumn{3}{c|}{Hg+} & \multicolumn{3}{c|}{Pb+}\\
& & & & & & & & & & & \\
Transition & E1 & $\alpha(0)$ & Transition & E1 & $\alpha(0)$ & Transition & E1 & $\alpha(0)$ & Transition & E1 & $\alpha(0)$\\
\hline
& & & & & & & & & & & \\
$4S_{1/2}-4P_{1/2}$ & 0.189[1] & 0.537[1] & $5S_{1/2}-5P_{1/2}$ & 0.194[1] & 0.623[1] & $6S_{1/2}-6P_{1/2}$ & 0.166[1] & 0.391[1] & $6P_{1/2}-7S_{1/2}$ & 0.101[1] & 0.125[1] \\[0.5ex]
$4S_{1/2}-4P_{3/2}$ & 0.267[1] & 0.106[2] & $5S_{1/2}-5P_{3/2}$ & 0.275[1] & 0.119[2] & $6S_{1/2}-6P_{3/2}$ & 0.235[1] & 0.666[1] & $6P_{1/2}-8S_{1/2}$ & 0.371[0] & 0.113[0] \\[0.5ex]
$4S_{1/2}-5P_{1/2}$ & 0.80[-1] & 0.46[-2] & $5S_{1/2}-6P_{1/2}$ & 0.10[0] & 0.77[-2] & $6S_{1/2}-7P_{1/2}$ & 0.535[0] & 0.19[0] & $6P_{1/2}-6D_{3/2}$ & 0.207[1] & 0.448[1] \\[0.5ex]
$4S_{1/2}-5P_{3/2}$ & 0.85[-1] & 0.52[-2] & $5S_{1/2}-6P_{3/2}$ & 0.59[-1] & 0.27[-2] & $6S_{1/2}-7P_{3/2}$ & 0.366[0] & 0.88[-1] & & & \\
$4S_{1/2}-6P_{1/2}$ & 0.86[-1] & 0.45[-2] & $5S_{1/2}-7P_{1/2}$ & 0.113[0] & 0.85[-2] & & & & & & \\[0.5ex]
$4S_{1/2}-6P_{3/2}$ & 0.143[0] & 0.13[-1] & $5S_{1/2}-7P_{3/2}$ & 0.117[0] & 0.91[-2] & & & & & & \\[0.5ex]
$\alpha_{val}^{Main}$ & & 15.98 & $\alpha_{val}^{Main}$ & & 18.14 & $\alpha_{val}^{Main}$ & & 10.84 & $\alpha_{val}^{Main}$ & & 5.84 \\[0.5ex]
$\alpha_{val}^{Tail}$ & & 0.02 & $\alpha_{val}^{Tail}$ & & 0.01 & $\alpha_{val}^{Tail}$ & & 0.06 & $\alpha_{val}^{Tail}$ & & 2.67 \\[0.5ex]
$\alpha_{vc}$ & & 0.006 & $\alpha_{vc}$ & & -0.02 & $\alpha_{vc}$ & & -0.04 & $\alpha_{vc}$ & & -2.28 \\[0.5ex]
$\alpha_{c}$ & & 2.05 & $\alpha_{c}$ & & 5.28 & $\alpha_{c}$ & & 8.21 & $\alpha_{c}$ & & 16.30 \\[0.5ex]
Total & & 18.05 & Total & & 23.41 & Total & & 19.07 & Total & & 22.52 \\[0.5ex]
Experiment & & 15.54~\cite{kompitsas1994rydberg} & & & & & & & & & \\[0.5ex]
Others & & 18.84~\cite{iliavs1999ionization} & Others & & 23.68~\cite{iliavs1999ionization} & Others & & 19.36~\cite{iliavs1999ionization} & Others & & 23.5~\cite{gould2016c} \\[0.5ex]
& & 17.90~\cite{gould2016c} & & & 23.1~\cite{gould2016c} & & & 17.50~\cite{gould2016c} & & & \\[0.5ex]
& & & & & 25.21~\cite{li2018relativistic} & & & & & & \\
\hline
\end{tabular}
\end{center}
\end{table*}
\begin{table*}
\caption{\label{table2}State polarizability along with contributions from various E1 reduced matrix elements to the static polarizabilities {(a.u.)} of ground state of Zn, Cd, Hg and Pb. Main, core and valence-core contributions are given as well. The final results are compared with the previously estimated and available experimental results. The numbers in square brackets for contribution from each transition in Main term represent powers of 10. The uncertainty in experimental values are given in the parentheses.}
\begin{center}
\begin{tabular}{|p{1.9cm}p{1.2cm}p{1.2cm}|p{1.9cm}p{1.2cm}p{1.2cm}|p{1.9cm}p{1.2cm}p{1.2cm}|p{1.9cm}p{1.1cm}p{1.2cm}|}
\hline
\hline
\multicolumn{3}{|c|}{Zn} & \multicolumn{3}{c|}{Cd} & \multicolumn{3}{c|}{Hg} & \multicolumn{3}{c|}{Pb}\\
& & & & & & & & & & &\\
Transition & E1 & $\alpha(0)$ & Transition & E1 & $\alpha(0)$ & Transition & E1 & $\alpha(0)$ & Transition & E1 & $\alpha(0)$\\
\hline
& & & & & & & & & & &\\
$4 {}^1S_0-4 {}^3P_1$ & 0.17[-3] & 0.748[-5] & $5 {}^1S_0-5 {}^3P_1$ & 0.208[-1] & 0.278[-3] & $6 {}^1S_0-6 {}^3P_1$ & 0.118[0] & 0.437[0] & $6 {}^3P_0-7 {}^3P_1$ & 0.16[-1] & 0.692[1] \\[0.5ex]
$4 {}^1S_0-4 {}^1P_1$ & 0.103[2] & 0.322[2] & $5 {}^1S_0-5 {}^1P_1$ & 0.949[1] & 0.318[2] & $6 {}^1S_0-6 {}^1P_1$ & 0.725[1] & 0.196[2] & $6 {}^3P_0-7 {}^1P_1$ & 0.80[-1] & 0.213[0] \\[0.5ex]
$4 {}^1S_0-5 {}^3P_1$ & 0.40[-4] & 0.839[-6]& $5 {}^1S_0-6 {}^3P_1$ & 0.16[-2] & 0.407[-4] & $6 {}^1S_0-7 {}^3P_1$ & 0.93[-3] & 0.196[-4] & $6 {}^1S_0-7 {}^1P_1$ & 0.491[1] & 0.362[2] \\[0.5ex]
$4 {}^1S_0-5 {}^1P_1$ & 0.466[0] & 0.108[1] & $5 {}^1S_0-6 {}^1P_1$ & 0.488[1] & 0.119[2] & $6 {}^1S_0-7 {}^1P_1$ & 0.554[-1] & 0.114[0] & $6 {}^3P_0-6 {}^3D_1$ & 0.229[1] & 0.704[1] \\[0.5ex]
$4 {}^1S_0-6 {}^3P_1$ & 0.40[-4] & 0.899[-6] & $5 {}^1S_0-7 {}^3P_1$ & 0.4[-4] & 0.865[-6] & $6 {}^1S_0-8 {}^3P_1$ & 0.143[0] & 0.273[0] & & & \\[0.5ex]
$4 {}^1S_0-6 {}^1P_1$ & 0.18[-1] & 0.251[-3] & $5 {}^1S_0-7 {}^1P_1$ & 0.282[0] & 0.628[0] & $6 {}^1S_0-8 {}^1P_1$ & 0.428[-1] & 0.815[-3] & & &\\[0.5ex]
$\alpha_{val}^{Main}$ & & 33.29 & $\alpha_{val}^{Main}$ & & 44.36 & $\alpha_{val}^{Main}$ & & 20.51 & $\alpha_{val}^{Main}$ & & 50.39 \\[0.5ex]
$\alpha_{vc}$ & & -0.0013 & $\alpha_{vc}$ & & -0.04 & $\alpha_{vc}$ & & -0.08 & $\alpha_{vc}$ & & -4.58 \\[0.5ex]
$\alpha_{c}$ & & 2.05 & $\alpha_{c}$ & & 5.28 & $\alpha_{c}$ & & 8.21 & $\alpha_{c}$ & & 16.30 \\[0.5ex]
Total & & 35.33 & Total & & 49.61 & Total & & 28.65 & Total & & 61.90 \\[0.5ex]
Experiment & \multicolumn{2}{r|}{38.80(0.3)} & Experiment & \multicolumn{2}{r|}{49.65(1.46)} & Experiment & \multicolumn{2}{r|}{33.91(0.34)} & Experiment & \multicolumn{2}{r|}{56.0(18.2)} \\[0.5ex]
& & ~\cite{goebel1996theoretical} & & & ~\cite{goebel1995dispersion} & & & ~\cite{singh2015rigorous} & & & ~\cite{ma2015measured} \\[0.5ex]
Others & \multicolumn{2}{r|}{37.6~\cite{kello1995polarized}} & Others & \multicolumn{2}{r|}{46.8~\cite{kello1995polarized}} & Others & \multicolumn{2}{r|}{31.2~\cite{kello1995polarized}} & Others & \multicolumn{2}{r|}{46.96~\cite{pershina2008prediction}} \\[0.5ex]
& \multicolumn{2}{r|}{38.4~\cite{gould2016c}} & & \multicolumn{2}{r|}{46.7~\cite{gould2016c}} & & \multicolumn{2}{r|}{33.5~\cite{gould2016c}} & & \multicolumn{2}{r|}{47.9~\cite{gould2016c}}\\[0.5ex]
& \multicolumn{2}{r|}{38.12~\cite{ye2008dipole}} & & \multicolumn{2}{r|}{44.63~\cite{ye2008dipole}} & & \multicolumn{2}{r|}{31.32~\cite{ye2008dipole}} & & \multicolumn{2}{r|}{ }\\[0.5ex]
\hline
\end{tabular}
\end{center}
\end{table*}
\subsubsection{Dynamic dipole polarizability at imaginary frequency}
We determine the dynamic polarizability values at different frequencies using the same method which has been used for evaluation of static polarizability and expect our values to be reliable. Dynamic dipole polarizabilities of ions and atoms at imaginary frequencies are presented in Fig.~\ref{Fig.1} and Fig.~\ref{Fig.2} respectively. Tabulated values of $\alpha(\iota\omega)$ for considered ions and atoms are given in Supplementary materials (SM)~\cite{SM}. With increase of frequency, polarizability decreases and reaches a small value. This trend is seen for both ions and atoms. From Fig.~\ref{Fig.1}, one notices that at short frequency values, the dipole polarizabilities of Cd$^+$ and Pb$^+$ ions are comparable but with an increase in frequency, the polarizability of Cd$^+$ decreases more rapidly as compared to Pb$^+$ ion. Dynamic polarizability of Cd$^+$ is even lower than polarizability of Hg$^+$ for $\omega > 0.25$ a.u. For Zn$^+$ ion, the polarizability remains lowest throughout the frequency regime as compared to other ions. Similarly as shown in Fig.~\ref{Fig.2}, for atoms the static polarizability of Zn is larger than Hg at short $\omega$ but decreases rapidly for Zn as compared to Hg as $\omega$ increases. For $\omega > 1$ a.u., the polarizability values of Hg atom are more as compared to value of Zn and Cd and get closer to the polarizability of Pb atom. If we compare polarizability values among atoms and ions, the lowest value throughout the considered frequency range is observed for Zn$^+$ ion whereas the largest values for Pb and Cd atoms depending upon the frequency value. These values have been used for computing $C_3$ dispersion coefficients as a function of separation distance as discussed in the next section.
\begin{figure}
\includegraphics[width=\columnwidth,keepaspectratio]{hpionsnew.eps}
\caption{\textcolor{red}{(Color online) Dynamic dipole polarizability $\alpha$ (a.u.) at imaginary frequencies of Zn$^+$ (blue dashed curve), Cd$^+$ (orange dotted dashed curve), Hg$^+$ (red long dashed curve) and Pb$^+$ (green double dotted dashed curve).}}
\label{Fig.1}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\columnwidth,keepaspectratio]{hpatomsnew.eps}
\caption{\textcolor{red}{(Color Online) Dynamic dipole polarizabilities $\alpha$ (a.u.) at imaginary frequencies of Zn (blue dashed curve), Cd (orange dotted dashed curve), Hg (red long dashed curve) and Pb (green double dotted dashed curve)}.}
\label{Fig.2}
\end{figure}
\subsection{$C_3$ Coefficients for graphene}
In this section, we present the dispersion coefficients between graphene layer with $\Delta = $ 0.01 eV and heavy elements as a function of separation distance. As shown in Fig.~\ref{Fig.3} and Fig.~\ref{Fig.4}, $C_3$ coefficients exhibit an inverted yield curve i.e., $C_3$ values decrease with increase in distance. This nature of the curve is perceived for every element. Coefficients reach a value less than 0.1 a.u. for distance greater than 30 nm. Among the ions, the largest $C_3$ value is observed for Pb$^+$ indicating stronger interaction with graphene layer whereas Zn$^+$ is least attracted as shown in Fig.~\ref{Fig.3}. The respective $C_3$ values for Cd$^+$ and Hg$^+$ ions are approximately the same. In the case of atoms, the large $C_3$ values have been observed for Pb and Cd. At $a$ = 1 nm, $C_3$ values are 0.564 and 0.561 for Pb and Cd atom respectively. However, for separation distance $a > 7$ nm, the difference in $C_3$ coefficients for Pb and Cd become appreciable as displayed in Fig.~\ref{Fig.4}. Zn and Hg are least attracted towards graphene. To best of our knowledge, we did not find any literature on $C_3$ coefficient values for interaction of heavy ions and atoms with carbon-based nanostructures. However, a comparison has been made based on physisorption of heavy atoms with previous DFT study~\cite{shtepliuk2017interaction}. For atoms, our results are in accordance with the DFT study for physisorption of heavy atoms on graphene layer~\cite{shtepliuk2017interaction}. In DFT study, vdW interactions were described between the adsorbed atom and graphene and the strength of interactions were analysed as a function of binding energy and charge transfer. The sequence of reducing binding energy for atoms was reported as Pb $>$ Cd $>$ Hg~\cite{shtepliuk2017interaction}. A similar trend is observed in our study where we have analysed the strength of interaction on the basis of $C_3$ values. 
Shtepliuk \textit{et al.}~\cite{shtepliuk2017interaction} also studied the interaction of ions with graphene which resulted in chemisorption. Since our study provides the result for physisorption of microparticles on the material wall, hence we do not make a similar comparison for ions with Ref.~\cite{shtepliuk2017interaction}. It is important to note that these ions with large nuclear charge Z, which we have considered in the present work are only singly charged, (\textit{i.e.,} having residual unity charge), thus the effect of the latter may not be too significant as compared to the coulomb potential of heavier Z atoms in the calculation of matrix elements with which we have evaluated the $C_3$ coefficients. The overall charge present on the ions leads to stronger Coulomb interactions as compared to weak vdW attractions with both graphene and CNT wall. The considered theory only provides the information regarding the weak vdW forces between the microparticle and considered substrates, which on comparison reveals that the selectivity and sensitivity of graphene and CNT to adsorb heavy atoms is more as compared to ions.
\begin{figure}
\centering \includegraphics[width=\columnwidth,keepaspectratio]{grapheneionsnew.eps}
\caption{\textcolor{red}{(Color online) $C_3$ dispersion coefficients (a.u.) for interaction of Zn$^+$ (blue dashed curve), Cd$^+$ (orange dotted dashed curve), Hg$^+$ (red long dashed curve) and Pb$^+$ (green double dotted dashed curve) with graphene layer as a function of separation distance $a$ (nm).}}
\label{Fig.3}
\end{figure}
\begin{figure}
\centering \includegraphics[width=\columnwidth,keepaspectratio]{grapheneatomsnew.eps}
\caption{\textcolor{red}{(Color online) $C_3$ dispersion coefficients (a.u.) for interaction of Zn (blue dashed curve), Cd (orange dotted dashed curve), Hg (red long dashed curve) and Pb (green double dotted dashed curve) atoms with graphene layer as a function of separation distance $a$ (nm).}}
\label{Fig.4}
\end{figure}
\subsection{$C_3$ Coefficients for CNT}
In addition to graphene, we present the dispersion coefficient for CNT. Fig.~\ref{Fig.5} and Fig.~\ref{Fig.6} represent the influence of separation distance on dispersion coefficients evaluated for CNT of radius of 6 nm with heavy ions and atoms respectively. The $C_3$ coefficients and hence interaction are dominant at smaller distances for all the elements. Similar to the case of graphene, the interaction is strongest for Pb$^+$ and weakest for Zn$^+$ with CNT. For atoms, CNT offers a stronger potential to Pb and Cd atoms and weak potential to Zn atom.
The radius of CNT has been an important parameter in hydrogen storage applications. It has been known that a larger radius of CNT imparts a larger gravimetric storage amount for hydrogen storage~\cite{weng2007atomistic}. This motivated us to study the effect of radius of CNT on $C_3$ coefficients. Fig.~\ref{Fig.7} demonstrates the effect of radius of CNT on dispersion coefficients. CNT with a larger radius, i.e., 8 nm, has more potential to adsorb Pb species. This can be attributed to the greater exposure of carbon atoms towards ions and atoms with an increase in the radius of CNTs.
\begin{figure}
\centering \includegraphics[width=\columnwidth,keepaspectratio]{cnt6ionsnew.eps}
\caption{\textcolor{red}{(Color online) $C_3$ dispersion coefficients (a.u.) for interaction between Zn$^+$ (blue dashed curve), Cd$^+$ (orange dotted dashed curve), Hg$^+$ (red long dashed curve) and Pb$^+$ (green double dotted dashed curve) with CNT of radius $R$ = 6 nm as a function of separation distance $a$ (nm).}}
\label{Fig.5}
\end{figure}
\begin{figure}
\centering \includegraphics[width=\columnwidth,keepaspectratio]{cnt6atomsnew.eps}
\caption{\textcolor{red}{(Color online) $C_3$ dispersion coefficients (a.u.) for interaction between Zn (blue dashed curve), Cd (orange dotted dashed curve), Hg (red long dashed curve) and Pb (green double dotted dashed curve) atom with CNT of radius $R$ = 6 nm as a function of separation distance $a$ (nm).}}
\label{Fig.6}
\end{figure}
\begin{figure}
\centering \includegraphics[width=\columnwidth,keepaspectratio]{rcompnew.eps}
\caption{\textcolor{red}{(Color online) $C_3$ dispersion coefficients (a.u.) for Pb atom (purple long dashed curve for $R$ = 4 nm, pink dashed curve for $R$ = 8 nm) and Pb$^+$ ion (orange dotted curve for $R$ = 4 nm, blue dotted dashed curve for $R$ = 8 nm) for two different values of radius of CNT.}}
\label{Fig.7}
\end{figure}
\subsection{Dispersion coefficient for different gap parameter}
Most of the previous studies on dispersion coefficients for graphene and CNT have taken the upper bound of the gap parameter $\Delta$ as 0.1 eV~\cite{churkin2011dispersion, chaichian2012thermal, arora2014coefficients}. The effect of this parameter was shown while studying the interactions between alkali atoms and graphene layer in our previous study~\cite{kaur2014emending}. In the present work, we investigate the dependence of the $C_3$ coefficients on the gap parameter as well. Since the interaction of Pb atom is most prominent among all the elements considered in this study, we present the effect of gap parameter on dispersion coefficient between graphene layer and Pb atom. To find the influence of $\Delta$, we choose two different values of $\Delta$, namely 0.1 eV and 0.001 eV. The dispersion coefficient is seen to increase by only 0.41 \% at separation distance of $a = 1$ nm with decrease of $\Delta$ from 0.1 eV to 0.001 eV. When the same is investigated at larger separation distances of 100 nm and 200 nm, the percentage increase in the $C_3$ coefficient is found to be about 49\% and 80\%, respectively. This is presented in Fig.~\ref{Fig.8} where the gap between the two curves of different $\Delta$ increases with increase in separation.
\begin{figure}
\centering \includegraphics[width=\columnwidth,keepaspectratio]{DeltaNew.eps}
\caption{\textcolor{red}{(Color online) $C_3$ dispersion coefficients (a.u.) for Pb atom with graphene wall as a function of separation distance $a$ (nm) for two different values of gap parameter $\Delta$ (green long dashed curve for $\Delta$ = 0.1 and red double dotted dashed curve for $\Delta$ = 0.001) in eV.}}
\label{Fig.8}
\end{figure}
\subsection{Comparison of graphene and CNT}
Since we studied the dispersion interactions for two carbon-based materials, it is important to compare these two and prognosticate a better material for physisorption of these heavy elements. Both the materials show similar trend for adsorption of ions and atoms and are selective towards Pb and Cd atoms. The weakest interaction is observed for Zn$^+$ ion.
However, when a comparison is drawn for interaction of a microparticle with graphene and CNT it was found that graphene provided larger $C_3$ coefficient value and hence stronger interaction as compared to CNT.
Contrarily, a number of studies can be found in literature where CNT is widely accepted for physisorption applications as compared to graphene. The reason behind this is ability of experimentalists to tailor the properties and structure of CNTs with ease whereas the bulk preparation of pristine graphene is a major bottleneck that needs a direction~\cite{petit1999tuning}.
\section{Conclusion}\label{Sec IV}
To conclude, we have probed the dispersion coefficients for Zn$^+$, Cd$^+$, Hg$^+$, Pb$^+$, Zn, Cd, Hg and Pb with graphene and CNT walls. We have provided the dynamic dipole polarizability values for both heavy ions and atoms using sum-over-states approach. The interactions between heavy elements and material wall as a result of dispersion $C_3$ coefficients has been found to be maximum for Pb atom and ion at short separations. The result of interaction studies by our methodology is in agreement with interactions studied by DFT for heavy atoms. CNT also shows the potential for interaction of heavy elements following similar trend as that of graphene. We also deduce that graphene is more sensitive for interaction of the considered elements as compared to CNT. The obtained results could be useful for the formation of highly sensitive and selective sensors for detection of heavy ions and atoms.
\section{Acknowledgements}
One of the authors, B. A., is thankful to SERB-TARE (TAR/2020/000189), New Delhi, India, for a research grant, while R. S. is thankful for the sanction of research grant no. CRG/2020/005597 by SERB-DST, New Delhi, India.
|
1,314,259,994,976 | arxiv | \section{Introduction}
Photons remain a promising vehicle for the development of next-generation quantum technology~\cite{Obrien:2009un, Latmiral:2016dp}. Integrated quantum photonics, with its intrinsic phase stability and miniature devices, is necessary to bring linear optics to the large scale \cite{Politi2008, Metcalf:2014jwa, Minkov:2016fm}. Several integrated photonic platforms have emerged to solve this problem,
including silica-on-silicon~\cite{Politi2008, Matsuda:2014cy, Reimer:2015bva, Carolan:2015fb}, direct-write glass~\cite{Sansoni2010, Tillmann:2013jva, Flamini:2015cb, Bentivegna:2015iaba,Spring:2017gj}, lithium niobate~\cite{Vergyris:2016bd, Alibart:2016jo, Lenzini2017, Sansoni:2017jt}, silicon nitride~\cite{Zhang:2016dy, Moss:2013kv} and silicon-on-insulator~\cite{silverstone2016}. Silicon quantum photonics promises to simultaneously achieve the required functionality, performance, and scale.
Several important quantum optical functionalities have already been shown with high performance in silicon. Photon pairs can be generated using spontaneous four-wave mixing (SFWM)~\cite{Sharping2006, Azzini2012, Matsuda:2012dma, Olislager2013, Collins:2013eu, Xiong:2016bv}, and interfered with high visibility~\cite{Harada:2011cw, Silverstone:2013fu, Takesue:2014ic, Xiong:2016bv, Xu:2013jna}. Single-photon \cite{Silverstone2015} and pump-rejection \cite{Harris2014, Piekarek:dUPyT_rs} spectral demultiplexers, as well as two-mode interferometers \cite{Wilkes:2016ba}, have been demonstrated with very high extinction. Finally, single-photon detectors, based on superconducting nanowires have shown excellent performance on silicon waveguides~\cite{Najafi2015, Pernice:2012bc}. The very high refractive index contrast of silicon-on-insulator waveguides yields micron-scale components (e.g.~\cite{Xu:2008db}), while miniature ring resonator SFWM sources~\cite{Azzini2012}, and quantum interferometric networks~\cite{Harris:2014kz} facilitate devices on a very large scale.
The integration of entangled qubit sources with entangling quantum logic, together on a common platform, is an important next step. Here we show a new method for generating path-encoded, variably entangled two-qubit states. We perform multi-qubit quantum logic on these states and study their entanglement. We implemented this scheme on a reconfigurable, silicon photonic device to generate a wide range of two-qubit states. We integrated this source with arbitrary state preparation, a switchable two-qubit gate, and an interferometer for tomographic analysis. The implemented quantum circuit is similar to the one reported in~\cite{Shadbolt2012}.
We tested the device's quantum logic capabilities with several experiments. We analysed the source performance using reversed-Hong-Ou-Mandel-type (RHOM)~\cite{Chen:2007he, Silverstone:2013fu} quantum interference, and qubit tomography on a wide range of possible states. We followed this with an exploration of the on-chip quantum logic, with the switchable two-qubit gate in both entangling ($\hat{\textsc{cz}}$) and non-entangling ($\hat{I}$) configurations, and using the purity ($P$)~\cite{Gamel:2012hm}, the CHSH parameter ($S$)~\cite{Horodecki:2009gb} and the Schmidt number ($K$)~\cite{Terhal:2000gd} as diagnostic metrics.
\begin{figure*}[t!]
\centering
\includegraphics[width=1\linewidth]{figure1.png}
\caption{Device and apparatus overview. \textbf{a}~Operating principles. \textbf{i}~Non-degenerate spontaneous four-wave mixing, \textbf{ii}~quantum circuit description. \textbf{b}~Schematic of the silicon quantum photonic chip. A pump laser is coupled into the device, coherently pumping two spiralled RHOM sources which produce two photons entangled or separable in path. These are fed into a reconfigurable linear optical network which can entangle or disentangle them, and analyse the output. \textbf{c}~Off-chip apparatus. A continuous wave (CW) tunable laser source (TLS) is polarisation controlled (PC), amplified (EDFA), filtered and coupled onto the chip using lensed fibres and spot-size converters. Signal, idler, and pump photons coupled back into fibre in the same way, then spatially separated using dense wavelength-division multiplexers (DWDM), detected using superconducting nanowire single-photon detectors (SNSPD), and the output signal is analysed by a time interval analyser (TIA). \textbf{d}~Electron \textbf{i}~and optical \textbf{ii}~micrographs of the device.}
\label{figure1}
\end{figure*}
\section{Device structure and operation}
A schematic of the device is shown in Figure \ref{figure1}a. It comprises a reconfigurable source of two path-encoded entangled photons, controlled by the parameters $\phi_\beta$, $\phi_T$ and $\phi_B$. The source is followed by a reconfigurable interferometer, able to implement any two-qubit projector (including entangled projections). This second part of the device can be divided into three sections: arbitrary single qubit gates, a switchable post-selected controlled-Z ($\hat{\textsc{cz}}$) gate~\cite{Ralph:2002id}, and final single-qubit unitaries, used to implement projectors for quantum state tomography, to reconstruct the output state.
The device comprised $500\times220\,\mathrm{nm}^2$ waveguides, directional couplers (approximate length $45.9~\mu\textrm{m}$), a waveguide crossing ($> 20\ \mathrm{dB}$ isolation), and resistive metallic heaters (length $54.0~\mu\textrm{m}$). It was coupled to fibre via edge coupling, fibre lenses, and polymer spot-size converters. Electrical connections were achieved through multi-contact electrical probes and $200-\mu\textrm{m}$-pitch on-chip gold pads (approximately $120~\times~200~\mu\textrm{m}^2$). Fabrication of the device proceeded as in reference \cite{Silverstone2015}.
The experimental setup is presented in Figure \ref{figure1}b. Photons are generated on the chip via SFWM, pumped by an amplified continuous-wave tunable laser, and filtered to remove in-band noise. An average facet-to-facet transmission of $\approx -28\ \mathrm{dB}$ was observed. The dominant sources of loss were scattering at the chip facets, and propagation loss in the spiralled source waveguides. Inside the device the light was reconfigurably manipulated by an interferometric network, composed of evanescent coupler beam-splitters and thermo-optic phase-shifters~\cite{Trinh:1995gm, Harris:2014kz}. Photons were collected from the device, demultiplexed and separated from the pump using dense wavelength-division multiplexers (DWDM), detected using superconducting nanowire detectors~\cite{Miki:2013cv}, and finally converted into coincidence counts by a time-interval analyser.
\subsection{Photon-pair generation}
The strong non-linear properties of silicon waveguides are well known \cite{Leuthold:2010dg}. Spontaneous four-wave mixing (SFWM), an effect of the $\chi^{(3)}$ non-linearity, is now commonly used to produce photon pairs in silicon quantum photonic devices \cite{Sharping2006, Silverstone:2013fu}.
In the non-degenerate SFWM process used here, two photons from a bright pump are annihilated, producing two correlated photons with different wavelengths (Figure \ref{figure1}a). The two generated photons, `signal' and `idler', emerge spectrally on either side of the pump, conserving energy and momentum. In our experiment, spiralled 21-mm-long waveguides were used to produce photon-pairs, with the pump, signal, and idler photon wavelengths being 1551~nm, 1547~nm, and 1555~nm. These photons were generated in a continuous spectrum and the chosen wavelengths were post-selected by the off-chip demultiplexers.
\begin{figure}[tbh!]
\centering
\includegraphics[width=\linewidth]{figure2}
\caption{Quantum interference for the two sources, measuring coincidences from the outputs $\mathrm{OUT}_T'$ and $\mathrm{OUT}_B'$, obtained by pumping each RHOM source and scanning the source internal phase, $\phi_T$ or $\phi_B$. The imperfect interference can be explained in terms of imbalance in the on-chip evanescent coupler beam splitters.}
\label{figure2}
\end{figure}
\begin{table*}[bt!]
\centering
\begin{tabular}{cc|cccc}
\textbf{Source state} & \textbf{Gate} & \textbf{Purity} $P$ & \textbf{Schmidt number} $K$ & \textbf{CHSH} $S$ & \textbf{Fidelity} $F'$\\
\hline
\rule{0pt}{4mm}
$|00\rangle$ & {bypassed} & $0.995 \pm 0.012 $ & $1.012 \pm 0.011$ & $1.577\pm 0.072$ & $0.973 \pm 0.011$ \\
$|00\rangle$ & {$\hat{I}$} & $0.946 \pm 0.031 $ & $1.034 \pm 0.017 $ & $1.465 \pm0.064 $ & $0.962 \pm 0.016$\\
\rule{0pt}{5mm}
$|11\rangle$ & {bypassed} & $0.998 \pm 0.008$ & $1.004 \pm 0.006 $ & $1.511 \pm 0.049$ & $0.984 \pm 0.007 $\\
$|11\rangle$ & {$\hat{I}$} & $0.949 \pm 0.055 $ & $1.048\pm 0.037 $ & $1.601 \pm 0.121$ & $0.948 \pm 0.031$ \\
\rule{0pt}{5mm}
$(|00\rangle + |11\rangle)/\sqrt{2}$ & {bypassed} & $0.864 \pm0.019$ & $1.905 \pm 0.022 $ & $2.560 \pm 0.037$ & $0.909 \pm 0.028 $\\
$(|00\rangle + |11\rangle)/\sqrt{2}$ & {$\hat{I}$} & $0.832 \pm 0.040$ & $1.936\pm 0.025$ & $2.538\pm0.072$ & $0.900 \pm 0.026$\\
\rule{0pt}{5mm}
$|++\rangle$ & {$\hat{\textsc{cz}}$} & $0.931 \pm0.036$ & $1.657 \pm 0.045 $ & $2.560 \pm 0.078$ & $0.873 \pm 0.038$ \\
$(|00\rangle + |11\rangle)/\sqrt{2}$ & {$\hat{\textsc{cz}}$} & $0.900 \pm 0.071$ & $1.166 \pm 0.055 $& $1.907 \pm 0.137$ & $0.839 \pm 0.013$
\end{tabular}
\caption{Purity, Schmidt number, CHSH parameters and Fidelity for a variety of measured states. The Schmidt number and CHSH parameter indicate entanglement. $S > 2$ indicates the presence of non-local correlations~\cite{Horodecki:2009gb}, while $K$ indicates the number of coefficients in the Schmidt decomposition of the state~\cite{Terhal:2000gd}. The fidelities $F'$ reported are computed against the ideal state optimised over local $R_z$ rotations, to compensate for the intrinsic random phase factor on each qubit.}\label{table:data}
\end{table*}
\subsection{Entangled qubit generation}
Our device uses a new scheme to generate entangled path-encoded states, which can subsequently be interfered, using pairs of \emph{non-degenerate} photons. Pump laser is distributed between two reverse-HOM structures using a reconfigurable power splitter (splitting ratio $\sin^2[\phi_\beta/2]$). Each RHOM contains two spiralled waveguides and a thermal phase shifter, as in \cite{Silverstone:2013fu}. The internal RHOM phases ($\phi_T$ and $\phi_B$) were set to $\pi/2$, such that the produced photon-pairs emerged deterministically split, one in each output waveguide, and in a state symmetrical between signal and idler photons. $\phi_\beta$ allows us to control the balance of photon-pair emission between the two RHOM structures, and so to control the entanglement present in the two-qubit output state.
Following Figure \ref{figure1}b, if $\phi_\beta = \pi$, photons will be generated only in the top RHOM, and the photon number output state, after the waveguide crossing, will be $|\mathit{1010}\rangle$, or $|00\rangle$ in the qubit basis. On the other hand, if $\phi_\beta = 0$, only the bottom RHOM generates photons, leading to $|\mathit{0101}\rangle = |11\rangle$. Finally, if $\phi_\beta = \pi/2$, we obtain the maximally entangled state: $|\Phi^\Theta\rangle \equiv (|00\rangle+e^{i \Theta}|11\rangle)/\sqrt{2}$, where $\Theta$ is a fixed phase factor due to the chip's intrinsic path-length mismatch. Thus, the output state from the entangled qubit generator is
\begin{equation}
|\psi\rangle = \sqrt{\beta}|00\rangle + e^{i \Theta} \sqrt{1-\beta}|11\rangle
\label{equation1}
\end{equation}
which can be continuously varied across a wide range of separable and entangled states, depending on the balance parameter, $\beta$. The balance depends on the square of the power division of the state control MZI (controlled by the phase $\phi_\beta$), due to the two-photon dependence of SFWM:
\begin{equation}
\beta = \left|\frac{\sin^2(\phi_\beta/2)}{\sqrt{\sin^4(\phi_\beta/2) + \cos^4(\phi_\beta/2)}}\right|^2.
\label{bal}
\end{equation}
\subsection{Quantum logic and analysis}
The state $|\psi\rangle$ is fed into a two-qubit circuit, composed of single-qubit rotations, and a switchable entangling gate. We implemented the arbitrary rotations on each qubit by cascading phase-shifters and Mach-Zehnder interferometers (MZI). These were used to realise $\hat R_z$ and $\hat R_y$ rotations, respectively, obtaining an arbitrary $\mathrm{SU}(2)$ with the combination $\hat R_{z}\cdot \hat R_y \cdot \hat R_{z}$.
We implemented a switchable entangling gate using a scheme based on~\cite{Ralph:2002id}, but replacing the $1/3$ beam-splitters with tunable-reflectivity MZIs. In this way, we can switch the gate's controlled-Z operation on and off. When on, the $\hat{\textsc{cz}}$ operation succeeds with probability $1/9$.
In the remaining $8/9$ cases non-qubit states are generated, which are filtered by the coincidence-counting post-selection.
Note that only the on ($\cos{(\theta_{CZ})} = 1/3$)
and off ($\cos{(\theta_{CZ})} = -1$)
gate configurations produce unitary operations.
The two qubit gate is followed by rotations (parametrised by $\theta_{Mz3}$, $\theta_{My2}$,
$M\in\{T,B\}$) used to implement quantum state tomography, via the method described in~\cite{James:2001bb}.
\subsection{Calibration}
Since the phase shifter parameters (phase-per-electrical-power, and phase offset) varied between phase modulators, a calibration process was essential. Measuring the bright-light transmission from the inputs ($\mathrm{IN}$ and $\mathrm{IN}'$) to the outputs ($\mathrm{OUT}_T$, $\mathrm{OUT}_B$, $\mathrm{OUT}_T'$, $\mathrm{OUT}_B'$),
we were able to characterise the electro-optic parameters of each thermal phase shifter, in a similar way to that described in \cite{Santagati2016}. We learned the parameters associated with each phase according to the scheme:
\begin{equation}
\begin{split}
\mathrm{IN}' \rightarrow \mathrm{OUT}_T', \mathrm{OUT}_B' &: \phi_{B}, \theta_{By1},
\theta_{\mathrm{CZB}}, \theta_{Ty1}, \theta_{\mathrm{CZT}}\\
\mathrm{IN} \rightarrow \mathrm{OUT}_T', \mathrm{OUT}_B' &: \phi_\beta, \phi_{T}, \theta_{Tz1}, \theta_{Bz1}\\
\mathrm{IN} \rightarrow \mathrm{OUT}_T &: \theta_{\mathrm{CZC}}, \theta_{Ty2}, \theta_{Tz2}, \theta_{Tz3}\\
\mathrm{IN} \rightarrow \mathrm{OUT}_B &: \theta_{By2}, \theta_{Bz2}, \theta_{Bz3}.
\end{split}
\label{eq:calibration}
\end{equation}
We observed instabilities in the calibration data, due to changes in electrical contact resistance between our probe card and the on-chip gold pads. To mitigate this, we periodically recalibrated the on-chip parameters. Metallurgical wire-bonded contacts can prevent this in future. Low levels of thermal and common-ground crosstalk were observed but not compensated. Recent results suggest that crosstalk can be reduced through efficiency improvements, passive compensation methods, and by current driving of the thermal phase shifters~\cite{Harris:2014kz, Santagati2016, Paesani:2017ga}.
The offsets of the tomographic $z$-rotation phases ($\theta_{Tz3}$, $\theta_{Bz3}$) were left at zero, meaning that additional random (fixed) $z$ rotations were applied to each qubit before measurement. This choice was necessitated by the combined difficulty of: (1) calibrating the non-linear source phase with bright light, and (2) doing this for each setting of the gate, in the device's finite stability time.
\section{Results}
\begin{figure}[tbh!]
\centering
\includegraphics[width=\linewidth]{figure3}
\caption{Two-qubit state properties, direct from the source, as a function of the input state control phase, $\phi_\beta$. \textbf{a}~Balance between the $|00\rangle$ and the $|11\rangle$ components of the state, see equation \eqref{bal}. \textbf{b}~Schmidt number. \textbf{c}~CHSH parameter. Maximal entanglement occurs when the state is balanced, when $\phi_\beta=\pi/2$. Error bars were computed as one standard deviation of 200 trials around each tomographic measurement, each with a random sampling of Poisson photon noise. We assume a control phase uncertainty of $\pm\pi/50$.}
\label{figure3}
\end{figure}
\subsection*{Source performance}
One of the key metrics of a photon-pair source is its pair-generation efficiency~\cite{Savanier:2016kb}. This quantity is obtained from the photon-pair detection rate as a function of the input power, accounting for loss and detector efficiency. Inside the 1-nm-wide signal and idler spectral bands, we measured a brightness of $20\, \mathrm{kHz}/ \mathrm{mW}^2$.
The indistinguishability between photon-pair sources is also important. The contrast of the RHOM block's quantum interference fringes indicates the indistinguishability of the block's constituent photon-pair sources. We measured RHOM quantum interference fringes on each source by configuring the chip to maximise photon flux at the $\mathrm{OUT}_T'$ and $\mathrm{OUT}_B'$
outputs, then varying $\phi_T$ and $\phi_B$ to obtain the fringes of Figure \ref{figure2}. We pumped the bottom source via the auxiliary input $\mathrm{IN}'$, and the top source via $\mathrm{IN}$ and the state-control MZI, integrating each point for 5~s. We observed $C = 93.2 \pm 1.4\%$ and $72.9 \pm 0.8\%$ fringe contrasts, respectively, for the top and bottom sources. Here, $C = (N_{\mathrm{max}}-N_{\mathrm{min}}) / (N_{\mathrm{max}} + N_{\mathrm{min}})$, where $N_{\mathrm{max}}$ and $N_{\mathrm{min}}$ are the accidental-subtracted maximum and minimum fitted count rates. The reduced contrasts can be explained by deviations (from the ideal $\eta = 50\%$) in the input evanescent couplers of each RHOM structure; they are compatible with reflectivity values of $\eta \approx 43 \%$ and $\eta \approx 36 \%$ for the top and bottom sources, respectively.
\subsection*{Quantum logic}
\begin{figure}[h!]
\centering
\includegraphics[width=\linewidth]{figure4}
\caption{Reconstructed output states for various source and gate configurations. States \textbf{a,c,e}~are seeded by an entangled source state, while \textbf{b,d,f}~are seeded by a $|11\rangle$ source state. States \textbf{a,b}~bypass the gate; \textbf{c,d}~pass through the gate set to $\hat{I}$; and \textbf{e,f}~pass through the gate set to $\hat{\textsc{cz}}$, and include the phase information, below. State properties are compiled in Table~\ref{table:data}. Device configurations producing each set of states are shown at right.}\label{figure4}
\end{figure}
\begin{figure}[h!]
\centering
\includegraphics[width=\linewidth]{figure5}
\caption{Detail of phase entanglement and separability of states shown in Figure \ref{figure4}~e,f. Since the $\hat{\textsc{cz}}$ gate operates on phase, random, fixed, local $z$-rotations obscure the underlying performance. The connection between the measured and ideal states, via numerical optimisation of $\zeta_t$ and $\zeta_b$, is shown for \textbf{a} the gate-entangled, and \textbf{b} gate-disentangled states. In both cases the ideal density matrix magnitude is constant, $|\hat\rho_{i,j}| = 1/4$.}\label{figure5}
\end{figure}
We next quantified the device's control over entanglement. Quantum state tomography was used to extract the Purity ($P=\mathrm{Tr}(\hat\rho^2)$~\cite{Gamel:2012hm}), the CHSH parameter, a strict measurement of quantum correlations, and the Schmidt number, analogous to the number of pure states represented in a given density matrix. These last two metrics show how separable the state is. The CHSH inequality, $S(\hat\rho) \le 2$~\cite{Aspect:1982br, Horodecki:2009gb, Silverstone2015},
is violated when the state $\hat\rho$ cannot be represented by a local classical theory, indicating its entangled quantum nature. The Schmidt number, on the other hand, is an entanglement monotone and can give further evidence of the entangled or separable nature of $\hat\rho$~\cite{Terhal:2000gd, Horodecki:2009gb, Sperling:2011di}. CHSH parameter values were obtained by computationally selecting an optimal measurement set for each of the states under analysis~\cite{Silverstone2015}.
We analysed a wide set of separable and entangled quantum states produced by the two-qubit source. Fixing $\phi_T = \phi_B = \pi/2$, we varied the phase of the state control MZI, $\phi_\beta$, between $0$ and $\pi$ to prepare variably entangled states in the form of \eqref{equation1}. When $\beta = 0$ or $1$, separable states result, while when $\beta = 1/2$, a maximally entangled state is produced. States obtained directly from the source (bypassing the gate) showed good agreement with \eqref{equation1}. These were measured using the {$\mathrm{OUT}_T'$} and {$\mathrm{OUT}_B'$} auxiliary outputs (see {Figure \ref{figure1}b}). Measured and calculated variations of the balance, Schmidt number, and CHSH parameter are plotted in Figure \ref{figure3}, versus the state control parameter $\phi_\beta$.
In Figure \ref{figure4} we show a sample of density matrices arising from the main device configurations, and we list their properties (purity, Schmidt number, CHSH parameter, and fidelity with the ideal $z$-rotated state) in Table~\ref{table:data}. Errors were obtained from Monte-Carlo simulations, based on 200 samples of Poissonian photon noise and accompanying tomographic reconstructions~\cite{Roos:2004hm}. As expected, the $\hat{I}$-mode gate did not substantially affect the properties of the input states. The $\hat{\textsc{cz}}$-mode gate, however, acted to entangle separable states, and separate entangled states, though it also degraded the purity. The limited contrast in the quantum interference of the two RHOM sources contributed to this reduction, by occasionally depositing two photons into one `qubit'. Gate and tomography calibration errors likely also contributed.
Since the entangling gate operates on the input state's \emph{phase}, we must examine with care the phase of the output state, $\mathrm{arg}[\hat\rho]$. The intrinsic and uncalibrated $z$-rotations on each qubit result in complicated phase pictures (Figure \ref{figure4}e,f). {To compare these to their ideal counterparts, we computationally applied $\hat R_z(\zeta_t) \otimes \hat R_z(\zeta_b)$ to the reconstructed output state, and optimised the fidelity over local $z$-rotations via $\zeta_t$ and $\zeta_b$. The resulting fidelities are listed in {Table~\ref{table:data}} and the process is shown visually in Figure \ref{figure5}.}
\section*{Discussion}
We have presented a silicon-on-insulator quantum photonic device which embeds capabilities for the generation, manipulation, and analysis of two-qubit entangled states, by leveraging on-chip linear and non-linear optics. We showed how the device can prepare a variety of entangled and separable states, and operate on them using a switchable entangling gate. We demonstrated a new reconfigurable source of variably path-entangled non-degenerate photon pairs, using reversed Hong-Ou-Mandel quantum interference, and used on-chip quantum state tomography to measure its performance.
The integration of this source with a complex integrated linear optical network enabled both the entanglement and disentanglement of the on-chip generated quantum states.
Device performance was hindered by imperfect beam-splitters and high coupling losses, leading to issues with stability, and ultimately limiting the measurable purity and entanglement. However, the use of more advanced fibre couplers, such as those based on ultra-low loss gratings~\cite{Ding:2013hl}, together with adaptive methods, employing multiple imperfect MZIs for the realisation of a very high-quality one~\cite{Wilkes:2016ba}, can overcome these limitations, and enable high-performance, large-scale silicon photonic quantum devices in the near future.
\section*{Acknowledgements}
We thank Damien Bonneau, Jianwei Wang, and Dylan Mahler for valuable discussions and support. We are grateful to Alasdair Price for help with preliminary characterisation. We also thank the staff of the James Watt Nano-fabrication Centre in Glasgow. We acknowledge support from the European Union through the BBOI, and from the QUCHIP projects. M.G. Thompson acknowledges support from an Engineering and Physical Sciences Research Council (EPSRC, UK) Early Career Fellowship and from the European Research Council (ERC Grant Agreement number: 640079 QPE ERC-2014-ST). J.W. Silverstone acknowledges an EPSRC Doctoral Training Account, and a Natural Sciences and Engineering Research Council (Canada) Alexander Graham Bell Canada Graduate Scholarship. J.L.O'B. acknowledges a Royal Society Wolfson Merit Award and a Royal Academy of Engineering Chair in Emerging Technologies.
\section*{Author contributions statement}
R.S. and J.W.S. contributed equally to this work. They conceived and designed the device, performed the experiments, and analysed the data. M.J.S. and M.~Sorel fabricated the device. S.M., T.Y., M.F., M. Sasaki, and H.T. provided the superconducting detectors and M.G. Tanner, C.M.N., and R.H.H. built the detector system. M.G. Thompson supervised the work. All authors contributed to the manuscript.
% |
% 1,314,259,994,977 | arxiv |
There is ample evidence that supports the
existence of dark matter (DM) and
dark energy (DE) in the universe \cite{Seljak}.
Due to its charge neutrality and dust-like equation of state (i.e.,
negligible pressure), dark matter starts to cluster
gravitationally very early in the history of the Universe,
and is crucial for the formation of large scale structure.
Dark energy, on the other hand, becomes relevant
only more recently, and is presumed to be a smooth
component with a negative equation of state in order
to fuel the accelerated expansion of the Universe \cite{Reviews}.
At the background level, dark energy (or any other fluid) is completely
determined by its equation of state $w=p_e/\rho_e$, where $p_e$ is the
pressure and $\rho_e$ is the energy density of dark energy.
Already at
this level dark energy can affect large scale
structures \cite{Linder05} -- see also \cite{LiberatoRosenfeld}
for the effect in different parameterizations of the equation
of state of dark energy.
However, if dark energy is in fact a manifestation of a dynamical
mechanism such as a scalar field, then it will also develop
inhomogeneities due to its gravitational interactions with itself and
with dark matter \cite{Cobleetal}.
In linear perturbation theory,
besides the energy density perturbation
$\delta\rho$, we need two extra degrees of freedom to
characterize cosmological perturbations:
the pressure perturbation $\delta p$ and the
scalar anisotropic stress $\pi$ \cite{Bardeen,KodamaSasaki}.
Alternatively, one can also use the velocity potential
$\theta=\vec\nabla \cdot \vec{v}$ and
the anisotropic stress \cite{Bertschinger}.
The inhomogeneities of dark energy are often quite small,
particularly in the case of ordinary (canonical) scalar
field models with almost
$\Lambda$-like behaviour -- that is, when $w \simeq -1$
\cite{Cobleetal,DuttaMaor,MotaShawSilk}.
In fact, as $w \rightarrow -1$ the perturbations in all dark
energy models are suppressed in relation to those of dark
matter. However, if that is not the case
then the dark energy density contrast
$\delta_e \equiv \delta\rho_e/\rho_e$ can be either
small or large, depending
on the pressure perturbations of dark energy \cite{US}.
Here we present further evidence of an intriguing possibility
which was first pointed out in supergravity-motivated scalar field
models of dark energy \cite{MotaBruck,NunesMota}: that dark energy can
mutate into a fluid with clustering properties similar to those of dark
matter. We will show that this effect is a generic feature of dark energy,
and that it has a simple origin: when
pressure perturbations are large, the effective equation of state
inside a collapsed region can be completely different from
the equation of state of its homogeneous component.
\section {Gravitational collapse with dark energy}
In the following, in order to specify the properties of the dark
energy perturbations
we will neglect the anisotropic stress. Moreover, we will
characterize the pressure perturbation in a simplified manner,
using the so-called effective (or non-adiabatic) sound velocity
\cite{Hu}, defined as $c_{eff}^2 \equiv \delta p_e/ \delta \rho_e$,
which we will assume to be a function of time only.
Notice that this assumption lacks a formal basis in perturbation
theory,
since $\delta p_e$ is a perturbed variable whose time and spatial
dependences can be, and often are, independent of the
variations of $\delta\rho_e$.
Nevertheless, in a particular gauge (the so-called ``rest frame''
of the fluid, where $T^i_0 = 0$), the effective sound speed
coincides with the phase velocity of linear relativistic
perturbations, $c_X^2$ \cite{Hu,Mukhanov}.
Describing the pressure perturbation as
$\delta p_e = c_{eff}^2 \, \delta\rho_e$
allows us to treat a wide variety
of dark energy models and, crucially, it also allows us to compute
non-linear structure formation using the Spherical Collapse
model (SC) \cite{GunnGott}. Furthermore, in this case
the SC equations (derived in a simplified
relativistic framework) are identical to the equations of
pseudo-Newtonian cosmology \cite{US2}. This means that the
physics of gravitational collapse of structures such as
galaxy clusters is well described within this framework.
Consider then, in the spirit of the SC
model, a spherically symmetric region of
constant dark energy overdensity (the so-called
``top-hat'' density profile.)
Let us call $\rho^c_e = \rho_e + \delta \rho_e$ and
$p^c_e = p_e + \delta p_e$ the energy density and
the pressure of this region, which are modified with
respect to the corresponding background quantities,
$\rho_e$ and $p_e$ by the perturbations $\delta \rho_e$ and
$\delta p_e$.
The equation of state $w$ is defined as the ratio of
the total pressure to the total energy density, and hence
it will be different
for the background and the interior of the collapsed region. A simple
calculation shows that the equation of state
inside the collapsed region, $w^c$, is given by:
\begin{equation}
w^c = \frac{p_e + \delta p_e}{\rho_e + \delta\rho_e} =
w + (c_{eff}^2 - w) \frac{\delta_e}{1+ \delta_e} \; .
\label{deltaw}
\end{equation}
For small density contrasts $|\delta_e| \ll 1$, the equation of
state inside the overdense
region does not change appreciably. However,
if $c_{eff}^2 \not= w$, then in the nonlinear
regime, where $\delta \gtrsim 1$ (halos),
there could be a substantial modification in $w^c$ with respect
to the background equation of state.
Even in underdense regions (voids), where
$\delta \approx -1$, there could be large modifications of the
equation of state.
Hence, in principle dark energy could even
effectively mutate into dark matter inside halos and voids.
The above argument is completely general. What remains to be shown is whether
there are models in which
this dramatic situation is actually realized.
This requires a non-linear analysis of the evolution of perturbations for two
gravitationally coupled fluids, dark energy and dark matter
(we will neglect radiation and baryons in what follows).
Unfortunately, at present there are no totally rigorous methods
for performing this analysis -- except in the
case of canonical scalar fields, but even then only
approximately \cite{DuttaMaor,MotaShawSilk}.
Here we employ a generalization
of the SC model for the case of a relativistic fluid with pressure.
In the next section we present the
relevant equations and mention under which
conditions they are equivalent to a pseudo-Newtonian approach.
We will then analyse the evolution of the coupled system
of perturbations for a wide variety of dark energy models for
which the pressure perturbations are characterized by
some homogeneous effective sound speed.
\section{Non-linear evolution}
We define $H = \dot{a}/a$ and $h = \dot{r}/r$ as the expansion rates for the
background ($a$ is the scale factor) and for the
perturbed region ($r$ is the size of the collapsing region),
respectively.
In what follows we work within two assumptions, namely,
that there is no non-gravitational interaction between DE and DM,
and that the total energy of both DE and DM
contained in the collapsed region is constant.
The possibility of including DM-DE interactions in the study of
structure formation was studied in \cite{NunesMota,MotaManera} and
a discussion of possible outflow of energy from the collapsed region can be found in
\cite{MotaBruck,NunesSilvaAghanim}.
Using the continuity equations for the background and for the
perturbed region for a fluid species $j$:
\begin{equation}
\frac{ \dot{\rho}_j}{\rho_j} = - 3 H (1 + w_j) \quad , \quad
\frac{ \dot{\rho}_j^c}{\rho_j^c} = - 3 h (1 + w^c_j),
\end{equation}
we obtain, for the density contrasts:
\begin{equation}
\dot{\delta}_j = - 3 (1+\delta_j) \left[ h (1 + w^c_j) - H (1 + w_j) \right].
\end{equation}
Obviously, for matter we have $w_m=w^c_m=0$.
Using Eq. (\ref{deltaw}) and that the local expansion rate is
related to the velocity
field in the perturbed region by $h = H + \theta/3 a$,
we arrive at:
\begin{equation}
\dot{\delta}_j + 3 H (c_{j \, eff}^{2} - w_j) \delta_j
+ \frac{\theta}{a} \left[ (1 + w_j) + (1 + c_{j \, eff}^{2})
\delta_j \right]=0 \; .
\end{equation}
The equation that determines the evolution of $\theta$ comes from
the ``acceleration'' in the perturbed region:
\begin{equation}
\dot{\theta} + H \theta + \frac{\theta^2}{3 a} + \frac{3}{2} H^2 a \left
[ \Omega_m \delta_m + (1+3 c_{eff}^2) \Omega_{e} \delta_{e} \right] = 0.
\end{equation}
Notice that there is only one equation for the peculiar velocity,
even in the case of 2 fluids. This is clearly necessary, because
in the SC model it is a single
spherically symmetric region that detaches from the background,
with a peculiar expansion rate given by $h = H + \theta/3 a$.
When there is pressure and pressure gradients,
relativistic corrections almost
surely break this identity between the velocities of the fluids,
which means that the SC model with a top-hat profile
is inconsistent with a fully relativistic calculation.
The assumption that $c_{eff}^2$ is only time dependent
implies that $\delta p$ is also only time dependent -- which
ultimately guarantees the validity of the top hat SC model.
The SC equations capture
many features of the gravitational physics one expects to find at
the scale of the collapsed structures we see today. This is because
the exact same equations can be derived from a pseudo-Newtonian
treatment of perturbations
when gradients of pressure can be neglected -- as is the present
case, of a top hat profile.
When linearized, the ensuing equations correspond to the
linear equations from General Relativity in the case
$c_{eff}^2 = 0$ for sub-horizon scales \cite{Ribamar}.
Furthermore, even
when $c_{eff}^2 \not= 0$, although the equations are not
equivalent anymore, still the growing modes of the
linearized pseudo-Newtonian perturbations
are identical to the growing modes of the
linearized relativistic perturbations
\cite{US2}.
\section{Numerical solutions}
We solved the coupled differential equations
for $\delta_{e}$, $\delta_{m}$ and $\theta$, for a few
representative models of dark energy.
In our approximation, a dark energy model is determined by
its background equation of state and the effective speed of
sound.
In a previous paper we investigated the particular case
where the sound speed of dark energy is equal to
its equation of state, $c_{eff}^2 = w$,
and therefore $w^c = w$. In that case, one can see clearly
from Eq. (\ref{deltaw}) that there is no mutation \cite{US}.
In this letter we expand our previous analysis
to the following cases:
$c_{eff}^2 = 1$,
$c_{eff}^2 = 0$,
$c_{eff}^2 = -1$ and
$c_{eff}^2 = -w$.
The first case is motivated by the common situation when
dark energy is modelled by a canonical scalar field,
since in the gauge corresponding
to the rest frame of the scalar field the effective sound speed is
$c_{eff}^2 = 1$ \cite{Hu}.
The second case (null pressure perturbations) can occur in
so-called ``silent quartessence'' models \cite{silent}.
The third case represents a perturbation with behaviour close to a cosmological constant
and the last case reproduces the so-called
generalized Chaplygin gas in a certain limit ($\alpha = 1$) \cite{luca}.
\begin{figure}
\vspace{0.5cm}
\includegraphics[width=9cm]{wc08}%
\caption{Clustered dark energy equation of state for
different cases of effective
speed of sound for background $w = -0.8$:
$c_{eff}^2 = w$ (solid line), $c_{eff}^2 = 1$ (dashed line),
$c_{eff}^2 = 0$ (dot-dashed line),
$c_{eff}^2 = -w$ (dotted line) and
$c_{eff}^2 = -1$ (double dot-dashed line).
The instant of turnaround ($h=0$) is $z=0.3$ for $c_{eff}^2 = w$, $z=0.5$ for both
$c_{eff}^2 = -w$ and $c_{eff}^2 = 1$, and $z=0.6$ for $c_{eff}^2 = 0$ (for $c_{eff}^2 = -1$
there is no turnaround.)
\label{wc08} }
\end{figure}
\begin{figure}
\vspace{0.5cm}
\includegraphics[width=9cm]{wc099}%
\caption{Clustered dark energy equation of state
for different cases of effective
speed of sound for background $w = -0.99$. Lines are the same as in Figure 1.
The cases of $c_{eff}^2 = w$ and $c_{eff}^2 = -1$, as well as
$c_{eff}^2 = 1$ and $c_{eff}^2 = -w$,
lie on top of each other.
Turnaround occurs at approximately $z\simeq0.6$ in all cases.
\label{wc099} }
\end{figure}
We use adiabatic initial conditions for the perturbations
[$\delta_{e}^i = (1 + w) \delta_{m}^i$] at a redshift $z=1000$,
an initial velocity field coincident with the Hubble flow ($\theta^i = 0$),
$H_0 = 72$ km s$^{-1}$ Mpc$^{-1}$, $\Omega_m = 0.25$ and
$\Omega_{e} = 0.75$. In the examples shown below
we fixed the background equation of state of dark energy
at $w = -0.8$.
In Fig. 1 we show the values of the clustered equation
of state $w_c$ as a function of the redshift.
The initial condition $\delta_{m}^i$ was chosen such that the dark energy
perturbation $\delta_{e} \sim {\cal{O}}(1)$ today.
The perturbation in dark matter is typically one to two
orders of magnitude larger, which is consistent with the
typical density contrast in galaxy clusters, for which
$\delta_m \sim {\cal{O}}(10^3)$.
As discussed above, for the case $c_{eff}^2 = w$ we obtain no
mutation in the equation of state in the
perturbed region.
However, substantial modifications are found for other possibilities of
$c_{eff}^2$. The largest modifications arise for
$c_{eff}^2 = 1$, which in our example is very similar to $c_{eff}^2 = -w$.
In these two cases, even a complete
metamorphosis of dark energy into a fluid which
clusters as strongly as dark matter
is possible at recent epochs, due to the large perturbations inside
the collapsed region.
The behaviour for $c_{eff}^2 = -1$ is also easily understood from
Eq. (\ref{deltaw}), since this is the only case where $c_{eff}^2 - w < 0$.
As expected, the effect of mutation is greatly reduced for a background
equation of state close to that of a cosmological constant,
independent of $c_{eff}^2$.
We illustrate that fact in Fig. 2, where we show the equation of state
inside the collapsing region
for different models of clustered dark energy, in the case of a background
equation of state $w= -0.99$, and with the same initial conditions
that were used in Fig. 1. Nevertheless, even in this case
large density contrasts can still arise in the dark energy component,
which lead to the mutation of dark energy.
\section{Conclusions}
We have shown that it is possible to change radically
the clustering properties of dark energy in
collapsed regions (halos and voids.)
We exemplified this behaviour with a few models for the
dark energy perturbations, and showed that it happens
not only in scalar field models, but also in generic
models of dark energy -- in particular the Generalized
Chaplygin Gas and Silent Quartessence models.
Since the physics of most observed collapsed structures, such as
galaxy clusters, is well approximated by
quasi-Newtonian physics, this dynamical mutation should be
a general phenomenon.
Clearly, this is a crucial issue for all attempts to
compute the influence of dark energy on the formation of
large scale structures.
More detailed studies, including a relativistic approach and
using different realistic parameterizations of the dark energy
equation of state, are currently under way
\cite{US2}.
\section*{Acknowledgments}
We would like to thank
Ioav Waga for many fruitful discussions.
This work has been supported by
FAPESP grants 04/13668-0 (L.R.A. and R.R.) and 05/00554-0 (R.C.B.),
a CNPq grant 309158/2006-0 (R.R.) and a CAPES grant (L.L.).
% --- end of first manuscript; a second, unrelated manuscript begins below ---
\section{Introduction}
Synchronisation is a ubiquitous phenomenon observed in many complex systems across spatial and temporal scales~\cite{Arenas:2008ku}, from the firing patterns of neurons and the communication of fireflies, to the flow of traffic~\cite{Petri:2013bz,OKeefe:1971bj,Hafting:2005dp}.
One of the most popular dynamical systems, capable of reproducing a wide range of observed synchronisation behaviours, is the Kuramoto model of coupled oscillators
~\cite{kuramoto1975self,acebron2005kuramoto,rodrigues2016kuramoto}.
Whilst the model was originally formulated in terms of all-to-all interacting oscillators, the interactions between oscillators are commonly considered inhomogeneous and represented with a graph, whose structure affects the resulting dynamics.
For example, while the full synchronisation of the oscillator population is usually a strong attractor for the dynamics irrespective of the underlying graph ~\cite{Arenas:2008ku,rodrigues2016kuramoto}, the transient dynamics on the path towards synchronisation can reveal the modular structure of the oscillators' interactions~\cite{Arenas:2006ba}.
Beyond the structure of oscillator interactions, other variations of the Kuramoto model have been studied extensively, including: time-delayed interactions~\cite{yeung1999time,Hellyer:2015ci}, oriented or signed interactions~\cite{hong2011kuramoto,delabays2019kuramoto}, time-varying parameters, stochasticity, and more, see~\cite{Arenas:2008ku} for a comprehensive review.
Of particular interest for this study, the introduction of a frustration parameter~\cite{sakaguchi1986soluble} in the nonlinear term of the Kuramoto model, then known as the Sakaguchi-Kuramoto model, can produce rich dynamics~\cite{Abrams:2004hq,Shanahan:2010go,omel2012nonuniversal,nicosia2013remote} and appears in many applications~\cite{wiesenfeld1996synchronization,filatrella2008analysis}.
Recently, the study of higher-order interactions between elements of a system, that is, models with interactions involving more than two nodes, has garnered momentum and interest~\cite{Battiston:2020kp}.
Higher-order interactions are typically represented with hypergraphs or simplicial complexes, both of which generalize the graph representation of pairwise interactions to instead encode three-, four- and higher-way interactions.
Naturally, extensions of well known dynamical systems have been proposed to investigate the effect of higher order interactions on their behavior~\cite{iacopini2019simplicial,Carletti:2020ux,schaub2020random, millan2020explosive}.
The Kuramoto model --being a paradigmatic model for synchronization phenomena-- is no exception.
In this case, however, there are two main avenues to extend classical oscillator models to higher-order.
The first approach maintains the usual setup of phases defined on the nodes of a system and upgrades the interactions to the polyadic case, e.g. using simplicial complexes as the underlying connectivity structure.
Recent works investigated variations of this node Kuramoto model with higher-order interactions introducing various types of coupling terms~\cite{Skardal:2019ik,Skardal:2020fl}.
These models display a rich variety of synchronization and desynchronization phenomena, as well as multi-stable behavior.
The second approach instead promotes phase variables from nodes to higher-order simplices, thus defining phases for edges, triangles, and all higher-order interactions, coupled by boundary operators as generalized incidence matrices.
Pioneering work in this direction, \cite{millan2020explosive} showed that the edge dynamics projected onto the nodes and faces possesses explosive synchronization properties when specific nonlinear and non-local couplings are introduced between the two projections. More recently, also a version of the same model with local coupling between orders was introduced~\cite{calmon2021topological}.
In this paper, we extend this latter \textit{simplicial} Kuramoto model~\cite{millan2020explosive} to include: i) weights on any simplices with a precise mathematical formulation based on discrete differential geometry; and, more importantly, ii) linear and nonlinear frustrations.
We will refer to the former --\textit{linear}-- frustrations as natural frequencies yielding non-fully synchronized stationary states, and to the latter --\textit{non-linear}-- as the higher-order generalization of the Sakaguchi-Kuramoto model~\cite{sakaguchi1986soluble}.
The difficulty of introducing a proper nonlinear frustration comes from the orientation of the simplices, which makes even a naive frustration orientation-dependent.
Here, inspired by previous work on higher-order random walks~\cite{schaub2020random}, we lift the simplices to double their numbers with opposite signs, obtaining an equivalent formulation without frustration and an orientation independent frustration.
We then study the resulting frustrated simplicial Kuramoto on edges with numerical simulations using several measures to quantify the type of dynamics, such as Hodge decomposition, the order parameter and the largest Lyapunov exponent.
\section{Theory}
\subsection{Simplicial complexes and Hodge Laplacian}\label{sec:sc_hl}
The central elements of the mathematical formulation of the Kuramoto model on simplicial complexes are the boundary operators and the related Hodge Laplacians, which are, respectively, generalizations to higher order structures of the graph incidence matrices and of the Laplacian operator.
We briefly review the main concepts we will use in our work following~\cite{grady2010discrete}, see Appendix~\ref{discrete_geo} for more details.
A $k$-simplex is defined by a set of $k+1$ nodes (a 1-simplex is an edge, a 2-simplex is a triangle, etc.).
A simplicial complex is defined as a set of simplices in which every face of a simplex is also a simplex.
For our purposes, the relevant connectivity between $k$-simplices will be that induced by sharing a $(k-1)$-simplex as a face, e.g. triangles sharing an edge, or by being faces of a $(k+1)$-simplex, e.g. edges belonging to the same triangle.
A $k$-chain within a simplicial complex is a linear combination of $k$-simplices.
We denote by $n_k$ the number of $k$-simplices of a complex, which is also the dimension of the $k$-chains and $k$-cochains vector spaces, dual to $k$-chains.
The coboundary operator $N_k$ and its dual $N_k^*$ on a simplicial complex are defined using the generalized incidence matrices $B_k^T\in M^{n_{k}\times n_{k+1}}$ which encode the topology of a simplicial complex, and the weight matrices $W_k$, which are diagonal matrices of the $k$-simplices weights
\begin{align}
N_k = B_k\, , \qquad N_k^* = W_{k} B_k^T W^{-1}_{k+1}\, .
\end{align}
Both act on $k$-cochains, defined as linear functional on the space of $k$-chains, see Appendix~\ref{discrete_geo}.
The Hodge Laplacian of order $k$ can then be written as
\begin{align}
L_k &= L_k^{down}+L_k^{up} \label{eq:up_down_Lk} \\
&:=N_{k-1} N_{k-1}^* + N_k^* N_k\, . \label{eq:weighted_Lk}
\end{align}
For $k=0$, $W_0 = I$ and $W_1 = I$, we obtain the graph Laplacian $L_0=D-A$ with $A$ the simplicial complex 1-skeleton, namely the graph node adjacency matrix, and $D$ the diagonal matrix of the nodes degree. The choice $W_0 = D^{-1}$ defines the normalized graph Laplacian $L_0^{norm} = I - D^{-1}A$.
The graph Laplacian $L_0$ can produce two types of dynamics.
When acting on the left of a distribution $f$, it yields the consensus dynamics $\dot f = -L_0 f$ for any choice of $W_1$ while by acting on the right, it corresponds to the diffusion dynamics $\dot p = -pL_0$.
Equally, both types of dynamics also exist for the edge Laplacian $L_1$~\cite{muhammad2006control,schaub2020random}, defined as
\begin{align}
L_1 = B_0 W_0 B_0^T W^{-1}_1 + W_1 B_1^T W_2^{-1} B_1 \, .
\label{eq:weighted_L1}
\end{align}
We refer to Appendix~\ref{diffusion_kuramoto} for the diffusion formulation of the weighted simplicial Kuramoto model, and we will use the standard approach of consensus formulation in the rest of this paper.
\subsection{Simplicial Kuramoto model}\label{sec:simplicial_kuramoto}
The Kuramoto model~\cite{kuramoto1975self} is typically formulated for a node phase dynamical variable $\theta \in \mathbb R^{n_0}$, with natural frequencies $\omega = (\omega_1, \ldots, \omega_{n_0}) \in \mathbb R^{n_0}$ that are sitting on the nodes of a graph $G = (V, E)$ ($|V|=n_0$, $|E|=n_1$) and interact through the graph adjacency matrix $A_{ij}\in \mathbb R^{n_0\times n_0}$
\begin{align}
\dot \theta_i = \omega_i - \sigma \sum_j A_{ij} \sin(\theta_i - \theta_j)\, .
\label{eq:kuramoto}
\end{align}
For simplicity, we will consider a unit coupling $\sigma=1$ throughout the remainder of this paper and will thus omit it from here on.
The unweighted node Kuramoto model can be equivalently formulated in vector form using the $n_0 \times n_{1}$ incidence matrix $B_0^T$~\cite{jadbabaie2004stability}:
\begin{align}
\dot \theta = \omega - B_0^T\sin(B_0\theta)\, ,
\label{eq:incidence_kuramoto}
\end{align}
which is approximated by the Laplacian dynamics $\dot \theta = \omega - B_0^TB_0\theta = \omega - L_0\theta$ in the limit $B_0\theta\ll 1$.
When $\omega_i=\omega$ for all $i$, it is customary to study the model in a frame rotating at $\omega t$ and thus ignore the internal frequencies, yielding $\dot \theta = -L_0\theta$ in the linear regime.
The Kuramoto model is therefore a nonlinear extension of the consensus dynamics introduced in Section~\ref{sec:sc_hl}.
The weighted simplicial Kuramoto model is then given for a time-dependent $k$-cochain $\theta^{(k)}$, see~\cite{millan2020explosive} for an unweighted version, as
\begin{align}
\dot \theta^{(k)} = -N_{k-1} \mathrm{sin}\left (N_{k-1}^*\theta^{(k)}\right) - N_k^* \mathrm{sin}\left(N_k\theta^{(k)}\right)\, ,
\label{eq:simplicial_kuramoto}
\end{align}
or equally with the weight and incidence matrices
\begin{align}
\dot \theta^{(k)} &= -B_{k-1} \mathrm{sin}\left (W_{k-1} B_{k-1}^T W^{-1}_k\theta^{(k)}\right)\nonumber \\
&\qquad - W_k B_k^T W_{k+1}^{-1} \mathrm{sin}\left (B_k\theta^{(k)}\right ) \,.\label{eq:weighted_simplicial_kuramoto}
\end{align}
We emphasize that the positions of the weight matrices are not arbitrary but constrained by the geometrical nature of the coboundary operators, see Appendix~\ref{discrete_geo}.
In the limit where $\theta$ is close to the subspace $\mathrm{Ker}(L_k)$, we recover the linear consensus dynamics $\dot\theta^{(k)}=-L_k\theta^{(k)}$.
For $k=0$ and a connected graph, the kernel subspace consists of a constant vector, or full synchronization.
Similarly to the node Kuramoto, the internal frequencies of the oscillators can be introduced via a change of rotating frame $\theta^{(k)} \to \theta^{(k)} - h^{(k)} t$ for any vector $h^{(k)} \in \mathrm{ker}(L_k)$.
Indeed, such a vector will leave invariant the nonlinear terms, due to the presence of the boundary operator, and thus only adds a constant drift to the phases.
Again, if we consider $k=0$ and a connected graph, the kernel of $L_0$ is the constant vector, corresponding to the stationary state of consensus dynamics, and thus, by extension, the node Kuramoto model in full synchronization.
For higher-order Kuramoto models $k>0$, the dimension of $\mathrm{ker}(L_k)$ corresponds to the number of $k$-dimensional holes, i.e. holes bounded by $k$-simplices, or --equivalently-- to the Betti number $\beta_k$ of the simplicial complex.
For $k=0$, the Betti number $\beta_0$ corresponds to the number of connected components, and the stationary states are given by the piece-wise constant vectors to which each component will synchronize.
\subsection{Simplicial Sakaguchi-Kuramoto model}\label{sec:frustrated_kuramoto}
The frustration in the Kuramoto model was first introduced in the Sakaguchi-Kuramoto model~\cite{sakaguchi1986soluble}, and has been studied in the context of graph theory, where the graph topology can give rise to rich repertoires of stationary states such as chimera states ~\cite{Abrams:2004hq,Shanahan:2010go} and remote synchronization~\cite{nicosia2013remote}.
The frustrated node Kuramoto model is usually written as
\begin{align}
\dot \theta_i = \omega_i - \sum_j A_{ij} \sin(\theta_i - \theta_j + \alpha_{ij})\, ,
\label{eq:frustrated_kuramoto}
\end{align}
where $\alpha \in \mathbb R^{n_1}$ is the edge frustration vector, often taken to be constant $\alpha_{ij} = \alpha_1$.
This equation cannot be directly formulated using the incidence matrices because the relative sign between the difference of phases $\theta_i-\theta_j$ and $\alpha_{ij}$ must be independent from the orientation of edges.
In the adjacency matrix formulation, the orientation of edges is `hidden', because $L_0=B_0^TB_0$ and $A=D-L_0$ are independent of edge orientation, and the choice of ordering $\theta_i-\theta_j$, instead of $\theta_j-\theta_i$, is possible irrespective of the edge orientation.
If one writes $B_0^T \sin(B_0\theta + \alpha_1)$, the resulting order in the difference of phases depends on the choice of edge orientation and will not be `node-centered', i.e. the $\theta_i$ term will not always appear in front.
Nevertheless, it is possible to introduce a frustration in the general formulation of the Kuramoto model~\eqref{eq:simplicial_kuramoto} with coboundary operators such that it reduces to the frustrated Kuramoto~\eqref{eq:frustrated_kuramoto} for $k=0$ and remains orientation invariant for $k+1$ simplices.
Our construction uses two ingredients: i) lift matrices~\cite{schaub2020random}, defined as
\begin{align}
V_k =
\begin{pmatrix}
I_{n_k}\\
-I_{n_k}
\end{pmatrix}\, ,
\end{align}
for any order $k$, and ii) the projection onto the positive and negative entries of any matrix $X$, defined as
\begin{align}
X^\pm = \frac12 \big(X \pm \left |X\right |\big )\, .
\label{pm-projection}
\end{align}
The lift matrices create duplicates of simplices of order $k$ with an orientation opposite to the original one, whilst the projection sets half of the doubled simplices to zero, i.e. removes them, based on their signs.
One can define the lift of the coboundary operator as
\begin{align}
N_k \to V_{k+1} N_k V_k^T\, .
\label{N_lift}
\end{align}
The projection to positive or negative entries is often used to define directed node graph Laplacians~\cite{grady2010discrete,chapman2015advection} by transforming the edge orientation to an edge direction with either
\begin{align}
L_{0,\mathrm{out}} = N_0^* N_0^+\, , \quad \mathrm{or} \quad L_{0, \mathrm{in}} = (N_0^-)^* N_0\, .
\end{align}
With $D_\mathrm{out/in}$ the diagonal matrices of out- or in- degrees and $A_\mathrm{dir}$ the corresponding directed adjacency matrix, $L_{0,out}$ models the directed diffusion dynamics written explicitly as $L_{0,\mathrm{out}} = D_\mathrm{out} - A_\mathrm{dir}$ and $L_{0,in}$ corresponds to the directed consensus dynamics with $L_{0,\mathrm{in}} = D_\mathrm{in} - A_\mathrm{dir}$.
As we have seen in the construction of the weighted simplicial Kuramoto model~\eqref{eq:simplicial_kuramoto}, we are using the formulation that yields consensus dynamics. We will thus consider the associated projection onto the negative entries of the lifted simplicial Laplacian as
\begin{align}
\widehat L_k = N_{k-1}^-N_{k-1}^* + (N_k^*V_{k+1}^T)^-V_{k+1}N_k\, .
\label{laplacian-hat}
\end{align}
First, we note that $\widehat L_k=L_k$, see Appendix~\ref{lift_proj}, thus the application of the lift and the projection has a trivial effect on the Hodge Laplacian, but crucially it allows us to introduce the frustration via the linear frustration operator
\begin{align}
\mathcal F_k^{\alpha_k}(N_k): x \mapsto N_k x + \alpha_k\, ,
\end{align}
acting on any cochain $x$ and arbitrary frustration cochain $\alpha_k$.
We can now formulate the frustrated simplicial Kuramoto model as
\begin{align}
\dot \theta^{(k)} &= - \mathcal F_k^{\alpha_k}(N_{k-1}) \left[\mathrm{sin}\left (N_{k-1}^*\theta^{(k)}\right)\right] \nonumber \\
&\qquad - (N_k^*V_{k+1}^T)^- \mathrm{sin}\left(\mathcal F_{k+1}^{\alpha_{k+1}}( V_{k+1}N_k)\left[\theta^{(k)}\right]\right)\\
&=-\alpha_k - N_{k-1}\mathrm{sin}\left ( N_{k-1}^* \theta^{(k)}\right) \nonumber \\
&\qquad - (N_k^*V_{k+1}^T)^- \mathrm{sin}\left (V_{k+1} N_k \theta^{(k)} + \alpha_{k+1}\right)\, .
\label{frustrated_simplicial_kuramoto}
\end{align}
By construction, our formulation is independent of the orientation of the $(k+1)$-simplices but not of the $k$-simplices because only the action of the $(k+1)$ lift is non-trivial as it acts inside the nonlinear part of the equation, see Appendix~\ref{lift_proj}.
From this point of view, $\alpha_k$ is a linear frustration, whilst $\alpha_{k+1}$ is a nonlinear frustration.
As in Section~\ref{sec:simplicial_kuramoto}, where the internal frequencies are all equal, $\alpha_k$ can be an arbitrary vector not necessarily in $\mathrm{ker}(L_k)$, which corresponds to equal internal frequencies in the node Kuramoto.
This can lead to a variety of dynamics, including partially synchronized dynamics or even non-stationary dynamics if its amplitude is large enough~\cite{millan2020explosive}.
For $k=0$, we recover the frustrated node Kuramoto model~\eqref{eq:frustrated_kuramoto} as
\begin{align}
\dot \theta^{(0)} = -\alpha_0 - (N_0^* V_1^T)^- \mathrm{sin}\left (V_1 N_0 \theta^{(0)} + \alpha_1\right)\, ,
\end{align}
where the natural frequencies vector $\alpha_0$ naturally appears from the frustration operator acting on the down term, whilst the rest vanishes with $N_{-1}=0$.
The case where $k=1$ constitutes the main equation we consider in the rest of this paper, namely the frustrated edge simplicial Kuramoto model
\begin{align}
\dot \theta^{(1)} &= -\alpha_1 - N_0\mathrm{sin}\left ( N_0^* \theta^{(1)}\right) \nonumber \\
&- (N_1^*V_2^T)^- \mathrm{sin}\left (V_2 N_1 \theta^{(1)} + \alpha_2\right)\, ,
\label{eq:edge_frustrated_kuramoto}
\end{align}
which is invariant under change of face orientations, but not under change of edge orientation.
\subsection{Hodge decomposition of the dynamics}\label{sec:hodge_decomposition}
The Hodge decomposition is an important tool to study the properties of simplicial complexes.
Here, we use it to decompose the dynamics of the oscillators on the simplicial Kuramoto model to understand their properties in relation to the amount of frustration applied.
The Hodge decomposition theorem states that the space of $k$-cochains can be decomposed into three orthogonal spaces~\cite{Eckmann:1945,jiang2011statistical}
\begin{align}
C^{(k)}=\mathrm{Im}(N_{k-1})\oplus \ker(L_k)\oplus \mathrm{Im}(N_k^*)\, ,
\end{align}
which can be seen as analogues to the gradient, harmonic and curl space respectively. When $k=1$ the three orthogonal spaces are exactly the gradient, harmonic and curl space respectively.
Any $k$-cochain $\theta^{(k)}$ can thus be projected onto each subspace $\theta^{(k)} = \theta_\mathrm{grad}^{(k)} + \theta_\mathrm{harm}^{(k)} + \theta_\mathrm{curl}^{(k)}$ as follows
\begin{align}
\begin{split}
\theta_\mathrm{grad}^{(k)} &= N_{k-1} \theta^{(k-1)} \\
L_k\theta_\mathrm{harm}^{(k)} &= 0\\
\theta_\mathrm{curl}^{(k)} &= N_k^* \theta^{(k+1)}\, .
\end{split}
\end{align}
where $\theta^{(k-1)}$ and $\theta^{(k+1)}$ are the corresponding potentials.
Here, instead of computing these potentials, as done for example in~\cite{millan2020explosive}, we project the $k$-cochain $\theta^{(k)}$ onto each subspace using the projection operators
\begin{align}
\begin{split}
P_\mathrm{grad} &= p_{\mathrm{grad}}^Tp_{\mathrm{grad}}\\
P_\mathrm{curl} &= p_{\mathrm{curl}}^Tp_{\mathrm{curl}}\\
P_\mathrm{harm} &= p_{\mathrm{harm}}^T p_{\mathrm{harm}}\, ,
\end{split}
\label{hodge_projections}
\end{align}
where the matrices $p_\mathrm{grad}$ and $p_\mathrm{curl}$ are the orthonormal bases of the ranges of $N_{k-1}$ and $N_k^*$, and $p_\mathrm{harm}$ is the orthonormal basis of the kernel of $L_k$.
\subsection{Simplicial order parameter}\label{sec:SOP}
Probably the most popular and fundamental tool to measure the level of synchronization in a coupled dynamical system is the order parameter. It is usually defined as
\begin{align}
R_{0, c}^2(\theta) :=\frac{1}{n_0} \left | \sum_{i=1}^{n_0}
\exp\left( j\theta_i\right) \right |^2\, ,
\label{eq:node_OP}
\end{align}
where $j=\sqrt{-1}$, and was introduced for the original Kuramoto model on a complete graph, i.e. where all oscillators are coupled.
The generalization of the order parameter to any graph structure~\cite{jadbabaie2004stability} can be expressed as
\begin{align}
R_{0,g}^2(\theta) := 1 + \frac{2}{n_0^2} 1_{n_1}\cdot( \cos(N_0 \theta) -1 )\, ,
\label{order_naive}
\end{align}
where $1_{n_k}$ is the unit vector of dimension $n_k$, see Appendix~\ref{simplicial_order} for the details.
This formulation allows one to write the node Kuramoto model with uniform natural frequencies as a gradient flow of the form
\begin{align}
\dot \theta = -\frac12 n_0^2 \nabla_\theta R_0^2(\theta)\, .
\end{align}
Notice that only the cosine term is needed to express the gradient flow, while the other constant terms are needed for the normalization.
For a simpler derivation, we will thus modify the normalization to define the simplicial order parameter (SOP) as
\begin{align}
R_k^2(\theta^{(k)}) = \frac{1}{C_k}\Big( 1_{n_{k-1}} \cdot W_{k-1}^{-1} \cos(N_{k-1}^*\theta^{(k)})\Big. \nonumber \\
\Big.+1_{n_{k+1}} \cdot W_{k+1}^{-1} \cos(N_k\theta^{(k)})\Big)\, ,
\label{order_general}
\end{align}
where the normalization is $C_k=1_{n_{k-1}}\cdot W_{k-1}^{-1}1_{n_{k-1}} + 1_{n_{k+1}}\cdot W_{k+1}^{-1}1_{n_{k+1}}$ which corresponds to the weighted sum of nodes and faces of the simplex, or the combined number of nodes and faces for unweighted simplicial complexes, see Appendix~\ref{simplicial_order} for details.
As expected, $R_k = 1$ if $\theta^{(k)}$ is in the harmonic space, which corresponds to full synchronization.
Notice that for $k>1$, the harmonic space is in general not spanned by the constant vector, and full synchronization does not correspond to equal $\theta_j$ values on the $k$-simplices.
The simplicial order parameter generalizes the notion of full synchronization to the instantaneous phase vector to be in the harmonic space, where the phases are in general not equal, except in the node Kuramoto case.
This type of harmonic synchronization is therefore akin to a \textit{simplicial} phase locking, in which each higher-order phase evolves with a different proper frequency but overall the whole dynamics lives within the harmonic space, i.e. $\mathrm{ker}(L_k)$ for the corresponding $k$.
In addition, if the dimension of the harmonic space is larger than one, the fully synchronized state is in fact a linear combination of the basis vectors of the harmonic space.
For $\alpha_2=0$ and if $\alpha_1$ is harmonic, the particular linear combinations will be dictated by the choice of $\alpha_1$, or, if absent, by the choice of initial conditions. Thus our formulation extends the notion of full synchronization beyond constant phases to include a generalized harmonic phase lock.
As in the node Kuramoto case, this order parameter acts as a potential for the gradient flow formulation of the full $k$-order Kuramoto dynamics as
\begin{align}
\dot \theta^{(k)} = -C_k W_k\nabla_{\theta^{(k)}} R_k^2(\theta^{(k)})\, .
\end{align}
Note that this formulation does not contain the harmonic natural frequencies which can be recovered, as before, via a change of rotating frame.
Finally, we notice that in the case of the standard node Kuramoto, this measure corresponds to the weighted generalization of~\eqref{order_naive} with a different normalization factor
\begin{align}
\mathcal R_0^2(\theta^{(0)}) = \frac{1}{C_0} 1_{n_1} \cdot W_1^{-1} \cos(N_0\theta^{(0)})\, ,
\label{node_order}
\end{align}
where $C_0 =\sum_{i=0}^{n_1} (W_1^{-1})_{ii}$ is the weighted sum of edges, or for an unweighted graph, the number of edges.
\section{Examples}
\subsection{Frustrated simplicial Kuramoto model on a face}\label{sec:example_1}
\begin{figure*}[htbp]
\centering
\includegraphics[width=0.9\textwidth]{figure_1.png}
\caption{This figure illustrates the effect of frustration in a simplicial complex composed of a single face and with the orientation of an edge reversed (panel {\bf d}).
We scan a range of values for both frustration parameters $\alpha_1$ and $\alpha_2$.
Panel {\bf a} shows the average value of the simplicial order parameter defined in~\eqref{order_general} in the stationary regime of the solution.
Panels {\bf b} and {\bf c} respectively show the slope of the time evolution of the gradient and curl projection of the dynamics.
The regions in white show where the projections are time independent, i.e. constant.
In panel {\bf e} two typical stationary trajectories of the dynamics are shown in the regime with, panel {\bf e(i)}, and without, panel {\bf e(ii)}, a stationary curl. The frustration parameter values are indicated by the magenta and green dots respectively in panels {\bf a,b,c}.
The circle markers on the trajectories are equally spaced in time along one cycle.
Panel {\bf f} illustrates the sharp transition between vanishing and non vanishing slope of the projection of the gradient in {\bf f(ii)} and the resulting change of the trajectories in {\bf f(i)}. The frustration parameter values are shown by round markers of the corresponding colors in panels {\bf a,b,c}.
Notice that both curl projections overlap across the transition in {\bf f(ii)}.
}
\label{fig:figure_1}
\end{figure*}
To showcase the properties of the frustrated simplicial Kuramoto model, we begin with one of the simplest examples: the single face complex of a triangle graph.
The single face triangle complex has no hole and thus the harmonic space of the Hodge Laplacian is of dimension zero. Therefore, to be in the full synchronization regime, defined as $\theta_i=\theta_j\ \forall i,j$ in the absence of harmonic space, one would expect that that state is only accessible for the non-frustrated model with $\alpha_1=\alpha_2=0$ in~\eqref{eq:edge_frustrated_kuramoto}. We will show it is not the case and the dynamics can still reach full-synchronization.
For simplicity, and without loss of generality, we will use $\alpha_1$ as a constant vector on all three edges with amplitude denoted as $\alpha_1$.
In addition, whilst the model is invariant to face orientation, it is not invariant to edge orientation. We thus have two non-equivalent choices for edge orientation: (i) a fully oriented complex, or (ii) one edge oriented in the opposite direction, as shown in Fig.~\ref{fig:figure_1}(d).
In (i) the fully oriented complex, all edges are equivalent and the frustrated simplicial Kuramoto model reduces to the scalar equation
\begin{align*}
\dot \theta = -\alpha_1 - \sin(3\theta + \alpha_2)\, .
\end{align*}
If $|\alpha_1| < 1$, any initial condition will converge, as $t \rightarrow\infty$, to full synchronisation with phase $\theta_\infty =\frac13 \left( \sin^{-1}(-\alpha_1) -\alpha_2\right)$ in the stationary state.
Otherwise, the stationary solution will be periodic around a linearly increasing trend.
In (ii), the case of a flipped edge orientation, only two edges are equivalent, yielding the following coupled differential equations
\begin{align*}
\dot \theta_1 &= -\alpha_1 - \sin(-\theta_1 + \theta_2) - \sin(2\theta_1 - \theta_2 + \alpha_2)\\
\dot \theta_2 &= - \alpha_1 + 2\sin(-\theta_1 + \theta_2) - \sin(2\theta_1 - \theta_2 + \alpha_2)\, .
\end{align*}
We solve these equations numerically for values of $\alpha_1\in[0, 2.5]$ and $\alpha_2\in \left [0, \frac{\pi}{2}\right]$ and show in Fig.~\ref{fig:figure_1}(a-c) three measures that help us characterize the ensuing dynamics.
In Fig.~\ref{fig:figure_1}(a), we plot the simplicial order parameter ~\eqref{order_general} where we observe a full synchronization regime, $R^2_1(\theta^{(1)})=1$, for the non frustrated case with $\alpha_1=0,\ \alpha_2=0$, but also for a large region of the $\alpha_1$ and $\alpha_2$ parameter space.
To understand this regime further in term of the Hodge decomposition of the stationary state, we show in Fig.~\ref{fig:figure_1}(b-c) the slope of the temporal evolution of the projection of the solution onto the gradient and the curl subspaces once a stationary regime is reached. In white, we plot the regions where the projection is constant to distinguish with those cases where the slope is equal to zero but the solution is non-constant, in dark blue.
We highlight some important observations from these plots.
First, the region where the gradient component of the solution is non-constant matches with the region where we observe a large drop in synchronization.
This suggests that when the gradient, and curl, component of the phases becomes too large, the synchronization is abruptly reduced.
Notice that this region is bounded below by $\alpha_1=1$, as for the fully oriented case.
Second, the region where the projection of the curl is not constant is strictly contained within the region of non-constant gradient. This is a general result that we show below.
In Fig.~\ref{fig:figure_1}(e) we show two typical trajectories of (i) non-constant gradient and (ii) non-constant gradient and non-constant curl (corresponding to the magenta and green markers on Fig.~\ref{fig:figure_1}(a-c) respectively).
We observe that the trajectory is a Lissajous curve when the curl component is constant and a more complex trajectory otherwise.
The Lissajous behavior is simply explained by imposing a constant curl, $\theta_2 = 2\theta_1 + \delta$ for a constant $\delta$, which reduces the coupled differential equations to a one dimensional dynamical system
\begin{align*}
\dot \theta_1 = -\alpha_1 - \sin(\alpha_2) - \sin(\theta_1)\, ,
\end{align*}
parameterizing the speed of motion on this curve, represented in Fig.~\ref{fig:figure_1}(e)(i) as dots equally spaced in time.
Finally, in the regime with non-vanishing curl, upper right of Fig.~\ref{fig:figure_1}(a-c), there exists a sharp transition along $\alpha_1$ between almost vanishing and positive gradient slope while the curl projection grows continuously.
In Fig.~\ref{fig:figure_1}(f), we show two trajectories on each side of this transition (blue and orange dots in Fig.~\ref{fig:figure_1}(a-c)) which are partially overlapping where segments of trajectories parallel to the $\mathrm{sin}(\theta_1)$ axis are switching sign of $\mathrm{sin}(\theta_2)$.
A more precise understanding of this transition in the context of dynamical system theory could be of interest but is beyond the scope of this work.
Although simple, this simplicial complex already displays interesting and non-trivial dynamical behavior of the simplicial Sakaguchi-Kuramoto model when the frustrations are turned on.
However, this example does not contain a hole, i.e. there is no harmonic component to the dynamics. We explore the role of the harmonic component of the dynamics in the next section.
\subsection{Synchronization and edge orientation}\label{sec:example_2}
\begin{figure*}[htbp]
\centering
\includegraphics[width=\textwidth]{figure_2.png}
\caption{We consider the simplicial complex in panel {\bf a} comprising a single hole in white, faces in gray and edge orientation with black arrows. To study the effect of edge orientation on the dynamics, we construct two modified simplicial complexes with the (ii) blue edge reversed and (iii) both the blue and red edges reversed.
In {\bf b} we set the linear frustration parameter $\alpha_1=0$ and scan the nonlinear frustration parameter $\alpha_2$ for the three complexes and plot the slope of the projection of the harmonic, gradient and curl component of the solution in the top row.
If the slope value is absent, it corresponds to a constant projection. For example in panel {\bf bi}, only the harmonic projection is non-constant in time. If the slope is present with a value of $0$, the solution is oscillating around a fixed point, for example in panel {\bf biii} for gradient slope at large $\alpha_2$.
In the bottom row, we show the average and standard deviation across time of the simplicial order parameter. The order parameter is $1$ for $\alpha_2=0$ and decreases as the nonlinear frustration increases. The standard deviation of the order parameter allows us to detect in which regime the solution is non-constant in the gradient or curl space. With this simplicial complex, the solution is non-constant only when the grad and/or curl are non-constant, even for high $\alpha_2$, which we will see is in contrast to the next example in Section~\ref{sec:example_3}.
}
\label{fig:figure_2}
\end{figure*}
For our second example, we use a slightly larger simplicial complex which we display in Fig.~\ref{fig:figure_2}(a) to study the properties of the dynamics in the presence of a hole.
We previously mentioned in Sections~\ref{sec:frustrated_kuramoto} and~\ref{sec:SOP}, that if $\alpha_1\in\mathrm{Ker}(L_1)$ and $\alpha_2=0$, the dynamics will fully synchronize with the stationary state $\theta^{(1)}=\alpha_1$. $\alpha_2>0$ will perturb the stationary state by increasing the gradient and curl components, but may remain in a simplicial phase-lock for a wide range of parameters, including at high frustration.
In Fig.~\ref{fig:figure_2}(b)(i), without loss of generality, we fix $\alpha_1=0$ and scan $\alpha_2\in \left [0, \frac{\pi}{2}\right ]$ and observe that for a given choice of edge orientation, the level of synchronization, as measured by the simplicial order parameter, decreases with $\alpha_2$, while its standard deviation remains null, which is an indicator of simplicial phase-locking.
The projections onto the gradient and the curl spaces are constant, while the harmonic projection is not.
We also notice that the dynamics are very sensitive to changes in orientation. Reversing the orientation of the blue edge, Fig.~\ref{fig:figure_2}(a), has a dramatic impact on the solution, with the gradient component becoming non-constant for some choices of $\alpha_2$, see Fig.~\ref{fig:figure_2}(b)(ii). Reversing the orientation of both the blue and red edges make both the gradient and curl components non-constant (see Fig.~\ref{fig:figure_2}(b)(iii)).
Similar to the first example of the single face triangle complex, we observe that the projection onto the curl is non-constant only if the projection onto the gradient is also non-constant.
We now show the existence of two critical values for $\alpha_2$ corresponding to changes of regime: $\alpha_{2, g}$ when the gradient becomes non-constant and $\alpha_{2, c}$ when the curl becomes non-constant, and that $\alpha_{2, c}\geq \alpha_{2, g}$.
For simplicity, we set $\alpha_1=0$, but any $\alpha_1\in\mathrm{Ker}(L_1)$ can be considered.
First, for small $\alpha_2 < \alpha_{2, g}$, i.e. when the gradient and curl component are constant in the simplicial phase-lock regime, the solution is of the form
\begin{align*}
\theta_{\infty}(t) = \Omega t h + \epsilon\, ,
\end{align*}
for a scalar $\Omega$, $h\in \mathrm{Ker}(L_1)$ and $\epsilon \in\mathrm{Ker}(L_1)^\perp $ small, and thus
\begin{align}
\Omega h = - L_1 \epsilon - (N^*_1V_2)^- \mathbf 1\alpha_2\, .
\label{omega-h}
\end{align}
The term $(N^*_k V_{k+1})^- 1_{n_{k+1}}$ counts the number of $(k+1)$-simplices adjacent to each $k$-simplex and is therefore a generalized degree. $(N^*_0 V_1)^- 1_{n_1}$ is simply the weighted node degree and $(N^*_1 V_2)^- 1_{n_2}$ the weighted edge degree.
From equation~\eqref{omega-h}, $\Omega$, $h$ and $\epsilon$ are defined as
\begin{align}
\Omega h &= - P_\mathrm{harm} (N^*_1V_2)^- \mathbf 1\alpha_2\label{Omega}\\
L_1 \epsilon &= - P_\mathrm{harm}^\perp (N^*_1V_2)^- \mathbf 1\alpha_2\label{epsilon}\, .
\end{align}
In this linear approximation, there always exists a solution of equation~\eqref{epsilon} for $\epsilon$, but, in the nonlinear regime, the presence of the sine function may prevent any solution from existing and the system will leave the phase-locked regime for $\alpha_2 > \alpha_{2, g}$.
The exact value of $\alpha_{2, g}$ is difficult to find analytically, as we see in Fig.~\ref{fig:figure_2}, it depends not only on the structure of the simplicial complex but also on the edge orientation.
In addition, if the dimension of the kernel of $L_1$ is larger than $1$, the direction of the vector $h$ in the harmonic space may also depend on $\alpha_2$.
For small $\alpha_2$, the value of $\Omega$ is represented in Fig.~\ref{fig:figure_2}(b) by the value of the harmonic slope (in green), and increases quasi-linearly as a function of $\alpha_2$ as expected from equation~\eqref{Omega}.
For larger values of $\alpha_2$, the previous linearization is not valid and cannot be used to correctly approximate the dynamics.
However, the Hodge decomposition is still valid and the corresponding projection operators defined in equations~\eqref{hodge_projections} allow us to decompose the simplicial Kuramoto equation in its gradient and rotational parts as
\begin{align}
\dot \theta_g := P_\mathrm{grad}\dot \theta^{(1)} &= - N_0 \sin(N_0^*\theta_g)\nonumber\\
\qquad &- P_\mathrm{grad} (N_1^*V_2)^- \sin(V_2N_1\theta_c + \alpha_2)\label{eq:coupling}\\
\dot \theta_c := P_\mathrm{curl} \dot \theta^{(1)} &= - P_\mathrm{curl} (N_1^*V_2)^- \sin(V_2N_1\theta_c + \alpha_2)\, ,
\end{align}
where $\theta^{(1)} = \theta_g + \theta_c + \theta_h$ from the Hodge decomposition.
These two equations are coupled by the term $P_\mathrm{grad}(N_1^*V_2)^-\sin(V_2 N_1 \theta)$ which vanishes if $\alpha_2=0$ because $ P_\mathrm{grad}(N_1^*V_2)^- \sin(V_2 N_1 \theta) = P_\mathrm{grad}N_1^* \sin (N_1\theta)=0$.
The fact that, in the absence of nonlinear frustration, the curl, grad and harmonic projection of the dynamics are decoupled was already noted in~\cite{millan2020explosive} and is a direct result of the orthogonality of these three spaces.
The nonlinear frustration makes the dynamics of the gradient projection depend on the solution of the curl projection.
This coupling relies on the presence of the lift and projections which are necessary to preserve the independence on the face orientation of the dynamics.
The presence of the coupling in the gradient equation explains why the dynamics of the curl projection can be non-constant only if the dynamics of the grad projection is also non-constant.
Indeed, for $\alpha_{2, g} < \alpha_2 < \alpha_{2, c}$, the curl projection equation is stationary with a time-independent $\theta_{c, \infty}$ solution, i.e. $\dot \theta_{c, \infty} = 0$.
The coupling term in the grad projection is then a constant, which we denote as $\delta$, and the Kuramoto dynamics reduces to
\begin{align*}
\dot \theta_g = - \delta -N_0 \sin(N_0^*\theta_g) \, .
\end{align*}
These dynamics correspond to the edge Kuramoto model on the complex without faces and a non-harmonic natural frequency $\delta$. It has a transition from synchronization to non-synchronization regime at $\alpha_{2, g}$.
For $\alpha_2 > \alpha_{2, c}$, the dynamics are non-stationary in the curl projection and in the gradient due to the presence of $\delta$.
While the order of the transitions to non-stationarity for the different components holds whenever they exist, their existence and exact behavior are dependent on the orientation of the edges and the localization of holes.
Remarkably, even the simple example we used here displays an abundance of varying behaviors, of which we have only described representative examples: we observe no transitions in Fig.~\ref{fig:figure_2}(b)(i), only a gradient transition as in Fig.~\ref{fig:figure_2}(b)(ii) or two transitions as in Fig.~\ref{fig:figure_2}(b)(iii) with a near singular re-phase-locked synchronization gap.
In Fig.~\ref{fig:figure_2}(b)(ii-iii), we also observe a re-synchronization to a phase-locked regime for $\alpha_2>\alpha_{2, c}$ until $\frac{\pi}{2}$, possibly a result of the small size of the complex and high degree of symmetry.
Indeed, as we observe in the next section, this regime does not exist for larger, more irregular complexes, see Fig.~\ref{fig:figure_3}, and is replaced by a more chaotic regime.
\subsection{Larger simplicial complex}\label{sec:example_3}
\begin{figure}[htpb]
\centering
\includegraphics[width=0.9\columnwidth]{figure_3.png}
\caption{We consider the simplicial complex of panel {\bf a} obtained from a Delaunay triangulation of random points on a plane around two circular holes. In panel {\bf b}, we set the linear frustration parameter $\alpha_1=0$ and scan across the nonlinear frustration parameter $\alpha_2$. We plot the slope of the projection of the harmonic, gradient and curl component of the solution in the top row. If the slope value is absent, it corresponds to a constant projection. On the bottom row, we show the average and standard deviation across time of the simplicial order parameter. In addition to the two critical $\alpha_2$, we manually highlighted two possible points corresponding to the onset of chaotic dynamics regimes with $\alpha_{2, a}$ and $\alpha_{2, b}$.
The standard deviation of the curves in the top of ${\bf b}$, which are calculated over $10$ simulations with random initial conditions, is zero, except for large $\alpha_2$ where it is small and is represented by the thickness of the curves.
In {\bf c}, the dark line represents the average largest Lyapunov exponent over edges, and the shaded gray region between the lower and upper quartile of the corresponding distribution.
For the sake of comparison, we have included a light brown curve that is the mean largest Lyapunov exponent for the simplicial complex in Fig.~\ref{fig:figure_2}(b)(iii).
}
\label{fig:figure_3}
\end{figure}
Until now, we have studied the frustrated simplicial Kuramoto dynamics on small simplicial complexes in order to study and understand in detail the effects of the frustration on the dynamics.
As a final example for this paper, we consider a larger simplicial complex constructed from a Delaunay triangulation of random points on a plane around two circular holes as illustrated in Fig.~\ref{fig:figure_3}(a).
In Fig.~\ref{fig:figure_3}(b), we show the same analysis as in Fig.~\ref{fig:figure_2} with the slope of the projections and the simplicial order parameter.
As expected, we observe more complex dynamics from the shape of these curves, obtained after averaging over $10$ simulations with random initial conditions.
In particular, we do not observe any re-synchronization for large $\alpha_2$ but rather an even more complex set of dynamics as shown by the standard deviation of the simplicial order parameter in Fig.~\ref{fig:figure_3}(b).
To better quantify these complex dynamics, we compute the largest Lyapunov exponent~\cite{rosenstein1993practical, scholzel_christopher_2019_3814723} of the trajectories of each edge phase and show in Fig.~\ref{fig:figure_3}(c) the mean and quartile of them for each value of $\alpha_2$.
As soon as the dynamics are no longer constant, i.e. $\alpha_2>\alpha_{2, g}$, the largest Lyapunov exponent is on average positive, but increases significantly for larger $\alpha_2$, clearly indicative of chaotic dynamics.
We visually noticed two different regimes of chaotic dynamics, which we highlighted with $\alpha_{2, a}$ and $\alpha_{2, b}$, corresponding to the start of the decrease of the slope of the gradient and the curl projection, respectively.
For $\alpha_2>\alpha_{2, b}$, we also observe some sensitivity to initial conditions in the value of the slope of the projection; the line thickness is the standard deviation across $10$ simulations with random initial conditions.
These two regimes would be interesting to study in more detail, since a decrease of the slope of the projection can either suggest more synchronization, as in the examples of Fig.~\ref{fig:figure_2}, or more random or chaotic dynamics, as in Fig.~\ref{fig:figure_3}.
Understanding the transition between these two regimes in terms of the complexity of the simplicial complex, where complexity is for example measured by the number of holes, their relative localization and the symmetries of the simplicial complex, is an open problem.
The Lyapunov exponent seems however a promising measure to identify the switching between the two regimes: for the example of Fig.~\ref{fig:figure_2}(biii) it remains at low values (see brown line in Fig.~\ref{fig:figure_3}c) and does not increase for large $\alpha_2$.
\section{Conclusion}
In this work, we extend a previously introduced Kuramoto model on simplicial complexes~\cite{millan2020explosive} to include weights on any simplices as well as a linear and a non-linear frustration term to define the simplicial Sakaguchi-Kuramoto.
This formulation naturally allows us to generalize the notion of synchronization, internal frequencies, and edge frustration, of the standard Kuramoto model.
Without frustration, the Kuramoto dynamics can be decomposed into three independent sub-systems aligned with the orthogonal spaces given by the Hodge decomposition.
However, we have demonstrated that by adding frustration to the dynamics, the harmonic, gradient and curl subspaces become hierarchically coupled, see equation~\eqref{eq:coupling}.
The dynamics in the harmonic space is coupled to both the gradient and curl subspaces even in the absence of harmonic linear frustration. In the linear regime of small nonlinear frustration, the amplitude of the dynamics in the harmonic space is proportional to the amount of frustration.
In the nonlinear regime of the simplicial Sakaguchi-Kuramoto model, we showed that the dynamics are highly varied, from constant to chaotic solutions.
Most surprisingly, the edge orientation is of fundamental importance in the resulting Kuramoto dynamics and the change of orientation of one edge can be enough to dramatically alter the dynamics.
Understanding the precise relationship between the choice of orientation for a given simplicial complex and the resulting type of dynamics has remained elusive so far but would be an interesting topic to gain further understanding of these systems particularly in the context of control.
We foresee various interesting directions for further interrogation of our frustrated simplicial Kuramoto and also additional adaptions. Firstly, despite having introduced weights on the simplices, we did not explicitly study their role in the resulting dynamics since it would require obtaining a meaningful choice or parameterization of the weights, and this lies outside the scope of this work. However, looking forward we believe that assessing the role of weights would be an interesting avenue of study.
Secondly, whilst we used consensus dynamics in our formulation, we also mentioned earlier the dual formulation of the diffusion Kuramoto (see Appendix~\ref{diffusion_kuramoto}). Indeed, examining how the dynamics of the consensus and diffusion formulations deviate in the weighted setting could be of interest. Thirdly, we did not explore the possible interplay between the linear and nonlinear frustration.
The linear frustration is known to have a transition between stationary and non-stationary regimes~\cite{millan2020explosive}, but may also affect the types of dynamics with nonlinear frustration, which can even be made non-constant.
Finally, as we have shown with our three examples, the topology of the simplicial complex is crucial to determine the type of dynamics and in particular its complexity.
A more complete characterisation in terms of graph theoretical or topological measures would be of interest to identify the criteria necessary for the transition between non-stationary and chaotic dynamics.
In fact, particular geometries of simplicial complexes may support more specific types of dynamics, with maybe partial, cluster or metastable synchronisations.
\section*{Acknowledgments}
GP acknowledges partial support from Intesa Sanpaolo Innovation Center. The founder had no role in study design, data collection, and analysis, decision to publish, or preparation of the manuscript.
RP acknowledges funding through EPSRC award EP/N014529/1 supporting the EPSRC Centre for Mathematics of Precision Healthcare at Imperial and the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) Project-ID 424778381-TRR 295. PE acknowledges support from the NIHR Imperial Biomedical Research Centre (BRC) (grant number NIHR-BRC-P68711).
\section{Introduction}
We will demonstrate a relationship between a subclass of combinatorial games, such as \textsc{Domineering} and \textsc{Col}, and algebraic structures defined on simplicial complexes. There are two relationships, one via the maximal legal positions and the other through the minimal illegal positions. We will begin by giving the necessary background, first from combinatorial game theory, then from combinatorial commutative algebra.
For a game \textit{perfect information} means that both players know which game they are playing, on which board, and the current position. No \textit{chance} means that no dice can be rolled or cards can be dealt, or any other item involving probability can be used.
\begin{definition}\label{def:game}
A \textit{combinatorial game} is a 2-player game with perfect information and no chance, where the two players are \textit{Left} and \textit{Right} (denoted by $L$ and $R$ respectively) and they do not move simultaneously. Then a game is a set $P$ of \textit{positions} with a specified starting position. \textit{Rules} determine from which position to which position the players are allowed to move. A \textit{legal position} is a position that can be reached by playing the game from the starting position (which is legal) according to the rules. Moving from position $P$ to position $Q$ is called a \textit{legal move} if both $P$ and $Q$ are legal positions and the move is allowed according to the rules. $Q$ is usually called an option of $P$.
\end{definition}
In this paper, a combinatorial game will be denoted by its name in \textsc{Small Caps}. Well known examples of combinatorial games are \textsc{Chess}, \textsc{Checkers}, \textsc{Tic-Tac-Toe}, \textsc{Go}, and \textsc{Connect Four}. Examples of games that are not combinatorial games include bridge, backgammon, poker, and Snakes and Ladders.
Although games usually have a `winning condition' associated to them, {i.e}\onedot} \def\Ie{{I.e}\onedot rules as to which player wins, for the purposes of this paper games do not need to have a notion of winning identified.
We will assume that the board on which games are played is a graph (or can be represented as a graph). A space on a board is then equivalent to a vertex and we use the two terms interchangeably.
\begin{definition}
A \textit{strong placement game} is a combinatorial game which satisfies the following:
\begin{itemize}
\item[(i)] The starting position is the empty board.
\item[(ii)] Players place pieces on empty spaces of the board according to the rules.
\item[(iii)] Pieces are not moved or removed once placed.
\item[(iv)] The rules are such that if it is possible to reach a position through a sequence of legal moves, then any sequence of moves leading to this position consists only of legal moves.
\end{itemize}
The \textsc{Trivial} placement game on a board is the strong placement game that has no additional rules.
\end{definition}
A \textit{basic position} is a board with only one piece placed. Any position, whether legal or illegal, in a strong placement game can be decomposed into basic positions.
The concept of a placement game originates in Brown {et al}\onedot \cite{GamePol} where condition (iv) is replaced by the condition that if it is legal to place a piece at one point, it must have been legal at any point before. We call this type of game a `medium placement game'. A `weak placement game' is a combinatorial game that satisfies the above conditions (i) through (iii).
Note that (iv) implies that every subposition of a legal position is also legal.
Placement games were only recently defined formally by Brown {et al}\onedot in \cite{GamePol}, even though several placement games, for example \textsc{Tic-Tac-Toe} or \textsc{Domineering}, have been known and studied for a long time. In this work, we will consider strong placement games exclusively.
Throughout this paper `placement game' refers to a strong placement game.
Here are three more we will use as examples.
\begin{definition}
In \textsc{Snort}, players may not place pieces on a vertex adjacent to a vertex containing a piece from their opponent.
\end{definition}
\begin{definition}\label{def:col}
In \textsc{Col}, players may not place pieces on a vertex adjacent to a vertex containing one of their own pieces.
\end{definition}
\begin{definition}
In \textsc{NoGo}, at every point in the game, for each maximal group of connected vertices of the board that contain pieces placed by the same player, one of these needs to be adjacent to an empty vertex.
\end{definition}
In these games, the pieces only occupy one vertex each, which is in fact not necessary. For example in \textsc{Crosscram} \cite{Ga74} and \textsc{Domineering} \cite{BCG04} the players' pieces occupy two adjacent vertices.
\begin{definition}
The \textit{disjunctive sum} between two positions of combinatorial games $G$ and $H$ is the position in which a player can play in one of $G$ and $H$ but not both simultaneously.
\end{definition}
Assuming implicitly that placement games are part of a disjunctive sum implies that a board might be filled with more pieces of one player than of the other. Making this assumption is very useful since in many placement games the board might `break up' into the disjunctive sum of smaller boards.
\begin{example}
For an example, consider \textsc{Col } played on the path $P_7$. Then the position on the left of Figure \ref{fig:disjunctive3} is equivalent to the one in which the middle space is `deleted' (on the right), {i.e}\onedot} \def\Ie{{I.e}\onedot it is equivalent to the disjunctive sum of the two \textsc{Col } positions on the right, one of which has two Right pieces but no Left pieces.
\begin{figure}[!ht]
\begin{center}
\begin{tikzpicture}[scale=0.5]
\foreach \y in {0,1}{
\draw (0,\y)--(7,\y);
\draw (8,\y)--(11,\y);
\draw (12,\y)--(15,\y);}
\foreach \x in {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}{
\draw (\x,0)--(\x,1);}
\node at (0.5, 0.5) {$R$};
\node at (2.5, 0.5) {$R$};
\node at (3.5, 0.5) {$L$};
\node at (4.5, 0.5) {$R$};
\node at (6.5, 0.5) {$L$};
\node at (8.5, 0.5) {$R$};
\node at (10.5, 0.5) {$R$};
\node at (12.5, 0.5) {$R$};
\node at (14.5, 0.5) {$L$};
\node at (7.5, 0.5) {$\cong$};
\node at (11.5, 0.5) {$+$};
\end{tikzpicture}
\end{center}
\caption{A \textsc{Col } Position That is the Disjunctive Sum of Two \textsc{Col } Positions}
\label{fig:disjunctive3}
\end{figure}
\end{example}
For a placement game $G$ and a board $B$, let \[f_i(G, B)\] denote the number of positions with $i$ pieces played, regardless of which player the pieces belong to. If the game and board are clear from context, we shorten the notation to $f_i$.
\begin{definition}[Brown {et al}\onedot \cite{GamePol}]\label{def:gamepol}
For a game $G$ played on a board $B$, the \textit{game polynomial} is defined to be
\[P_{G, B}(x)=\sum_{i=0}^k f_i(G,B)x^i.\]
$P_{G,B}(1)$ is then the total number of legal positions of the game.
\end{definition}
The motivation for game polynomials came from Farr \cite{Fa03} in 2003 where the number of end positions and some polynomials of the game \textsc{Go} were considered, and work in this area was continued by Tromp and Farneb\"ack \cite{TF07} in 2007 and by Farr and Schmidt \cite{FS08} in 2008. Even though \textsc{Go} is not a placement game since pieces are removed, it shares many properties with this class of games. Thus it was natural for the authors of \cite{GamePol} to consider the concept of game polynomials for placement games.
We will now introduce concepts from combinatorial commutative algebra that we will need to construct simplicial complexes equivalent to placement games.
\begin{definition}
A \textit{simplicial complex} $\Delta$ on a finite vertex set $V$ is a set of subsets (called \textit{faces}) of $V$ with the conditions that if $A\in \Delta$ and $B\subseteq A$, then $B\in \Delta$. The \textit{facets} of a simplicial complex $\Delta$ are the maximal faces of $\Delta$ with respect to inclusion. A \textit{non-face} of a simplicial complex $\Delta$ is a subset of its vertices that is not a face. The \textit{$f$-vector} $(f_0, f_1, \ldots, f_k)$ of a simplicial complex $\Delta$ enumerates the number of faces $f_i$ with $i$ vertices. Note that if $\Delta\neq\emptyset$, then $f_0=1$.
\end{definition}
In the algebraic literature, the $f$-vector of a complex is usually indexed from $-1$ to $k-1$ as this is the ``dimension'' of the face (the number of vertices minus 1). Due to the connection between placement games and simplicial complexes, we have chosen the combinatorial indexing.
Recall that an \textit{ideal} $I$ of a ring $R=R(+,\cdot)$ is a subset of $R$ such that $(I,+)$ is a subgroup of $R$ and $rI\subseteq I$ for all $r\in R$.
Let $k$ be a field and $R=k[x_1,\ldots,x_n]$ a polynomial ring. Given a simplicial complex $\Delta$ on $n$ vertices, we can label each vertex with an integer from $1$ to $n$. Each face $F$ (resp.~non-face $N$) of $\Delta$ can then be represented by a square-free monomial of $R$ by including $x_i$ in the monomial representing the face $F$ (resp.~the non-face $N$) if and only if the vertex $i$ belongs to $F$ (resp.~$N$). We then have the following (see \cite{BH93} and \cite{Fa02} for more information):
\begin{definition}\label{def:SRFideal}
The \textit{facet ideal} of a simplicial complex $\Delta$, denoted by $\facetI{}$, is the ideal generated by the monomials representing the facets of $\Delta$. The \textit{Stanley-Reisner ideal} of a simplicial complex $\Delta$, denoted by $\SRideal{}$, is the ideal generated by the monomials representing the minimal non-faces of $\Delta$.
\end{definition}
\begin{definition}\label{def:SRFcomplex}
The \textit{facet complex} of a square-free monomial ideal $I$, denoted by $\facetsc{}$, is the simplicial complex whose facets are represented by the square-free monomials generating $I$. The \textit{Stanley-Reisner complex} of a square-free monomial ideal $I$, denoted by $\SRsc{}$, is the simplicial complex whose faces are represented by the square-free monomials not in $I$.
\end{definition}
To clarify these concepts, we will give two examples:
\begin{example}
Consider the simplicial complex $\Delta$ in Figure \ref{fig:SRfacetex1} with the labeling of the vertices as given.
\begin{figure}[!ht]
\begin{center}
\begin{tikzpicture}[scale=1.5]
\draw[line width=1.3] (0,0)--(2,0);
\draw[line width=1.3] (0,1.5)--(1,2)--(2,1.5);
\filldraw[fill=gray!70, line width=1.3] (0,1.5)--(0,0)--(1,0.75)--cycle;
\filldraw[fill=gray!70, line width=1.3] (2,1.5)--(2,0)--(1,0.75)--cycle;
\filldraw (0,0) circle (0.066cm);
\filldraw (2,0) circle (0.066cm);
\filldraw (0,1.5) circle (0.066cm);
\filldraw (1,2) circle (0.066cm);
\filldraw (2,1.5) circle (0.066cm);
\filldraw (1,0.75) circle (0.066cm);
\draw (-0.35,0) node {$3$};
\draw (-0.35,1.5) node {$2$};
\draw (2.35,0) node {$5$};
\draw (2.35,1.5) node {$6$};
\draw (1,0.5) node {$4$};
\draw (1,1.75) node {$1$};
\end{tikzpicture}
\end{center}
\caption{An Example of a Simplicial Complex}
\label{fig:SRfacetex1}
\end{figure}
The facet ideal of $\Delta$ then is
\[\facetI{}=\langle x_1x_2, x_1x_6, x_2x_3x_4, x_3x_5, x_4x_5x_6\rangle,\]
and the Stanley-Reisner ideal of $\Delta$ is
\[\SRideal{}=\langle x_1x_3, x_1x_4, x_1x_5, x_2x_5, x_2x_6, x_3x_4x_5, x_3x_6\rangle.\]
\end{example}
\begin{example}
Consider the square-free monomial ideal $I=\langle x_1x_3, x_2x_3x_4\rangle$. The facet complex $\facetsc{}$ is given in Figure \ref{fig:SRfacetex2f} and the Stanley-Reisner complex $\SRsc{}$ is given in Figure \ref{fig:SRfacetex2SR}.
\begin{figure}[!ht]
\begin{center}
\begin{tikzpicture}
\draw[line width=2] (0,0)--(0,2);
\filldraw[fill=gray!70, line width=2] (0,0)--(2,0)--(2,2)--cycle;
\filldraw (0,0) circle (0.1cm);
\filldraw (2,0) circle (0.1cm);
\filldraw (0,2) circle (0.1cm);
\filldraw (2,2) circle (0.1cm);
\draw (-0.35,0) node {$3$};
\draw (-0.35,2) node {$1$};
\draw (2.35,0) node {$2$};
\draw (2.35,2) node {$4$};
\end{tikzpicture}
\end{center}
\caption{Facet Complex of $I=\langle x_1x_3, x_2x_3x_4\rangle$}
\label{fig:SRfacetex2f}
\end{figure}
\begin{figure}[!ht]
\begin{center}
\begin{tikzpicture}
\filldraw[fill=gray!70, line width=2] (-2,0)--(0,0)--(0,-2)--cycle;
\draw[line width=2] (0,0)--(2,0);
\draw[line width=2] (2,0)--(0,-2);
\filldraw (0,0) circle (0.1cm);
\filldraw (2,0) circle (0.1cm);
\filldraw (0,-2) circle (0.1cm);
\filldraw (-2,0) circle (0.1cm);
\draw (-2,0.5) node {$1$};
\draw (0,0.5) node {$2$};
\draw (2,0.5) node {$3$};
\draw (0,-2.5) node {$4$};
\end{tikzpicture}
\end{center}
\caption{Stanley-Reisner Complex of $I=\langle x_1x_3, x_2x_3x_4\rangle$}
\label{fig:SRfacetex2SR}
\end{figure}
\end{example}
It is clear that the facet operators are inverses of each other, i.e. $\mathcal{F}(\facetI{})=\Delta$ and $\mathcal{F}(\facetsc{})=I$, from their definitions. This is also true of the Stanley-Reisner operators: A minimal non-face of $\SRsc{}$ is a minimal monomial generator of $I$, thus a generator of $I$, showing $\mathcal{N}(\SRsc{})=I$. Similarly, since $\SRideal{}$ contains all monomials representing non-faces, a square-free monomial not in $\SRideal{}$ has to be a face of $\Delta$, thus $\mathcal{N}(\SRideal{})=\Delta$.
This shows that both the facet and the Stanley-Reisner operators give a bijection between the set of all square-free monomial ideals in $n$ variables and the set of all simplicial complexes on $n$ vertices.
\section{Constructing Monomials and Simplicial Complexes from Placement Games}
We will now introduce a construction that associates a set of monomials and a simplicial complex to each placement game.
Given a placement game $G$ on a board $B$, we can construct a set of square-free monomials in the following way: First, label the basic positions by $1,2,\ldots,n$. For each legal position we then create a square-free monomial by including $x_i$ if Left has played in position $i$ and $y_j$ if Right has placed in position $j$. The empty position (before anyone has started playing) is represented by $1$.
\begin{example}\label{ex:Col2}
Consider \textsc{Col } played on the path $P_3$. We label the basic positions, in this case the spaces of the board, as given in Figure \ref{fig:ColP3label}.
\begin{figure}[!ht]
\begin{center}
\begin{tikzpicture}[scale=0.5]
\draw (0, 0) -- (3, 0) -- (3, -1) -- (0, -1) -- (0, 0);
\draw (1, 0) -- (1, -1);
\draw (2, 0) -- (2, -1);
\draw (0.5, -0.5) node {1};
\draw (1.5, -0.5) node {2};
\draw (2.5, -0.5) node {3};
\end{tikzpicture}
\end{center}
\caption{Labeling $P_3$}
\label{fig:ColP3label}
\end{figure}
The maximum legal positions and their corresponding monomials are given in Figure \ref{fig:ColP3maxpos}.
\begin{figure}[!ht]
\begin{center}
\begin{tikzpicture}[scale=0.5]
\draw (0, 0) -- (3, 0) -- (3, -1) -- (0, -1) -- (0, 0);
\draw (1, 0) -- (1, -1);
\draw (2, 0) -- (2, -1);
\draw (0.5, -0.5) node {$L$};
\draw (1.5, -0.5) node {$R$};
\draw (2.5, -0.5) node {$L$};
\draw (5,-0.5) node {$x_1y_2x_3$};
\draw (8, 0) -- (11, 0) -- (11, -1) -- (8, -1) -- (8, 0);
\draw (9, 0) -- (9, -1);
\draw (10, 0) -- (10, -1);
\draw (8.5, -0.5) node {$L$};
\draw (10.5, -0.5) node {$R$};
\draw (13,-0.5) node {$x_1y_3$};
\draw (0, -2) -- (3, -2) -- (3, -3) -- (0, -3) -- (0, -2);
\draw (1, -2) -- (1, -3);
\draw (2, -2) -- (2, -3);
\draw (0.5, -2.5) node {$R$};
\draw (1.5, -2.5) node {$L$};
\draw (2.5, -2.5) node {$R$};
\draw (5,-2.5) node {$y_1x_2y_3$};
\draw (8, -2) -- (11, -2) -- (11, -3) -- (8, -3) -- (8, -2);
\draw (9, -2) -- (9, -3);
\draw (10, -2) -- (10, -3);
\draw (8.5, -2.5) node {$R$};
\draw (10.5, -2.5) node {$L$};
\draw (13,-2.5) node {$y_1x_3$};
\end{tikzpicture}
\end{center}
\caption{Maximum Legal Positions for \textsc{Col } on $P_3$}
\label{fig:ColP3maxpos}
\end{figure}
\end{example}
Using these monomials, we can build a simplicial complex $\Delta_{G,B}$ on the vertex set $V=\{x_1, \ldots, x_n, y_1, \ldots, y_o\}$ by letting a subset $F$ of $V$ be a face if and only if there exists a square-free monomial $m$ representing a legal position such that each element of $F$ divides $m$.
\begin{definition}\label{def:gamecomplex}
A simplicial complex that can be constructed from a placement game $G$ on a board $B$ in this way is called a \textit{legal complex} and is denoted by $\Delta_{G,B}$.
\end{definition}
\begin{example}\label{ex:ColP3}
Consider \textsc{Col } played on the path $P_3$. Using the notation from Example \ref{ex:Col2}, we get the legal complex $\Delta_{\textsc{Col}, P_3}$ as given in Figure \ref{fig:ColP3sc}.
\begin{figure}[!ht]
\begin{center}
\begin{tikzpicture}[scale=1]
\filldraw[fill=gray!70, line width=2pt, draw=black] (0, 0)--(1.73, 1)--(1.73, -1)-- cycle;
\filldraw[fill=gray!70, line width=2pt, draw=black] (5.46, 0)--(3.73, 1)--(3.73, -1)-- cycle;
\draw[line width=2pt] (1.73, 1)--(3.73, 1);
\draw[line width=2pt] (1.73, -1)--(3.73, -1);
\filldraw (0,0) circle (0.1cm);
\filldraw (1.73,1) circle (0.1cm);
\filldraw (1.73,-1) circle (0.1cm);
\filldraw (5.46,0) circle (0.1cm);
\filldraw (3.73,1) circle (0.1cm);
\filldraw (3.73,-1) circle (0.1cm);
\draw (-0.7,0) node {$y_2$};
\draw (1.73,1.4) node {$x_1$};
\draw (1.73,-1.5) node {$x_3$};
\draw (5.98,0) node {$x_2$};
\draw (3.73,1.4) node {$y_3$};
\draw (3.73,-1.5) node {$y_1$};
\end{tikzpicture}
\end{center}
\caption{The Legal Complex $\Delta_{\textsc{Col}, P_3}$}
\label{fig:ColP3sc}
\end{figure}
\end{example}
Observe that the maximum legal positions of a game, i.e., the positions in which no piece can be placed by either Left or Right (so the game ends), correspond to the facets of $\Delta_{G,B}$ and thus uniquely determine $\Delta_{G,B}$.
In game theoretic terms, the $f$-vector of a legal complex $\Delta_{G,B}$ indicates that there are $f_i$ legal positions with $i$ pieces in the game $G$, regardless of whether the pieces belong to Left or to Right. Thus for placement games the entries of the $f$-vector of the legal complex $\Delta_{G,B}$ are the coefficients of the game polynomial $P_{G, B}$. Therefore we have the following:
\begin{proposition}
\begin{align*}
f_i(G,B)&=\text{number of legal positions in }G\text{ with }i\text{ pieces played on }B,\\
&=\text{number of degree }i\text{ monomials representing legal positions in }G,\\
&=\text{number of faces with }i\text{ vertices in }\Delta_{G,B},
\end{align*}
and we can use any of these concepts to find $f_i$.
\end{proposition}
This also justifies using the same notation for the coefficients of a game polynomial as for entries of an $f$-vector.
We now give three more examples for the construction of monomials and simplicial complexes.
\begin{example}
The cycle $C_3$ is labelled as in Figure \ref{fig:ColC3label}.
\begin{figure}[!ht]
\begin{center}
\begin{tikzpicture}[scale=0.5]
\draw[line width=2pt, fill=black] (0,0) circle (0.15cm) -- (1,-2) circle (0.15cm) -- (-1,-2) circle (0.15cm)-- (0,0);
\draw (0.7, 0) node {1};
\draw (1.7, -2) node {2};
\draw (-1.7, -2) node {3};
\end{tikzpicture}
\end{center}
\caption{Labeling $C_3$}
\label{fig:ColC3label}
\end{figure}
Now consider \textsc{Col } on $C_3$. The monomials corresponding to the maximum legal positions are
\[\{x_1y_2, x_1y_3, x_2y_3, y_1x_2, y_1x_3, y_2x_3\}.\]
Also consider \textsc{Snort } played on $P_3$ and $C_3$. The maximum monomials then are \[\{x_1x_2x_3, y_1y_2y_3, x_1y_3, x_3y_1\}\]
and \[\{x_1x_2x_3, y_1y_2y_3\}\] respectively.
The legal complexes of all three games are given in Figure \ref{fig:ColSnortP3C3sc}.
\begin{figure}[!ht]
\begin{center}
\begin{tabular}{>{\flushright\arraybackslash} m{1.2cm}| >{\centering\arraybackslash} m{4.5cm} >{\centering\arraybackslash} m{4.5cm}}
& $P_3$ & $C_3$\\\hline
\textsc{Snort } &
\begin{tikzpicture}[scale=0.6]
\filldraw[fill=gray!70, line width=2pt, draw=black] (0, 0)--(1.73, 1)--(1.73, -1)-- cycle;
\filldraw[fill=gray!70, line width=2pt, draw=black] (5.46, 0)--(3.73, 1)--(3.73, -1)-- cycle;
\draw[line width=2pt] (1.73, 1)--(3.73, 1);
\draw[line width=2pt] (1.73, -1)--(3.73, -1);
\filldraw (0,0) circle (0.1cm);
\filldraw (1.73,1) circle (0.1cm);
\filldraw (1.73,-1) circle (0.1cm);
\filldraw (5.46,0) circle (0.1cm);
\filldraw (3.73,1) circle (0.1cm);
\filldraw (3.73,-1) circle (0.1cm);
\draw (-0.7,0) node {$x_2$};
\draw (1.73,1.4) node {$x_1$};
\draw (1.73,-1.5) node {$x_3$};
\draw (5.98,0) node {$y_2$};
\draw (3.73,1.4) node {$y_3$};
\draw (3.73,-1.5) node {$y_1$};
\end{tikzpicture} &
\begin{tikzpicture}[scale=0.6]
\filldraw[fill=gray!70, line width=2pt, draw=black] (0, 0)--(1.73, 1)--(1.73, -1)-- cycle;
\filldraw[fill=gray!70, line width=2pt, draw=black] (5.46, 0)--(3.73, 1)--(3.73, -1)-- cycle;
\filldraw (0,0) circle (0.1cm);
\filldraw (1.73,1) circle (0.1cm);
\filldraw (1.73,-1) circle (0.1cm);
\filldraw (5.46,0) circle (0.1cm);
\filldraw (3.73,1) circle (0.1cm);
\filldraw (3.73,-1) circle (0.1cm);
\draw (-0.7,0) node {$x_2$};
\draw (1.73,1.4) node {$x_1$};
\draw (1.73,-1.5) node {$x_3$};
\draw (5.98,0) node {$y_2$};
\draw (3.73,1.4) node {$y_1$};
\draw (3.73,-1.5) node {$y_3$};
\end{tikzpicture} \\
\textsc{Col } &
\begin{tikzpicture}[scale=0.6]
\filldraw[fill=gray!70, line width=2pt, draw=black] (0, 0)--(1.73, 1)--(1.73, -1)-- cycle;
\filldraw[fill=gray!70, line width=2pt, draw=black] (5.46, 0)--(3.73, 1)--(3.73, -1)-- cycle;
\draw[line width=2pt] (1.73, 1)--(3.73, 1);
\draw[line width=2pt] (1.73, -1)--(3.73, -1);
\filldraw (0,0) circle (0.1cm);
\filldraw (1.73,1) circle (0.1cm);
\filldraw (1.73,-1) circle (0.1cm);
\filldraw (5.46,0) circle (0.1cm);
\filldraw (3.73,1) circle (0.1cm);
\filldraw (3.73,-1) circle (0.1cm);
\draw (-0.7,0) node {$y_2$};
\draw (1.73,1.4) node {$x_1$};
\draw (1.73,-1.5) node {$x_3$};
\draw (5.98,0) node {$x_2$};
\draw (3.73,1.4) node {$y_3$};
\draw (3.73,-1.5) node {$y_1$};
\end{tikzpicture} &
\begin{tikzpicture}[scale=0.6]
\draw[line width=2pt] (0,0)--(2,2);
\draw[line width=2pt] (2,2)--(4,0);
\draw[line width=2pt] (4,0)--(0,2);
\draw[line width=2pt] (0,2)--(2,0);
\draw[line width=2pt] (2,0)--(4,2);
\draw[line width=2pt] (4,2)--(0,0);
\filldraw (0,0) circle (0.1cm);
\filldraw (2,0) circle (0.1cm);
\filldraw (4,0) circle (0.1cm);
\filldraw (0,2) circle (0.1cm);
\filldraw (2,2) circle (0.1cm);
\filldraw (4,2) circle (0.1cm);
\draw (0,-0.5) node {$y_1$};
\draw (2,-0.5) node {$y_2$};
\draw (4,-0.5) node {$y_3$};
\draw (0,2.5) node {$x_1$};
\draw (2,2.5) node {$x_2$};
\draw (4,2.5) node {$x_3$};
\end{tikzpicture}\\
\end{tabular}
\end{center}
\caption{The Legal Complexes $\Delta_{\textsc{Snort}, P_3}$, $\Delta_{\textsc{Snort}, C_3}$, $\Delta_{\textsc{Col}, P_3}$, and $\Delta_{\textsc{Col}, C_3}$}
\label{fig:ColSnortP3C3sc}
\end{figure}
\end{example}
Note that the legal complexes of \textsc{Col } and \textsc{Snort } on $P_3$ are isomorphic. This is true whenever \textsc{Col } and \textsc{Snort } are played on a bipartite graph, see \cite{MSc}.
\section{The Ideals of a Placement Game}\label{sec:ideals}
Through the monomials that represent legal or illegal positions of a game, we can also associate square-free monomial ideals with a placement game.
\begin{definition}\label{def:legalI}
The \textit{legal ideal}, $\legalI{G,B}$, of a placement game $G$ played on the board $B$ is the ideal generated by the monomials representing maximal legal positions of $G$.
\end{definition}
\begin{definition}\label{def:illegalI}
The \textit{illegal ideal}, $\illegalI{G,B}$, of a placement game $G$ played on the board $B$ is the ideal generated by the monomials representing minimal illegal positions of $G$.
\end{definition}
\begin{definition}\label{def:auxboard}
The \textit{illegal complex}, sometimes called the \textit{auxiliary board} \cite{GamePol}, of a placement game $G$ played on the board $B$, is the simplicial complex whose facets are represented by the monomials of the minimal illegal positions of $G$. It is denoted by $\auxboard{G,B}$.
\end{definition}
The authors in \cite{GamePol} introduce the auxiliary board for ``independence placement games'', which is the class of placement games for which the illegal complex is a graph. The term `independence game' was chosen since the independence sets of $\Gamma_{G,B}$ (considered as a graph) correspond to the legal positions of $G$ played on $B$, i.e., the faces of $\Delta_{G,B}$.
\begin{proposition}\label{thm:legalillegal}
For a placement game $G$ played on a board $B$ we have the following
\begin{itemize}
\item[(1)] $\legalI{G,B}=\facetI{G,B},$
\item[(2)] $\illegalI{G,B}=\facet{\auxboard{G,B}}=\SRideal{G,B}.$
\end{itemize}
\end{proposition}
\begin{proof}
(1) The facets of $\Delta_{G,B}$ represent the maximal legal positions of $G$. Thus $\facetI{G,B}$ is the ideal generated by the monomials representing the maximal legal positions, which is $\legalI{G,B}$ by definition.
(2) The facets of $\auxboard{G,B}$ are represented by the monomials of the minimal illegal positions of $G$, which by definition generate $\illegalI{G,B}$, proving the first equality.
Since the faces of $\Delta_{G,B}$ represent the legal positions of $G$, the minimal non-faces of $\Delta_{G,B}$ represent the minimal illegal positions, which generate $\illegalI{G,B}$. Thus $\illegalI{G,B}=\SRideal{G,B}$.
\end{proof}
\begin{example}
Consider \textsc{Col } played on the path $P_3$ with labels as in Example \ref{ex:Col2}. We then have the legal ideal
\[\legalI{\textsc{Col}, P_3}=\langle x_1y_2x_3, y_1x_2y_3, x_1y_3, y_1x_3\rangle\]
and the illegal ideal
\[\illegalI{\textsc{Col}, P_3}=\langle x_1x_2, x_2x_3, y_1y_2, y_2y_3\rangle.\]
The illegal complex $\auxboard{\textsc{Col}, P_3}$ is given in Figure \ref{fig:ColP3Aux}.
\begin{figure}[!ht]
\begin{center}
\begin{tikzpicture}[scale=2]
\node (1) at (0,0) {$x_1$};
\node (2) at (1,0) {$x_2$};
\node (3) at (2,0) {$x_3$};
\node (4) at (0,-1) {$y_1$};
\node (5) at (1,-1) {$y_2$};
\node (6) at (2,-1) {$y_3$};
\draw (1)--(2)--(3)--(6)--(5)--(4)--(1);
\draw (2)--(5);
\end{tikzpicture}
\end{center}
\caption{The Illegal Complex $\auxboard{\textsc{Col}, P_3}$}
\label{fig:ColP3Aux}
\end{figure}
\end{example}
\section{Playing Games on Simplicial Complexes}
In this section we show that games can be played on the illegal or legal complex rather than the board.
Since the facets of the illegal complex represent the minimal illegal positions, we can play on $\auxboard{G,B}$, instead of playing $G$ on the board $B$, according to the following rules:
\begin{ruleset1}\label{rules:illegal}
\begin{enumerate}
\item Left may only play on vertices labelled $x_i$, while Right may only play on vertices labelled $y_i$.
\item Given a facet, pieces played may not occupy all the vertices of the facet.
\end{enumerate}
\end{ruleset1}
Since the facets of $\auxboard{G,B}$ are the minimal illegal positions, any vertex set that does not contain all the vertices of any facet is a legal position of $G$. Thus playing on $\auxboard{G,B}$ according to the above rules results in legal positions.
\begin{example}
Consider \textsc{Col } played on $P_5$. Since pieces may not be placed on the same space, or pieces by the same player placed side by side, the facets of $\auxboard{\textsc{Col}, P_5}$ then consist of the edges between $x_i$ and $y_i$, between $x_i$ and $x_{i+1}$, and between $y_i$ and $y_{i+1}$. It is given in Figure \ref{fig:ColP5Aux}.
\begin{figure}[!ht]
\begin{center}
\begin{tikzpicture}[scale=2]
\node (1) at (0,0) {$x_1$};
\node (2) at (1,0) {$x_2$};
\node (3) at (2,0) {$x_3$};
\node (4) at (3,0) {$x_4$};
\node (5) at (4,0) {$x_5$};
\node (6) at (0,-1) {$y_1$};
\node (7) at (1,-1) {$y_2$};
\node (8) at (2,-1) {$y_3$};
\node (9) at (3,-1) {$y_4$};
\node (10) at (4,-1) {$y_5$};
\draw (1)--(2);
\draw (2)--(3);
\draw (3)--(4);
\draw (4)--(5);
\draw (5)--(10);
\draw (6)--(7);
\draw (7)--(8);
\draw (8)--(9);
\draw (9)--(10);
\draw (6)--(1);
\draw (2)--(7);
\draw (3)--(8);
\draw (4)--(9);
\end{tikzpicture}
\end{center}
\caption{The Illegal Complex $\auxboard{\textsc{Col}, P_5}$}
\label{fig:ColP5Aux}
\end{figure}
Playing on the vertices $x_1, y_3, x_4, y_5$ is legal since we never have both vertices of an edge. This position is shown on the top of Figure \ref{fig:SRCcol}, while the bottom shows the corresponding position played on $P_5$.
\begin{figure}[!ht]
\begin{center}
\begin{tikzpicture}[scale=1]
\node[circle, draw, minimum size=8mm] (1) at (0,0) {$L$};
\node[circle, draw, minimum size=8mm] (2) at (2,0) {};
\node[circle, draw, minimum size=8mm] (3) at (4,0) {};
\node[circle, draw, minimum size=8mm] (4) at (6,0) {$L$};
\node[circle, draw, minimum size=8mm] (5) at (8,0) {};
\node[circle, draw, minimum size=8mm] (6) at (0,-2) {};
\node[circle, draw, minimum size=8mm] (7) at (2,-2) {};
\node[circle, draw, minimum size=8mm] (8) at (4,-2) {$R$};
\node[circle, draw, minimum size=8mm] (9) at (6,-2) {};
\node[circle, draw, minimum size=8mm] (10) at (8,-2) {$R$};
\node at (0,0.75) {$x_1$};
\node at (2,0.75) {$x_2$};
\node at (4,0.75) {$x_3$};
\node at (6,0.75) {$x_4$};
\node at (8,0.75) {$x_5$};
\node at (0,-2.75) {$y_1$};
\node at (2,-2.75) {$y_2$};
\node at (4,-2.75) {$y_3$};
\node at (6,-2.75) {$y_4$};
\node at (8,-2.75) {$y_5$};
\draw (1)--(2);
\draw (2)--(3);
\draw (3)--(4);
\draw (4)--(5);
\draw (5)--(10);
\draw (6)--(7);
\draw (7)--(8);
\draw (8)--(9);
\draw (9)--(10);
\draw (6)--(1);
\draw (2)--(7);
\draw (3)--(8);
\draw (4)--(9);
\node[circle, draw, minimum size=8mm] (a) at (0,-4) {$L$};
\node[circle, draw, minimum size=8mm] (b) at (2,-4) {};
\node[circle, draw, minimum size=8mm] (c) at (4,-4) {$R$};
\node[circle, draw, minimum size=8mm] (d) at (6,-4) {$L$};
\node[circle, draw, minimum size=8mm] (e) at (8,-4) {$R$};
\draw (a)--(b);
\draw (b)--(c);
\draw (c)--(d);
\draw (d)--(e);
\node at (0,-4.75) {$1$};
\node at (2,-4.75) {$2$};
\node at (4,-4.75) {$3$};
\node at (6,-4.75) {$4$};
\node at (8,-4.75) {$5$};
\end{tikzpicture}
\end{center}
\caption{A Legal Position on $\auxboard{\textsc{Col}, P_5}$ and on $P_5$}
\label{fig:SRCcol}
\end{figure}
\end{example}
The next example of an illegal complex has a facet of cardinality 3.
\begin{example}
Consider \textsc{NoGo } played on the path $P_3$. The legal ideal is
\[\legalI{\textsc{NoGo},P_3}=\langle x_1x_2, x_1x_3, x_1y_3, x_2x_3, y_1x_3, y_1y_2, y_1y_3, y_2y_3\rangle\]
while the illegal ideal is
\[\illegalI{\textsc{NoGo}, P_3}=\langle x_1x_2x_3, y_1y_2y_3, x_1y_1, x_1y_2, x_2y_2, x_2y_3, x_3y_3, y_1x_2, y_2x_3\rangle.\]
The illegal complex is given in Figure \ref{fig:NogoP3aux}.
\begin{figure}[!ht]
\begin{center}
\begin{tikzpicture}
\filldraw[fill=gray!70, draw=black] (0, 1)--(1.73, 0)--(0, -1)-- cycle;
\filldraw[fill=gray!70, draw=black] (5.46, 1)--(3.73,0)--(5.46, -1)-- cycle;
\draw (0,1)--(5.46,1)--(1.73,0)--(5.46,-1)--(0,-1)--(3.73,0)--(0,1);
\draw (1.73,0)--(3.73,0);
\filldraw (0,1) circle (0.1cm);
\filldraw (1.73,0) circle (0.1cm);
\filldraw (0,-1) circle (0.1cm);
\filldraw (5.46,1) circle (0.1cm);
\filldraw (3.73,0) circle (0.1cm);
\filldraw (5.46,-1) circle (0.1cm);
\draw (-0.5,1) node {$x_1$};
\draw (1.2,0) node {$x_2$};
\draw (-0.5,-1) node {$x_3$};
\draw (5.96,1) node {$y_1$};
\draw (4.26,0) node {$y_2$};
\draw (5.96,-1) node {$y_3$};
\end{tikzpicture}
\end{center}
\caption{The Illegal Complex $\auxboard{\textsc{NoGo}, P_3}$}
\label{fig:NogoP3aux}
\end{figure}
\noindent Then playing on $x_1$ and $x_2$ is legal (they form a face, but not a facet), while playing on $x_1, x_2$, and $x_3$ is illegal.
\end{example}
Similarly, playing on the legal complex $\Delta_{G,B}$ according to the following rules is also equivalent to playing $G$ on $B$:
\begin{ruleset2}\label{rules:legal}
\begin{enumerate}
\item Left may only play on vertices labelled $x_i$, while Right may only play on vertices labelled $y_i$.
\item The set of occupied vertices needs to be a face of $\Delta_{G,B}$.
\end{enumerate}
\end{ruleset2}
\begin{example}
Consider \textsc{Col } played on $C_3$. The position on the left in Figure \ref{fig:gclegal} is legal, while the one on the right is illegal when playing on the complex.
\begin{figure}[!ht]
\begin{center}
\begin{tikzpicture}[scale=1]
\node[circle, draw, minimum size=8mm] (1) at (0,2) {};
\node[circle, draw, minimum size=8mm] (2) at (2,2) {$L$};
\node[circle, draw, minimum size=8mm] (3) at (4,2) {};
\node[circle, draw, minimum size=8mm] (4) at (0,0) {};
\node[circle, draw, minimum size=8mm] (5) at (2,0) {};
\node[circle, draw, minimum size=8mm] (6) at (4,0) {$R$};
\draw (4)--(2)--(6)--(1)--(5)--(3)--(4);
\draw (0,-0.75) node {$y_1$};
\draw (2,-0.75) node {$y_2$};
\draw (4,-0.75) node {$y_3$};
\draw (0,2.75) node {$x_1$};
\draw (2,2.75) node {$x_2$};
\draw (4,2.75) node {$x_3$};
\node[circle, draw, minimum size=8mm] (a) at (6,2) {};
\node[circle, draw, minimum size=8mm] (b) at (8,2) {$L$};
\node[circle, draw, minimum size=8mm] (c) at (10,2) {};
\node[circle, draw, minimum size=8mm] (d) at (6,0) {$R$};
\node[circle, draw, minimum size=8mm] (e) at (8,0) {};
\node[circle, draw, minimum size=8mm] (f) at (10,0) {$R$};
\draw (d)--(b)--(f)--(a)--(e)--(c)--(d);
\draw (6,-0.75) node {$y_1$};
\draw (8,-0.75) node {$y_2$};
\draw (10,-0.75) node {$y_3$};
\draw (6,2.75) node {$x_1$};
\draw (8,2.75) node {$x_2$};
\draw (10,2.75) node {$x_3$};
\node at (2,-1.5) {(A) Legal Position};
\node at (8,-1.5) {(B) Illegal Position};
\end{tikzpicture}
\end{center}
\caption{A Legal and an Illegal Position when Playing on $\Delta_{\textsc{Col}, C_3}$}
\label{fig:gclegal}
\end{figure}
\end{example}
Notice that both the legal complex and the illegal complex give a representation of the game \emph{and} the board. Thus, we can use the two complexes interchangeably, which is of advantage since sometimes the illegal complex is simpler than the legal complex (for example, the legal complex of \textsc{Col } played on $P_5$ has facets with 5 vertices, while in the illegal complex the facets have 2 vertices).
The next theorem recapitulates these discussions.
\begin{theorem}
Given a placement game $G$ played on a board $B$, there exist simplicial complexes $\Delta$ and $\Gamma$ such that $G$ is equivalent to the game with the Illegal Ruleset played on $\Gamma$, and equivalent to the game with the Legal Ruleset played on $\Delta$.
\end{theorem}
\begin{proof}
As shown above, $\Delta=\Delta_{G,B}$ the legal complex and $\Gamma=\Gamma_{G,B}$ the illegal complex satisfy this.
\end{proof}
\section{Discussion}
From the construction of legal complexes from placement games, there are several questions that arise naturally.
One question of interest is a possible reverse construction. In other words, we are looking at what conditions a simplicial complex has to satisfy to be a legal complex. In \cite{GameCompII} we explore this question further.
Another natural direction to pursue is how the algebra of a square-free monomial ideal $I$ (such as Cohen-Macaulayness, localization/deletion-contraction) affects the rulesets of the games played on the simplicial complexes $\facetsc{}$ and $\SRsc{}$.
\section{Introduction}
Transfer matrix ($\mathrm{T}$-matrix) is a classic quantum mechanics approach that is widely used to treat a variety of physical problems~\cite{Sanchez-Soto2012a}. Linearly relating the parameters of the Schr\"{o}dinger waves in the two sides of a potential barrier, the $\mathrm{T}$-matrix contains a rich information of quantum characteristics of the potential examined. The effectiveness of the $\mathrm{T}$-matrix approach relies on its analytic simplicity and on the fact that $\mathrm{T}$-matrices can be easily multiplied when treating relatively complicated potential barriers. Exact expressions for the energy structure as well as the transport characteristics of semiconductor super-lattices that were derived by Esaki and Tsu~\cite{Tsu1973a} could be seen as a typical example of elegant successes of this approach.
As for the graphene, when charge carriers behave like the two-dimensional (2D) Dirac relativistic fermions, the $\mathrm{T}$-matrix approach has also been shown to be an effective approach. For the graphene nanostructures induced by one-dimensional (1D) potentials, such as the multi-barrier structures or the $n$-$p$-$n$-junctions, the $\mathrm{T}$-matrix calculations have been developed to study the energy spectrum~\cite{Nguyen2009a} as well as the dynamical characteristics ~\cite{Nguyen2009b}. In particular, the $\mathrm{T}$-matrix approach was successfully used to calculate the electronic band structure and the transport properties of various single-/bi-layer graphene superlattices induced by periodic electrostatic and/or magnetic potentials (see, for example,~\cite{Barbier2008a, Pham2014a} and references therein). Note that, traditionally, the $\mathrm{T}$-matrix approach was just suggested for (quasi) 1D potential problems.
The present work is devoted to another class of graphene nano-structures that are induced by a cylindrically symmetric potential, known as circular graphene quantum dots (GQDs)~\cite{Apergel2010a}. Experimentally, a circular GQD can be created using an appropriate circular top gate in the way described in Ref.~\cite{Stander2009a}. Thanks to the fact that the gate potential can be tuned externally, such a gate-induced GQD can be easily controlled as regards its carrier density and effective radius. Theoretically, circular GQDs were often modelled by confinement potentials of either rectangular~\cite{Matulis2008a,Hewageegana2008a,Bardarson2009a, Recher2009a,Park2010a,Heinisch2013a,Guerrero-Becerra2014a,Schulz2014a} or power law forms~\cite{Chen2007a,Giavaras2009a,Giavaras2011a}. Then, by solving the Dirac-like equation for the chosen potential one obtained the dot energy spectrum and the associated quantities. It was shown that for the gapless pristine graphene in the absence of a magnetic field, due to the Klein tunneling, in general, it is not truly bound states but just quasi-bound ones with a finite trapping time that can be induced by an electrostatic confinement potential (see below, the text following Eq.~(\ref{eq: bound states}), for exceptional cases). An energy gap~\cite{Nguyen2009a,Recher2009a} and/or a perpendicular magnetic field \cite{Chen2007a,Giavaras2009a,deMartino2007a} can enhance the trapping time of quasi-bound states (QBSs) and even induce bound states. A smoothness of confinement potential was also shown to enhance the trapping time of QBSs. However, solving the Dirac-like equation with a smooth potential is often rather problematic.
The purpose of this work is to extend the $\mathrm{T}$-matrix approach to study the electronic properties of circular GQDs induced by more general radial confinement potentials. For simplicity, our discussions are essentially limited to the case of zero magnetic field. Nevertheless, we briefly describe in an Appendix how to extend the approach to the case where a perpendicular homogeneous magnetic field is applied to the dot plane.
Note that, in reality, a GQD with well-defined discrete energy levels can be created by cutting a structure with the desired geometry from a flake of graphene \cite{Todd2009a,Espinosa-Ortega2013a}. However, so far there is a serious problem in fabricating such GQDs with atomic precision termination, while it was shown that the electronic properties of these GQDs are quantitatively sensitive to their precise terminations~\cite{Espinosa-Ortega2013a}. From the future electronics application point of view it is desirable to find the way of creating GQDs by the confinement potentials so that the trapping time of localized states is long enough to satisfy the application requirements and the electronic properties of the structure can be controlled externally.
The paper is organized as follows. Sec.~\ref{sec: general} presents the main results of the paper. It is there shown that for a very general class of circular GQDs, the bound and quasi-bound states spectral equations as well as the associated quantities, such as the local density of states and the resonance scattering characteristics, can all be expressed exactly in terms of the elements of the $\mathrm{T}$-matrix for the corresponding radial confinement potential. In Sec.~\ref{sec: examples} we show, as an example, the numerical solutions of the presented equations for the case of trapezoidal radial potential. Among the obtained results, it is in particular suggested that thermal fluctuations and/or electrostatic disorders may appear as an obstacle to controlling the valley polarization of Dirac electrons. While the paper is closed with a brief summary in Sec.~\ref{sec: conclusion}, the two Appendices are added to describe how the $\mathrm{T}$-matrix can be determined at some particular energies (A) and in the presence of a perpendicular magnetic field (B).
\section{General consideration}
\label{sec: general}
Let us consider a single-layer circular GQD defined by the radial confinement potential $U(r)$ that is assumed to be smooth on the scale of the graphene lattice spacing. Using the units such that $\hbar=1$ and the Fermi velocity $v_F = 1$ (quasi-relativistic quantum units), the low-energy electron dynamics in this structure can be described by the 2D Dirac-like Hamiltonian
\begin{equation}
H= \vec{\sigma} \vec{p} + \nu \Delta \sigma_z + U(r),
\label{eq: H U}
\end{equation}
where $\vec{\sigma}=(\sigma_x,\sigma_y)$ are the Pauli matrices, $\vec{p}=-i (\partial_x, \partial_y)$ is the 2D momentum operator, $\nu$ is the valley index ($\nu = \pm$ for the valleys $K$ and $K'$, respectively) and $\Delta \sigma_z$ is the constant mass term~\cite{Neto2009a}.
We look for the eigen-functions of the Hamiltonian~(\ref{eq: H U}) at energy $E$. Because of the cylindrical symmetry of $U(r)$, in the polar coordinates $(r, \phi )$ these eigen-functions can be found in the form
\begin{equation}
\Psi (r,\phi) = e^{i j \phi}
\left(
\begin{array}{cc}
e^{-i \phi/2} \chi_{A} (r) \\
e^{+i \phi/2} \chi_{B} (r)
\end{array}
\right),
\label{eq: eigen j}
\end{equation}
where the total angular momentum $j$ takes half-integer values and the radial spinor $\chi = (\chi_A,\chi_B)^{t}$ satisfies the following equation:
\begin{equation}
\left(
\begin{array}{cc}
U(r) - E + \nu \Delta & - i (\partial_r + \frac{j+\frac{1}{2}}{r}) \\
- i (\partial_r - \frac{j-\frac{1}{2}}{r}) & U(r)- E - \nu \Delta
\end{array}
\right)
\left(
\begin{array}{c}
\chi_A(r)\\
\chi_B(r)
\end{array}
\right)
= 0.
\label{eq: eigen chi U}
\end{equation}
This system of two first-order differential equations for the components $\chi_A$ and $\chi_B$ could be converted to a decoupled second-order differential equation for either of these components. However, unless the potential $U(r)$ is simple enough, the resulting second-order differential equations are often intractable. Nevertheless, we will show that the electronic characteristics of the circular GQDs described by the Hamiltonian~(\ref{eq: H U}) can be exactly expressed in terms of the elements of a $(2 \times 2)$ $\mathrm{T}$-matrix defined below.
In order to define the expected $\mathrm{T}$-matrix, it should be noted that, in practice, we often have to deal with the confinement potentials $U(r)$ which are flat in the two limiting regions of small and large $r$, i.e.,
\begin{equation}
U(r)=
\left\{
\begin{array}{c}
\mbox{$U_i$, \ \ \ $r \le r_i$}, \\
\mbox{$U_f$, \ \ \ $r \ge r_f$}, \\
\mbox{arbitrary, \ otherwise}.
\end{array}
\right.
\label{eq: general potential}
\end{equation}
In these limiting regions, the eigenstates of Hamiltonian (\ref{eq: H U}) can be found exactly. Indeed, we consider some region $r_a < r < r_b$ where the potential $U(r)$ is constant, $U(r) = \bar{U}$. As is well-known~\cite{Recher2009a,Rubio2015a}, for $E \neq \bar{U} \pm \nu \Delta$ the
general solution to Eq.~(\ref{eq: eigen chi U}) in this region can be written in terms of two independent integral constants $C = ( C^{(1)}, C^{(2)} )^t$:
\begin{equation}
\chi (r) = \mathrm{W} (\bar{U} , r) C ,
\label{eq: chi W}
\end{equation}
where the columns of the $\mathrm{W}$-matrix are the two independent basic solutions of Eq.~(\ref{eq: eigen chi U}),
\begin{equation}
\mathrm{W} (\bar{U} , r) =
\left(
\begin{array}{cc}
J_{j-\frac{1}{2}} (q r) & Y_{j-\frac{1}{2}} (q r) \\
i \tau J_{j+\frac{1}{2}} (q r) & i \tau Y_{j+\frac{1}{2}} (q r)
\end{array}
\right) .
\label{eq: W-matrix}
\end{equation}
Here $J_{j\pm\frac{1}{2}}$ is the Bessel function of the first kind and $Y_{j\pm\frac{1}{2}}$ is the Bessel function of the second kind~\cite{Abramowitz1972a}, $q = \sqrt{(E - \bar{U})^2 - \Delta^2}$ and $\tau = q / (E - \bar{U} + \nu \Delta )$. In the following, for definiteness, the integral constants $C = (C^{(1)}, C^{(2)})^t$ will be referred to as \emph{basic coefficients}. In the 1D problems, these basic coefficients can be interpreted as the coefficients of the forward and backward waves~\cite{Nguyen2009a,Nguyen2009b}. A similar interpretation can be introduced when the Hankel functions~\cite{Abramowitz1972a} are used to represent the basic solutions $\mathrm{W}$. In this paper, we however use the Bessel function representation for the sake of algebraic convenience.
A special care is needed in the case of energies $E \rightarrow \bar{U} \pm \nu \Delta$, when the basic solutions (\ref{eq: W-matrix}) become divergent. To avoid such a divergence, maintaining the matrix $\mathrm{W}$ as independent basic solutions, one has to properly adjust the regularization coefficients for the matrix elements in getting the correct limiting form of $\mathrm{W}$. To keep our discussions continuous, in the following we always assume that $E \ne \bar{U} \pm \nu \Delta$ and the case $E \rightarrow \bar{U} \pm \nu \Delta$ will be discussed separately in Appendix~\ref{sec: zero-energy}.
We note that the basic coefficient $C$ can be considered as the spinor represented in a basis that depends on $r$ according to Eq.~(\ref{eq: W-matrix}). Then, Eq.~(\ref{eq: chi W}) actually describes a (non-unitary) basis transformation of the spinor. The advantage of using such a $r$-depending basis is that while $\chi(r)$ depends on $r$ explicitly, the wave coefficient $C$ is independent of $r$ in constant potential regions.
Now, the key feature of the differential Eq.~(\ref{eq: eigen chi U}) is that it is linear and homogeneous. Consequently, the two radial spinors at $r = r_1$ and $r = r_2$ should be linearly related by some matrix $\mathrm{G} (r_2,r_1)$,
\begin{equation}
\chi (r_2) = \mathrm{G} (r_2,r_1) \chi (r_1).
\label{eq: G-matrix}
\end{equation}
This relation holds for any $r_2 \ge r_1$, including the case of $r_1 \le r_i$ and $r_2 \ge r_f$ [see Eq.~(\ref{eq: general potential})]. Therefore, when we represent the spinors at $r_1 \le r_i$ and $r_2 \ge r_f$ by the basic coefficients $C_i$ and $C_f$ respectively, these basic coefficients should also be linearly related by some $\mathrm{T}$-matrix:
\begin{equation}
C_f = \mathrm{T} C_i .
\label{eq: T-matrix}
\end{equation}
Note that the variable $r$ is entirely dropped out of this equation. Thus, in the context of the studied problem, the $\mathrm{T}$-matrix is defined as the matrix that maps the basic coefficients in the limiting region of small $r$ to those in the limiting region of large $r$. In fact, Eq.~(\ref{eq: T-matrix}) is just a basis transformation of Eq.~(\ref{eq: G-matrix}). From Eqs.~(\ref{eq: chi W}),~(\ref{eq: G-matrix}), and (\ref{eq: T-matrix}), we have the following elementary relation
\begin{equation}
\mathrm{T} = \mathrm{W}^{-1} (U_f,r_2) \mathrm{G} (r_2,r_1) \mathrm{W} (U_i,r_1).
\label{eq: T-G}
\end{equation}
This equation, like Eq.~(\ref{eq: G-matrix}), holds for any $r_1 \le r_i$ and $r_2 \ge r_f$, including $r_1=r_i$ and $r_2=r_f$.
Equation~(\ref{eq: T-G}) provides a practical way to compute the $\mathrm{T}$-matrix for any radial potential $U(r)$ of Eq.~(\ref{eq: general potential}) via computing $\mathrm{G}(r_f,r_i)$. By inserting~(\ref{eq: G-matrix}) into~(\ref{eq: eigen chi U}), one finds an explicit differential equation for $\mathrm{G}(r_2,r_1)$, which resembles a dynamical equation in $r$-direction,
\begin{equation}
i\frac{\partial \mathrm{G}(r_2,r_1)}{\partial r_2}= \mathcal{H}(r_2) \mathrm{G}(r_2,r_1),
\end{equation}
with the formal Hamiltonian defined as
\begin{equation}
\mathcal{H} (r)=
\left(
\begin{array}{cc}
i\frac{j-\frac{1}{2}}{r} & U(r) - E - \nu \Delta \\
U(r)-E + \nu \Delta & -i \frac{j+\frac{1}{2}}{r}
\end{array}
\right).
\end{equation}
This dynamical equation is to be solved for $\mathrm{G}(r_2,r_1)$ with the initial condition such that $\mathrm{G}(r_1,r_1)$ is the $(2 \times 2)$ identity matrix. Note that the formal Hamiltonian $\mathcal{H}(r)$ is not hermitian, and thus the dynamics is non-unitary. Moreover, $\mathcal{H}(r)$ at different $r$ generally do not commute with each other, rendering the dynamics analytically intractable. However, for the purpose of numerically calculating the $\mathrm{T}$-matrix, a simple numerical method for ordinary differential equations (ODEs) such as the Runge--Kutta method is sufficient~\cite{Press2002a}.
Of particular importance is the case of one-step potential, $U(r)$ of Eq.~(\ref{eq: general potential}) with $r_i=r_f$. In this case, $\mathrm{G}(r_f,r_i)$ is simply the $(2 \times 2)$ identity matrix and we can easily write down the $\mathrm{T}$-matrix of Eq.~(\ref{eq: T-G}) explicitly,
\begin{widetext}
\begin{eqnarray}
\mathrm{T} && = \left[ \tau_f J_{j-\frac{1}{2}} (q_f r_f) Y_{j+\frac{1}{2}} (q_f r_f) - \tau_f J_{j+\frac{1}{2}} (q_f r_f)Y_{j-\frac{1}{2}} (q_f r_f) \right]^{-1} \nonumber \\
&& \times \left(
\begin{array}{cc}
\tau_f Y_{j+\frac{1}{2}} (q_f r_f)J_{j-\frac{1}{2}} (q_i r_i)- \tau_i Y_{j-\frac{1}{2}} (q_f r_f)J_{j+\frac{1}{2}} (q_i r_i)& \tau_f Y_{j+\frac{1}{2}} (q_f r_f) Y_{j-\frac{1}{2}} (q_i r_i) - \tau_i Y_{j-\frac{1}{2}} (q_f r_f)Y_{j+\frac{1}{2}} (q_i r_i) \\
- \tau_f J_{j+\frac{1}{2}} (q_f r_f)J_{j-\frac{1}{2}} (q_i r_i) + \tau_i J_{j-\frac{1}{2}} (q_f r_f) J_{j+\frac{1}{2}} (q_i r_i) & - \tau_f J_{j+\frac{1}{2}} (q_f r_f) Y_{j-\frac{1}{2}} (q_i r_i)+ \tau_i J_{j-\frac{1}{2}} (q_f r_f) Y_{j+\frac{1}{2}} (q_i r_i)
\end{array}
\right), \nonumber \\
\label{eq: explicit T}
\end{eqnarray}
\end{widetext}
where $q_{i(f)}$ and $\tau_{i(f)}$ are defined as in Eq.~(\ref{eq: W-matrix}): $q_{i(f)} = \sqrt{(E - U_{i(f)} )^2 - \Delta^2}$ and $\tau_{i(f)} = q_{i(f)} / (E - U_{i(f)} + \nu \Delta )$.
Being a seemingly simple mathematical consequence of the linearity and the homogeneity of the wave equations, the $\mathrm{T}$-matrix of Eq.~(\ref{eq: T-matrix}), as can be seen below, holds rich information on the characteristics of the energy spectrum of the system. In order to derive these characteristics, we are going to impose appropriate boundary conditions for the basic coefficients $C_i$ and $C_f$, which in turn lead to corresponding constraints on the elements of the $\mathrm{T}$-matrix itself. It should be noted immediately that in the limiting region of small $r$, the Bessel function of the first kind $J_{j\pm\frac{1}{2}}(q_i r)$ is regular, while the Bessel function of the second kind $Y_{j\pm\frac{1}{2}}(q_i r)$ diverges. We should therefore set the condition $C_i \propto (1,0)^t $ for the basic coefficient in this region. We will first show that the localization behaviour of states is determined by the boundary condition for the basic coefficient $C_f$.
\subsection{Bound states}
\label{sec: bound states}
For the bound states to emerge, the wave function should decay fast enough as $r$ increases. This happens only when the wave vector in the limiting region of large $r$, $q_f = \sqrt{( E - U_f )^2- \Delta^2}$, is imaginary, implying $-\Delta < E - U_f < \Delta$. Here, as mentioned above, we do not include the case of equalities, which may bring about a particular type of bound states (see also Appendix~\ref{sec: zero-energy} and references therein). Under this condition, although both Bessel functions $J_{{j\pm\frac{1}{2}}} (q_f r)$ and $Y_{{j\pm\frac{1}{2}}} (q_f r)$ diverge as $r$ increases, the Hankel function of the first kind, $H^{+}_{{j\pm\frac{1}{2}}} (q_f r)= J_{{j\pm\frac{1}{2}}} (q_f r) + i Y_{{j\pm\frac{1}{2}}}(q_f r)$, decays exponentially. Thus, for the bound states to emerge, the appropriate boundary condition for the basic coefficient $C_f$ should have the form $C_f \propto (1, i)^t $. With the boundary conditions for $C_i$ and $C_f$ just defined, Eq.~(\ref{eq: T-matrix}) leads to the following relation for the elements of the $\mathrm{T}$-matrix:
\begin{equation}
\mathrm{T}_{11} + i \mathrm{T}_{21} = 0.
\label{eq: bound states}
\end{equation}
This is the general equation to determine the energy spectrum of all the bound states in the considered energy regions for a GQD induced by the potential of Eq.~(\ref{eq: general potential}). To obtain this energy spectrum we first have to calculate the $\mathrm{T}$-matrix in the way described above and then to solve Eq.~(\ref{eq: bound states}). In the particular case of one-step potentials, using the explicit $\mathrm{T}$-matrix of Eq.~(\ref{eq: explicit T}), we can easily recover the bound state spectral equation reported in Refs.~\cite{Recher2009a,Rubio2015a} for the GQD induced by a rectangular potential.
\subsection{Quasi-bound states}
For $| E - U_f | > \Delta$, the wave vector in the limiting region of large $r$, $q_f$, is always real and the corresponding states cannot be truly bound. However, carriers may be temporarily trapped at these states with some finite life-time. As mentioned above, such states are often referred to as the QBSs. Each QBS can be characterized by a complex energy $E = \Re(E) + i \Im(E)$ with $\Im(E)<0$. The real part of this energy, $\Re(E)$, defines the position of the QBS (i.e., the resonant level), while the imaginary part, $\Im(E)$, causes the probability density of the QBS to decay over time $t$ as $ \propto e^{2 \Im(E) t}$. This implies that $| \Im(E) |$ is a measure of the resonant level width and its inverse is a measure of the carrier life-time at the QBS, $\tau_0 \propto 1/ (2 | \Im(E) |)$.
Actually, the way we determined the spectral equation for bound states, Eq.~(\ref{eq: bound states}), can be easily extended to find the spectrum of QBSs. Indeed, as is well known~\cite{Chen2007a,Nguyen2009a}, the reasonable boundary condition for QBSs is that far from the origin the wave function should be an out-going wave. Letting $s = \operatorname{sign}(E - U_f )$, it is easy to see that the wave function $(H^{s}_{{j-\frac{1}{2}}} (q_f r), i \tau_f H^{s}_{{j+\frac{1}{2}}} (q_f r))^t $ with $H^{s}_{{j\pm\frac{1}{2}}} (q_f r) = J_{{j\pm\frac{1}{2}}} (q_f r) + is Y_{{j\pm\frac{1}{2}}} (q_f r)$ describes such an out-going wave. This can be confirmed by examining the current density of the radial wave function in the limiting region of large $r$ using the well-known asymptotic forms of the Hankel functions~\cite{Abramowitz1972a}. With the wave-function identified, in terms of the basic coefficients, it appears that the appropriate boundary condition for QBSs takes the simple form: $C_f \propto (1, is)^t$. Using this $C_f$ and the boundary condition for $C_i$ defined above, Eq.~(\ref{eq: T-matrix}) results in the general equation for determining the QBSs spectrum in circular GQDs:
\begin{equation}
\mathrm{T}_{11} + i s \mathrm{T}_{21} = 0.
\label{eq: quasi-bound states}
\end{equation}
Note that, to our best knowledge, the QBSs in circular GQDs were often identified by either numerically fitting asymptotic boundary conditions~\cite{Chen2007a}, or intuitively analysing the behaviour of the local density of states~\cite{Matulis2008a,Masir2009a}. Equation~(\ref{eq: quasi-bound states}) provides an alternative way to solve the problem, making it more definite and rather simple algebraically. In fact, this equation is in the same spirit as the equation suggested sometime ago for the QBSs in a 1D potential~\cite{Nguyen2009b}.
\subsection{Density of states}
The local density of states (LDOS) for unbound states, as defined in~\cite{Matulis2008a}, can also be easily expressed in terms of the $\mathrm{T}$-matrix of the radial confinement potential. Note that for unbound states the wave functions are not normalizable and the usual definition of LDOS~\cite{Davies1998a} should be used with care. Following~\cite{Matulis2008a}, we imagine that the considered GQD is entirely embedded in a large graphene disc of radius $R$, with the center of this disc coinciding with that of the GQD. States are then bound within the large graphene disc, and the level spacing can be estimated to be $\Delta E= \pi/R$~\cite{Matulis2008a}. The LDOS of the GQD is proportional to both the level density and the probability for the electron at that energy level to be inside the dot. For a wave function with basic coefficients $C_i = (F, 0)^t$ and $C_f = (P, Q)^t$, the latter is proportional to $\abs{F}^2 / \abs{N}^2$, where $N$ is the normalization factor of the wave function, which in turn can be estimated to be $\abs{N}^2 \propto (\abs{P}^2 + \abs{Q}^2) R/\abs{E}$~\cite{Matulis2008a}. Overall, this gives the formula for the LDOS: $\rho^{(j)} (E) \propto \abs{E} \abs{F}^2 / ( \abs{P}^2+\abs{Q}^2 )$.
In order to get the LDOS in terms of the $\mathrm{T}$-matrix, we can use the relation~(\ref{eq: T-matrix}) to show that $\abs{F}^2/(\abs{P}^2+\abs{Q}^2) = 1 / ( \abs{T_{11}}^2+\abs{T_{21}}^2)$. Thus, for a given angular momentum $j$ and a given valley index $\nu$, the LDOS around the circular GQD can be calculated in terms of the $\mathrm{T}$-matrix as
\begin{equation}
\rho^{(j)} (E) \propto \frac{\abs{E}}{\abs{T_{11}^{(j)}}^2 + \abs{T_{21}^{(j)}}^2},
\label{eq: dos}
\end{equation}
where the superscript $(j)$ is added to explicitly indicate the $j$-dependence of the quantity calculated. Summing (\ref{eq: dos}) over all angular momenta, we obtain the total LDOS,
\begin{equation}
\rho(E)= \sum_{j=-\infty}^{+\infty} \rho^{(j)} (E).
\label{eq:tldos}
\end{equation}
It is easy to show that these general expressions, Eqs.~(\ref{eq: dos}) and (\ref{eq:tldos}), directly reduce to the corresponding ones given in Ref.~\cite{Matulis2008a} for circular GQDs with a rectangular confinement potential.
\subsection{Scattering coefficients}
\label{sec: scattering}
The scattering states are those with the asymptotic wave functions far from the origin being a superposition of an in-coming plane wave and an out-going (scattering) circular wave~\cite{Sakurai1994a}. Thus, for $r > r_f $, we write
\begin{equation}
\Psi_f (r,\phi) = \Psi_f^{(i)} (r, \phi) + \Psi_f^{(o)} (r, \phi),
\end{equation}
where the first and the second terms in the right-hand-side are the in-coming plane wave and out-going circular wave, respectively. The in-coming wave function $\Psi_f^{(i)}(r, \phi )$ is assumed to propagate along the $x$-direction with positive current density,
\begin{equation}
\Psi_f^{(i)} (r,\phi)= e^{isq_f r \cos \phi}
\left(
\begin{array}{c}
1 \\
\frac{s q_f}{E-U_f+ \nu \Delta}
\end{array}
\right),
\label{eq:plane-wave}
\end{equation}
where $q_f$ and $s$ have already been defined above. Note that for the electron to be propagated at large $r$, the energy should not be in the gap, $\abs{E-U}>\Delta$. Using the Jacobi--Anger identity~\cite{Cuyt2008a}, the plane wave function of Eq.~(\ref{eq:plane-wave}) can be decomposed into the eigen-functions of the angular momentum as
\begin{equation}
\Psi_f^{(i)}(r,\phi)=\sum_{j=-\infty}^{+\infty} (is)^{j-\frac{1}{2}} e^{ij{\phi}}
\left(
\begin{array}{c}
e^{-\frac{i}{2}\phi} \ J_{j-\frac{1}{2}}(q_f r) \\
e^{+\frac{i}{2}\phi} \ i\tau_f J_{j+\frac{1}{2}}(q_f r)
\end{array}
\right),
\end{equation}
with $\tau_f$ also already defined.
The scattering wave can be also expanded in the out-going waves of different angular momenta,
\begin{equation}
\Psi_f^{(o)} (r,\phi)=\sum_{j= -\infty}^{+\infty} a^{(j)} (is)^{j-\frac{1}{2}} e^{ij{\phi}}
\left(
\begin{array}{c}
e^{-\frac{i}{2}\phi} \ H^{s}_{{j-\frac{1}{2}}}(q_f r) \\
e^{+\frac{i}{2}\phi} \ i\tau_f H^{s}_{{j+\frac{1}{2}}}(q_f r)
\end{array}
\right),
\label{eq: out-going}
\end{equation}
where $a^{(j)}$ are regarded as scattering coefficients~\cite{Heinisch2013a,Schulz2014a,Schulz2015a}.
For $r<r_i$, similarly, the wave function can be decomposed into a linear combination of the wave functions of different angular momenta. Noting that to ensure the regularity of the wave function at the origin, the Bessel functions of the second kind are necessarily absent from this decomposition, one has
\begin{equation}
\Psi_i (r,\phi) =\sum_{j=-\infty}^{+\infty} c^{(j)} (is)^{j-\frac{1}{2}} e^{ij{\phi}}
\left(
\begin{array}{c}
e^{-\frac{i}{2}\phi} \ J_{{j-\frac{1}{2}}}(q_i r) \\
e^{+\frac{i}{2}\phi} \ i\tau_{i} J_{{j+\frac{1}{2}}}(q_i r)
\end{array}
\right),
\end{equation}
with $q_i$ and $\tau_{i}$ defined before and $c^{(j)}$ being some coefficients.
Further, since the basic coefficients in the two limiting regions, $r \le r_i$ and $r \ge r_f$, should be related to each other by the $\mathrm{T}$-matrix as in Eq.~(\ref{eq: T-matrix}), we find
\begin{equation}
\left(
\begin{array}{c}
a^{(j)}+1 \\
isa^{(j)}
\end{array}
\right)
=\mathrm{T}^{(j)}
\left(
\begin{array}{c}
c^{(j)} \\
0
\end{array}
\right),
\label{eq: amplitude equation}
\end{equation}
where the superscript $(j)$ is again introduced to indicate the $j$-dependence of $\mathrm{T}$-matrix. Solving Eq.~(\ref{eq: amplitude equation}) gives the scattering coefficients in terms of the $\mathrm{T}$-matrix elements:
\begin{equation}
a^{(j)}=\frac{-isT^{(j)}_{21}}{T^{(j)}_{11}+isT^{(j)}_{21}}.
\label{eq: scattering coefficients}
\end{equation}
Now it is important to note that for an unbound eigen-function of real energy, to ensure the probability current conservation, it requires that the coefficients for the total out-going waves and the total in-going waves should be equal in modulus~\cite{Sakurai1994a},
\begin{equation}
\abs{T_{11}^{(j)} - is T_{21}^{(j)}} = \abs{T_{11}^{(j)} + is T_{21}^{(j)}}.
\end{equation}
This implies that the scattering coefficients $a^{(j)}$ can be represented in terms of the so-called scattering phase-shifts~\cite{Sakurai1994a,Masir2011a},
\begin{equation}
a^{(j)}= \frac{1}{2} \left(e^{-i 2 \delta^{(j)}} -1\right),
\end{equation}
where
\begin{equation}
\delta^{(j)} = \frac{1}{2} \arg \left( \frac{T_{11}^{(j)} + is T_{21}^{(j)}}{T_{11}^{(j)} - is T_{21}^{(j)}}\right).
\label{eq: phase-shift}
\end{equation}
The differential scattering cross section, defined as the ratio of the probability flux of the out-going wave per unit angle to the probability flux of the in-coming wave per unit length~\cite{Sakurai1994a,Masir2011a}, can be found as
\begin{equation}
\frac{\d \sigma }{\d \phi} = \frac{2}{\pi q_f} \abs{\sum_{j=-\infty}^{+\infty} a^{(j)} e^{ij \phi}}^2.
\label{eq: scattering cross section}
\end{equation}
By integrating this expression over $\phi$, one finds the total scattering cross section,
\begin{equation}
\sigma = \frac{4}{q_f} \sum_{j=-\infty}^{+\infty} \sin^2 \delta^{(j)}.
\end{equation}
\bigskip
Thus, for circular GQDs with an arbitrary radial confinement potential of Eq.~(\ref{eq: general potential}), we have shown that the bound states as well as the QBSs spectra and the associated quantities such as the LDOS and the scattering coefficients can all be exactly expressed in terms of $\mathrm{T}$-matrix elements. Equations~(\ref{eq: bound states}), (\ref{eq: quasi-bound states}), (\ref{eq: dos}), and (\ref{eq: scattering coefficients}) are the key results of the present work. In particular cases, when the eigenstates of Hamiltonian (\ref{eq: H U}) can be found analytically (e.g., for a rectangular potential $U(r)$), these equations are exactly reduced to the corresponding expressions reported in various references. Generally, the $\mathrm{T}$-matrix can be calculated numerically. In the next section, as an example, we present numerical results obtained in the case of trapezoidal radial confinement potential.
\section{Example: Trapezoidal radial potential induced GQDs}
\label{sec: examples}
As a demonstration for the studies presented in the previous section, we consider a circular GQD induced by the radial potential of Eq.~(\ref{eq: general potential}) with: $U_i = U_0$, $r_i = (1-\alpha) L$, $U_f = 0$, $r_f = (1+\alpha) L$ and $U(r) = U_i + \frac{r - r_i }{ r_f - r_i }(U_f - U_i )$ for $r_i < r < r_f$. So, the considered confinement potential has a trapezoidal shape that is characterized by three parameters: the potential height $U_0$, the dot effective radius $L$, and the smoothness $\alpha$ that ranges from $0$ to $1$. In the limiting case of $\alpha = 0$, this potential is just the most studied rectangular one. The 1D trapezoidal potential is often used to describe the gate-induced graphene $n$-$p$-$n$-junctions \cite{Huard2007a,Sonin2009a}.
For given values of potential parameters as well as the angular momentum $j$, we first calculate the $\mathrm{T}$-matrix for the potential under study. In the case of $\alpha \ne 0$, the calculation of the $\mathrm{T}$-matrix requires solving the ODE~(\ref{eq: T-G}) numerically for the matrix $\mathrm{G}(r_f,r_i)$ with the Runge--Kutta method. Substituting the obtained $\mathrm{T}$-matrix elements into Eqs.~(\ref{eq: bound states}), (\ref{eq: quasi-bound states}), (\ref{eq: dos}), and (\ref{eq: scattering coefficients}), and solving these equations, we respectively obtain the energy spectra, the associated LDOS, and the scattering coefficients~\footnote{For the indicated parameters, the Runge--Kutta method with about $1024$ steps gave the typical accuracy of $10^{-5}$ for the elements of the $T$-matrix. The numerical solutions of Eqs.~(\ref{eq: bound states}) and ~(\ref{eq: quasi-bound states}) presented in Fig.~\ref{fig: spectrum L} and Fig.~\ref{fig: spectrum a} (a) were obtained at the effective resolution of at least $4026$ grid-points in each dimension. Bessel functions were computed using the corresponding subroutines from Ref.~\cite{Zhang1996b}}. Such calculations can be carried out for various values of the potential parameters and the angular momenta. As an example, some of the obtained results are presented in Figs.~1--4.
Note that we still use the quasi-relativistic quantum units ($\hbar=1$, $v_F=1$), so the dimension of energy is inverse of the length. For a comparison, to describe the usual experimental values of $L$ and $U_0$ ($L$ is of the order of $100$ nm and $U_0$ is of the order of $130$ meV), we choose $L$ to be about $1$ and $U_0$ to be about $20$.
\begin{figure}[!hbt]
\centering
\includegraphics[width=0.45\textwidth]{cspl.pdf}
\caption{(Colour online) Spectrum of bound states calculated from Eq.~(\ref{eq: bound states}) and QBSs
from Eq.~(\ref{eq: quasi-bound states}) for a GQD induced by the trapezoidal radial potential
of $U_0 = 15$ and $\alpha=0$. The lines represent the level
positions, plotted versus the dot effective radius L, while the
thickness of these lines represents the corresponding level widths.
Data are shown for $\nu=+$, $j = \frac{3}{2}$ and $\Delta=2$.}
\label{fig: spectrum L}
\end{figure}
\begin{figure}[!hbt]
\centering
\begin{minipage}{0.47\textwidth}
\begin{center}
\includegraphics[width=\textwidth]{cespf.pdf}
\end{center}
\end{minipage}\\
\begin{minipage}{0.47\textwidth}
\begin{center}
\includegraphics[width=\textwidth]{edos.pdf}
\end{center}
\end{minipage}
\caption{(Colour online) QBS spectra $(a)$ and LDOS $(b)$ of a GQD induced by the trapezoidal radial potential of $L=1$ and $U_0=20$ are presented for $\nu=+$, $j = \frac{3}{2}$ and various $\alpha$. In $(a)$: 5 curves correspond to 5 QBS levels, each describing how the QBS energy ($\Im(E)$ and $\Re(E)$) changes as $\alpha$ varies regularly from 0.3 (top) to 0.7 (bottom), correspondingly, from larger point-sizes to smaller point-sizes. In $(b)$: LDOS (in arbitrary unit) is shown for the three spectra with $\alpha$ given in the figure.}
\label{fig: spectrum a}
\end{figure}
We first set $\alpha=0$ and study the spectra of bound states and QBSs as $L$ changes from $1$ to $3$. Obtained results are shown in Fig.~\ref{fig: spectrum L}. The limiting lines $E = \pm \Delta$ and $E = U_0 \pm \Delta$ define qualitatively different energy regions. The region $U_0 - \Delta \le E \le U_0 + \Delta $ appears as a gap, where there exists neither bound states nor QBSs. On the other hand, the states in the region of energies $- \Delta \le E \le +\Delta $ are truly bound, while those outside these regions are QBSs. For the QBSs presented, the thickness of the lines represents the corresponding level widths. When $L$ increases, starting from the low energy region ($E < -\Delta$), the QBS-levels gradually rise to approach the boundary at $E=-\Delta$, and, at the same time, their widths gradually narrow to vanish at this boundary. Throughout the region $-\Delta<E<+\Delta$, the states are truly bound with zero level widths. At the opposite boundary $E=+\Delta$ the states are again converted to QBSs. So, one may observe a continuous QBS--bound state--QBS transition in the energy spectra of circular GQDs as the dot radius $L$ varies. Note that, in the case of zero-gap, $\Delta = 0$, the bound states region actually collapses into the line $E = 0$ (That is why these states have been referred to as zero-energy ones~\cite{Hewageegana2008a,Downing2011a}). At very large $L$, all levels converge to the two boundaries $E= U_0 \pm \Delta$ that describe the limiting case when a homogeneous potential of $U_0$ is applied on the entire graphene sheet.
In the gapless case, $\Delta = 0$, all the states other than zero-energy ones are just QBSs. In this case, the QBSs with energies in the region $0<E<U_0$ tend to have the level widths narrower than that for the QBSs with energies outside this region. It was suggested that the level widths of these QBSs can also be tuned by varying the smoothness of the confinement potential $\alpha$~\cite{Chen2007a}. Fig.~\ref{fig: spectrum a} $(a)$ shows how the complex energies of five different QBSs change as the smoothness $\alpha$ varies from $0.3$ to $0.7$ (correspondingly, point-sizes gradually decrease). Obviously, for any QBS under study, with increasing potential smoothness $\alpha$, while the real part of the energy $\Re(E)$ just changes slightly, the imaginary part $\Im(E)$ decreases substantially. This result is in a good agreement with those reported for 1D potentials~\cite{Nguyen2009a} and 2D power law potentials~\cite{Chen2007a}.
Next, we show in Fig.~\ref{fig: spectrum a}~$(b)$ the LDOSs (in arbitrary unit) for the three spectra with the $\alpha$-values examined in Fig.~\ref{fig: spectrum a}~$(a)$. Evidently, there is a good agreement between the positions of QBSs in $(a)$ and the corresponding resonant peaks of LDOS in $(b)$. Moreover, the imaginary parts of the QBS energies represent the widths of the corresponding LDOS peaks quite well. Thus, our results qualitatively demonstrate the correspondence between the QBSs and the LDOS peaks. In fact, the LDOS has already been used to determine QBSs indirectly~\cite{Masir2009a}. Quantitatively, it should however be noted that for very broad LDOS peaks, such as those at $E \approx 1$ in Fig.~\ref{fig: spectrum a}~$(b)$, the peak width may not correctly describe the life-time of the corresponding QBS.
To illustrate the $\mathrm{T}$-matrix-based scattering formalism developed in subsection~\ref{sec: scattering}, we calculate the low-energy differential scattering cross section $\d \sigma / \d \phi $ for the trapezoidal potential of $U_0=20$ and $L= 1$ ($\alpha$ is set to be zero for simplicity). In Fig.~\ref{fig: scattering}, obtained results of $\d\sigma / \d\phi $ are presented as a function of the scattering angle $\phi$ in three cases: $\Delta = 0$ (gapless), $0.5$, and $1$ (finite gap). In the gapless case (dash-dotted line), the differential scattering cross section vanishes at $\phi = \pm \pi$ (Fig.~\ref{fig: scattering}, inset), showing the undoubted effect of the Klein tunnelling. In the two cases of finite gap, on the contrary, $\d\sigma / \d\phi $ is always finite, implying an unavoidable presence of the back-scattering.
\begin{figure}[!hbt]
\centering
\includegraphics[width=0.45\textwidth]{cesct.pdf}
\caption{(Colour online) Low-energy differential scattering cross section is plotted as a function of scattering angle $\phi$ for the trapezoidal radial potential of $U_0=15$, $L=1$, and $\alpha=0$ in three cases of $\Delta$: $0$ (dash-dotted line), $0.5$ (dashed line), and $1$ (solid line). The inset zooms in the region of scattering angle around $\pi$. Data are shown for $E=2$ and $\nu=+1$.}
\label{fig: scattering}
\end{figure}
Besides, the two curves of finite gap (solid and dashed) in Fig.~\ref{fig: scattering} clearly show an asymmetrical behaviour with respect to the sign of $\phi$. A similar asymmetry has been discussed in the context of scattering of Dirac electrons by the so-called mass-barriers in Ref.~\cite{Masir2011a}. Note that by the reflection symmetry, $j \rightarrow -j$, $\nu \rightarrow -\nu$, electrons with opposite valley indices will scatter as if reflected along $\phi=0$, so no Hall-like voltage can be expected unless the injected current is valley-polarized. Nevertheless, with an unpolarised current, electrons of different valley indices are expected to accumulate on opposite edges of the graphene sample in the way similar to the spin Hall effect~\cite{Sinova2015a}. The valley-dependent asymmetric scattering was suggested to be used for the valley filtering purpose~\cite{Masir2011a}.
Further, to learn if the examined electrostatic potential can be used to control the valley polarisation of Dirac electrons like the mass potential does~\cite{Masir2011a}, we calculate the transverse scattering cross section defined as
\begin{equation}
\eta= \int_{-\pi}^{+\pi} \frac{\d \sigma}{\d \phi} \sin \phi \, \d \phi.
\end{equation}
Calculations have been performed for potentials of $L = 1$, $\alpha = 0$, and different $U_0$. Obtained results for $\eta$ are plotted as a function of the incident energy $E$ in Fig.~\ref{fig: eta}, where the three curves are different in $U_0$: $U_0 = 10$ (dash-dotted line), $20$ (dashed line), and $30$ (solid line). Remarkably, $\eta$ strongly fluctuates, changing its sign in a complicated way, depending on both $E$ and $U_0$. Consequently, the transverse scattering cross sections of valley-polarized electrons of slightly different energies (e.g., due to thermal fluctuations), or from slightly different potentials, might compensate each other, resulting in a vanishing net transverse scattering cross section. This is very different from the scattering of Dirac electrons by a mass-barrier studied in Ref.~\cite{Masir2011a}, where it was shown that the transverse scattering cross section generally keeps its sign unchanged as the energy of electron varies. Given the fact that an energy gap in graphene is often induced by an underlying substrate~\cite{Neto2009a,Guinea2010a}, a mass-barrier is likely to be accompanied by electrostatic disorders. Thus, although a more quantitative study is needed, we speculate that the electrostatic disorders and/or the thermal fluctuation may appear as an obstacle to controlling the valley polarization of Dirac electrons and, therefore, to observing the associated zero-field Hall and the valley filtering effects~\cite{Masir2011a,Guinea2010a} .
Finally, to gain some insight into the discussed fluctuating behaviour of the transverse scattering cross section $\eta$ observed in Fig.~\ref{fig: eta}, in the inset to this figure we compare three quantities, $\eta$, the total scattering cross section $\sigma$, and the total LDOS, all are plotted versus $E$. Obviously, there is a good correspondence between the peaks of the total LDOS resulted from QBSs of different angular momenta (labelled TLDOS) with those of the total (labelled $\sigma$) and transverse (labelled $\eta$) scattering cross sections. Note that the (rather shallow) peaks of the transverse scattering cross section come both as maxima and minima.
\begin{figure}[!hbt]
\centering
\includegraphics[width=0.45\textwidth]{cescte-inset.pdf}
\caption{ (Colour online) Transverse scattering cross section $\eta$ as a function of the incident energy $E$ for potentials of $L=1$, $\alpha=0$, and various $U_0$: $10$ (dash-dotted line), $20$ (dashed line), and $30$ (solid line). The inset zooms in a small region of energy (for $U_0=30$), where the total scattering cross section (labeled $\sigma$) and the (total) local density of states (labelled TLDOS) are also plotted for a comparison. [Note that the TLDOS (defined up to a constant factor) was rescaled to fit the figure.] Data are shown for $\Delta= 0.5$ and $\nu=+1$.}
\label{fig: eta}
\end{figure}
\section{Conclusion}
\label{sec: conclusion}
We have developed the $\mathrm{T}$-matrix formalism for studying electronic properties of the GQDs induced by a cylindrically symmetric confinement potential (circular GQDs). It was first shown that for circular GQDs with any radial confinement potential the equations for the bound states and QBSs spectra as well as the associated quantities such as the LDOS or scattering coefficients are all expressed explicitly in terms of the corresponding $\mathrm{T}$-matrix. In the case of simple confinement potentials (e.g., rectangular one), when the Dirac-like equation can be solved analytically, these equations give exactly the analytical results reported in various references. For any complicated potential, the $\mathrm{T}$-matrix can be determined numerically. As an example, we have in detail considered the case of trapezoidal radial confinement potentials, calculating the bound states and QBSs spectra, the LDOS, the differential scattering cross section, and the transverse scattering cross section for the potentials of different parameters. Apart from the role of a demonstration for the $\mathrm{T}$-matrix approach developed, obtained results in this example, in particular, suggest that controlling the valley polarization of Dirac electrons may turn out to be difficult in the presence of electrostatic disorders and/or thermal fluctuation. As an addition, we have shown how the developed $\mathrm{T}$-matrix formalism can be extended to study circular GQDs under a homogeneous perpendicular magnetic field (Appendix~\ref{sec: with B}).
\acknowledgements
We thank Cong Huy Pham and Duy Quang To for useful discussions.
This work was supported by Vietnam National Foundation for Science and Technology Development (NAFOSTED) under Grant No. 103.02-2013.17.
|
1,314,259,994,981 | arxiv |
\section*{}
\setcounter{page}{2}
\newpage
\setcounter{page}{1}
\section*{Introduction}
Let $G$ be a complex algebraic group. A smooth connected projective $G$-variety $X$ is called \textit{wonderful} of rank $r$, if $G$ stabilizes exactly $r$ irreducible divisors in $X$ having the following properties:
\begin{itemf}
\item[-] they are smooth and have a nonempty transversal intersection;
\item[-] two points of $X$ are on the same $G$-orbit if (and only if) they are contained in the same $G$-stable divisors.
\end{itemf}
In this paper, unless otherwise stated, we will assume the group $G$ to be semisimple and simply connected, and the action of the center of $G$ on wonderful varieties to be trivial. Wonderful varieties of rank 0 are then just the (generalized) flag varieties. Other examples are the wonderful completions of (adjoint) homogeneous symmetric spaces (\cite{DP83}).
In the recent years a general theory of wonderful varieties has been developed. Our aim in this paper is to give a somewhat unusual introduction to this theory, by presenting (all) examples when $G$ is simple of type $\mathsf F_4$, and studying many of them more in detail. This $G$, although of small rank, seems complicated enough to illustrate most of the phenomena present in the general case.
In Chapter~1, we recall some basic facts about wonderful varieties and their combinatorial invariants called \textit{spherical systems} (see \cite{Lu01} and \cite{Bri07a}). We very much emphasize \textit{spherical diagrams}: these diagrams provide a convenient way of visualizing spherical systems (just as Dynkin diagrams do for root systems), and will be omnipresent throughout the rest of the paper. We give the list of all 266 spherical diagrams of type $\mathsf F_4$:
\begin{itemf}
\item[-] 16 of rank 0 (the generalized flag varieties),
\item[-] 41 of rank 1,
\item[-] 61 of rank 2,
\item[-] 77 of rank 3,
\item[-] 71 of rank 4.
\end{itemf}
In Chapter~2, we present some further notions and results about wonderful varieties. Since our main intention here is only to prepare for the study of examples of type $\mathsf F_4$, this chapter is somewhat experimental and sketchy: some proofs are only outlined and several questions are left open (as already in Chapter~1). An exception is Section~\ref{sssphericalclosure}, where we reexamine and clarify the notion of spherical closure (introduced in \cite{Lu01}, Section~6), and obtain a general classification of spherical orbits in all simple projective $G$-spaces, which seems to be new.
In Chapter~3, we reach the main object of our study: in ten small sections, we examine more closely several of the 266 wonderful varieties of type $\mathsf F_4$, those we believe to be the most interesting. In a last section, we discuss also some examples which are not of type $\mathsf F_4$.
\bigskip
Let us say some words about the place of wonderful varieties in mathematics, to explain the usefulness of a general theory of wonderful varieties.
A good general notion of algebraic varieties ``having a big algebraic symmetry group'' is that of spherical varieties, i.e.\ of normal algebraic $G$-varieties, under a reductive connected group $G$, which are almost homogeneous under a Borel subgroup of $G$ (see \cite{Bri97} or \cite{T06} for an introduction). Spherical varieties are strongly related to (are the algebraic analog of) real symplectic manifolds equipped with a multiplicity-free Hamiltonian action of a compact Lie group (see \cite{Wo98}).
Wonderful varieties are spherical and, moreover, play a central role inside the theory of spherical varieties: indeed, to every spherical $G$-variety $Y$, one can associate, in a functorial way, a wonderful variety $\mathbf Y$, and $Y$ is then determined by the spherical system of $\mathbf Y$ together with some additional combinatorial data (see \cite{Lu01}, Theorem~3). This means that many properties of spherical varieties can be understood in geometrical terms of wonderful varieties. So it is not surprising that wonderful varieties play an important role in the classification of model homogeneous spaces (\cite{GZ84,AHV98,Lu07}), or in the multiplicity-free case of invariant Hilbert schemes (\cite{AB05,BC08}), or in the study of total coordinate rings of spherical varieties (\cite{Bri07a}).
Furthermore, wonderful varieties have some features of symmetric spaces: for instance, they naturally come with an interesting ``little Weyl group'' (\cite{Bri90,Kn95}). Lastly, let us mention that wonderful varieties are special cases of log homogeneous varieties (\cite{Bri07b,Bri08}).
\chapter{Wonderful varieties and\\ spherical systems}
In the first section, we will recall some notions which can be naturally attached to each wonderful variety (spherical roots, colors, Cartan pairing, \ldots ), with appropriate notations, and explain how they are related to the combinatorial invariants of $G$ (root system and Cartan matrix). This will prepare the more formal approach to the combinatorial invariants of wonderful varieties (i.e.\ spherical systems) given in the second section. In the third and last section, we will recall some facts about the simple group of type $\mathsf F_4$, and give the list of all 266 spherical systems of type $\mathsf F_4$.
In what follows, we fix two opposite Borel subgroups $B$ and ${}^-\! B$ of $G$, so that $T=B\cap {}^-\! B$ is a maximal torus of $G$. If $K$ is any affine algebraic group, we write $K^r$ (resp.\ $K^u$) for the radical (resp.\ the unipotent radical) of $K$, and $\Xi(K)$ for the group of characters of $K$. We identify $\Xi(B)$ and $\Xi(T)$, and denote $R\subset\Xi(T)$ the root system of $G$ and $S\subset R$ the basis of $R$ corresponding to $B$. The elements of $S$ are traditionally called \textit{simple roots}. For all $\alpha\in S$, we denote by $\alpha^\vee$ the co-root of $\alpha$, and by $P_\alpha\supset B$ and ${}^-\! P_\alpha\supset {}^-\! B$ the corresponding minimal parabolic subgroups of $G$. Remember that the integers $\langle\alpha^\vee,\beta\rangle$, for $(\alpha,\beta)\in S\times S$, are the coefficients of the Cartan matrix of $G$. We denote by $\Omega$ the set of fundamental weights of $G$; so $\mathbb N\Omega$ can be considered as the set of dominant weights.
Remember that an algebraic $G$-variety $Y$ is called \textit{spherical}, if it is normal and if $B$ has an open (dense) orbit in $Y$. It is known that $B$ has only finitely many orbits in spherical varieties. A \textit{color} of a spherical variety $Y$ is by definition an irreducible $B$-stable but not $G$-stable divisor in $Y$. Let us denote by $\Delta_Y$ the set of colors of $Y$ and by $\Xi(Y)$ the free abelian group of weights of $B$ in the field of rational functions on $Y$.
It is known that wonderful varieties are spherical.
\vspace{6ex}\section{Basic definitions and properties}\label{ssbasic}
In what follows $X$ will always denote a wonderful $G$-variety.
\subsection{Spherical roots}
Let $z$ be the unique point in $X$ fixed by ${}^-\! B$. \textit{Spherical roots} of $X$ are by definition weights of $T$ in $T_zX/T_z(G.z)$ (the normal space at $z$ of the closed orbit $G.z$ in $X$). Let us denote by $\Sigma_X$ the set of spherical roots of $X$. Since the action of the center of $G$ is assumed to be trivial, one has $\Sigma_X\subset\mathbb NS$. The smoothness of $X$ implies that $\Sigma_X$ is a basis of $\Xi(X)$.
For every $\sigma\in\Sigma_X$, let us denote by $D^\sigma$ the unique $G$-stable divisor such that $\sigma$ is the weight of $T$ in $T_zX/T_zD^\sigma$. One obtains in this way a natural bijection between $\Sigma_X$ and the set of irreducible $G$-stable divisors of $X$. In particular, the cardinality of $\Sigma_X$ equals the rank of $X$. For all $\Sigma'\subset\Sigma_X$, we set $X_{\Sigma'}=\cap_{\sigma\in\Sigma_X\setminus\Sigma'}D^\sigma$; each $X_{\Sigma'}$ is a wonderful $G$-subvariety of $X$ whose set of spherical roots is $\Sigma'$. One obtains in this way a bijection between the set of subsets of $\Sigma_X$ and the set of closed irreducible $G$-stable subsets of $X$.
The spherical root of any wonderful $G$-variety of rank 1 is also called a \textit{spherical root of $G$}. Let us denote by $\Sigma(G)$ the (finite) set of spherical roots of $G$. Since the spherical roots of $X$ are exactly those of the wonderful varieties $X_{\{\sigma\}}$ for all $\sigma\in\Sigma_X$, one has $\Sigma_X\subset\Sigma(G)$.
\subsection{Colors and simple roots}\label{ssscolors}
Let us now explain the relations between the set of colors $\Delta_X$ and the set of simple roots $S$.
For all $\alpha\in S$, let us set $\Delta(\alpha)=\{D\in\Delta_X: P_\alpha.D\neq D\}$. One has $\Delta_X=\cup_{\alpha\in S}\Delta(\alpha)$ and $0\leq\mathrm{card}(\Delta(\alpha))\leq2$.
Define $S^p_X$ as the set of simple roots $\alpha$ such that $\Delta(\alpha)=\emptyset$. The parabolic subgroup of $G$ generated by the $P_\alpha$'s, $\alpha\in S^p_X$, is sometimes denoted by $P=P_X$; it can also be defined as the stabilizer of the open orbit of $B$ in $X$. One has the useful formula: $\dim X=\mathrm{card}\, \Sigma_X+\dim P_X^u$. The parabolic subgroup ${}^-\! P$ of $G$ (opposite with respect to $T$) is equal to the isotropy group at $z$.
Set $S^a_X=S\cap\Sigma_X$. One has $\mathrm{card}(\Delta(\alpha))=2$ if and only if $\alpha\in S^a_X$. Set $S^{2a}_X=\{\alpha\in S: 2\alpha\in\Sigma_X\}$ and $S^b_X=S\setminus(S^p_X\cup S^a_X\cup S^{2a}_X)$.
Furthermore, let us set
\begin{itemf}
\item[-] $\Delta^a_X=\cup \Delta(\alpha)$, $\alpha\in S^a_X$;
\item[-] $\Delta^{2a}_X=\cup \Delta(\alpha)$, $\alpha\in S^{2a}_X$;
\item[-] $\Delta^b_X=\cup \Delta(\alpha)$, $\alpha\in S^b_X$.
\end{itemf}
The union $\Delta_X=\Delta^a_X\cup\Delta^{2a}_X\cup\Delta^b_X$ is disjoint.
\subsection{Cartan pairing}\label{ssscartan}
The classes $[D]$, $D\in\Delta_X$, form a basis of $\mathrm{Pic}(X)$. The pairing $c\colon\Delta_X\times\Sigma_X\to\mathbb Z$ given by $[D^\sigma]=\sum_{D\in\Delta_X} c(D,\sigma)[D]$ is called the \textit{Cartan pairing} of $X$.
The natural map $G/{}^-\! B\to G.z\subset X$ induces a group homomorphism $\omega$ from $\mathbb Z\Delta_X\cong\mathrm{Pic}(X)$ to $\mathrm{Pic}(G/{}^-\! B)\cong\mathbb Z\Omega$, the weight lattice of $G$. The way we labeled the $G$-stable divisors of $X$ implies $\omega([D^\sigma])=\sigma$.
The Cartan pairing of $X$ is related to the Cartan matrix of $G$ as follows:
\begin{itemf}
\item[-] if $\alpha\in S^a_X$ and if we denote by $D^+_\alpha$, $D^-_\alpha$ the two colors in $\Delta(\alpha)$,
\item[] then $c(D^+_\alpha,\sigma)+c(D^-_\alpha,\sigma)=\langle\alpha^\vee,\sigma\rangle$, for all $\sigma\in\Sigma_X$;
\item[-] if $\alpha\in S^{2a}_X$ and if we denote by $D_{2\alpha}$ the unique color in $\Delta(\alpha)$,
\item[] then $c(D_{2\alpha},\sigma)=\langle\alpha^\vee,\sigma\rangle/2$, for all $\sigma\in\Sigma_X$;
\item[-] if $\alpha\in S^b_X$ and if we denote by $D_\alpha$ the unique color in $\Delta(\alpha)$,
\item[] then $c(D_\alpha,\sigma)=\langle\alpha^\vee,\sigma\rangle$, for all $\sigma\in\Sigma_X$.
\end{itemf}
Moreover, if $\alpha, \beta\in S^b_X$, then $D_\alpha=D_\beta$ if and only if $\alpha\perp\beta$ and $\alpha+\beta\in\Sigma_X$.
\subsection{Localization (at a parabolic subgroup $Q$)}\label{localization}
Let $Q$ be a parabolic subgroup of $G$ containing $B$. We will denote by ${}^-\! Q$ the opposite parabolic with respect to $T$, and by $M=Q\cap {}^-\! Q$ the common Levi subgroup of $Q$ and ${}^-\! Q$.
Let $X$ be a wonderful $G$-variety. Let us denote by $Z$ the set of points of $X$ fixed by ${}^-\! Q^r$ (the radical of ${}^-\! Q$). This variety $Z$ is known to be a wonderful ${}^-\! Q/ {}^-\! Q^r$-variety called the wonderful variety obtained by \textit{localization} of $X$ at $Q$.
\subsection{Parabolic induction}\label{ssparabolic}
Conversely, from a wonderful ${}^-\! Q/ {}^-\! Q^r$-variety $Z$, one gets a
wonderful $G$-variety by $X=G\ast_{{}^-\! Q}Z$, the quotient of $G\times
Z$ under the action of $ {}^-\! Q$: $q.(g,y)=(gq^{-1},q.y)$, $q\in {}^-\! Q$,
$g\in G$, $y\in Z$. The localization of $G\ast_{{}^-\! Q}Z$ at $Q$ is clearly isomorphic to $Z$. This construction is called \textit{parabolic induction}. A wonderful variety which cannot be obtained by proper parabolic induction is sometimes called \textit{cuspidal}.
\subsection{Wonderful varieties of rank 1}\label{ssswonderfulvarietiesofrank1}
All wonderful varieties of rank 1 are well known (\cite{Ah83}). They are obtained by parabolic induction from a list of cuspidal wonderful varieties of rank 1. In particular, for every $G$, the set $\Sigma(G)$ (the set of spherical roots of $G$) is known.
A spherical root $\sigma$ has one of the following shapes. Its support is connected in all but one case; when it is connected, we write $\sigma=n_1\alpha_1+\ldots+n_r\alpha_r$, labeling the simple roots $\alpha_1,\ldots,\alpha_r$ in $\mathrm{supp}(\sigma)$ as in Bourbaki. We group them according to their type of support.
\bigskip
\begin{center}
\begin{tabular}{ll}
type of $\mathrm{supp}(\sigma)$&shape of $\sigma$\\
\hline
$\mathsf A_1$&$\alpha_1$\\
&$2\alpha_1$\vspace{0.1cm}\\
$\mathsf A_1\times\mathsf A_1$&$\alpha_1+\alpha_1'$\vspace{0.1cm}\\
$\mathsf A_r$, $r\geq2$&$\alpha_1+\ldots+\alpha_r$\vspace{0.1cm}\\
$\mathsf A_3$&$\alpha_1+2\alpha_2+\alpha_3$\vspace{0.1cm}\\
$\mathsf B_r$, $r\geq2$&$\alpha_1+\ldots+\alpha_r$\\
&$2\alpha_1+\ldots+2\alpha_r$\vspace{0.1cm}\\
$\mathsf B_3$&$\alpha_1+2\alpha_2+3\alpha_3$\vspace{0.1cm}\\
$\mathsf C_r$, $r\geq3$&$\alpha_1+2\alpha_2+\ldots+2\alpha_{r-1}+\alpha_r$\vspace{0.1cm}\\
$\mathsf D_r$, $r\geq4$&$2\alpha_1+\ldots+2\alpha_{r-2}+\alpha_{r-1}+\alpha_r$\vspace{0.1cm}\\
$\mathsf F_4$&$\alpha_1+2\alpha_2+3\alpha_3+2\alpha_4$\vspace{0.1cm}\\
$\mathsf G_2$&$\alpha_1+\alpha_2$\\
&$2\alpha_1+\alpha_2$\\
&$4\alpha_1+2\alpha_2$
\end{tabular}
\end{center}
\bigskip
For every wonderful $G$-variety $X$ of rank 1, the couple ($\sigma_X$, $S^p_X$) (where $\sigma_X$ is the spherical root of $X$ and $S^p_X$ is as defined in \ref{ssscolors}) determines $X$ up to $G$-isomorphism.
We call a couple ($\sigma$, $S^p$) (where $\sigma\in\Sigma(G)$ and $S^p\subset S$) \textit{compatible}, if it is obtained as above from a $G$-wonderful variety of rank 1.
A couple ($\sigma$, $S^p$) is compatible, if and only if $S^{pp}(\sigma)\subset S^p\subset S^p(\sigma)$, where $S^p(\sigma)$ denotes the set of simple roots orthogonal to $\sigma$, and where $S^{pp}(\sigma)$ is equal to
\begin{itemf}
\item[-] $S^p(\sigma)\cap\mathrm{supp}(\sigma)\setminus\{\alpha_r\}$ if $\sigma=\alpha_1+\ldots+\alpha_r$ with support of type $\mathsf B_r$, or
\item[-] $S^p(\sigma)\cap\mathrm{supp}(\sigma)\setminus\{\alpha_1\}$ if $\sigma$ has support of type $\mathsf C_r$, or
\item[-] $S^p(\sigma)\cap\mathrm{supp}(\sigma)$ otherwise.
\end{itemf}
\vspace{6ex}\section{Spherical systems}
In this section we introduce the notion of \textit{spherical system}, as a purely abstract (axiomatic) version of the invariants introduced in Section~\ref{ssbasic}, and state then a conjecture on the combinatorial classification of wonderful varieties.
Spherical roots are already, by definition, combinatorial invariants (related to the root system of $G$), but colors a priori are not. In order to give a combinatorial meaning to colors, we retain only their relations with spherical roots via the Cartan pairing. Moreover, we have seen in Section~\ref{ssbasic} that, if one knows the set of spherical roots and the isomorphism class of the closed orbit (i.e.\ the set $S^p$), then one knows already automatically the \textit{abstract} colors of types $2a$ and $b$. These remarks lead naturally to the following definition.
\subsection{The definition of spherical systems}
We call \textit{spherical system} of $G$ any triplet $\mathcal S=(\Sigma, S^p, \mathbf A)$, where
\begin{itemf}
\item[-] $\Sigma$ is a subset (without proportional elements) of $\Sigma(G)$,
\item[-] $S^p$ is a subset of $S$,
\item[-] $\mathbf A$ is a finite multi-subset of $(\mathbb Z\Sigma)^\ast=\mathrm{Hom}_\mathbb Z(\mathbb Z\Sigma,\mathbb Z)$ (i.e.\ $\mathbf A$ is a finite abstract set, together with a map $c\colon \mathbf A\to(\mathbb Z\Sigma)^\ast$, which we will consider also as a pairing $c\colon \mathbf A\times\Sigma\to\mathbb Z$)
\end{itemf}
satisfying the following properties (axioms):
\begin{itemf}
\item[(S)] $S^p$ is compatible with all $\sigma\in\Sigma$;
\item[(A1)] for all $\delta\in\mathbf A$ and $\sigma\in\Sigma$, one has $c(\delta,\sigma)\leq1$, and $c(\delta,\sigma)=1$ implies $\sigma\in S\cap\Sigma$;
\item[(A2)] for all $\alpha\in S\cap\Sigma$, the set $\mathbf A(\alpha)=\{\delta\in\mathbf A : c(\delta,\alpha)=1\}$ contains exactly two elements, and if $\mathbf A(\alpha)=\{\delta_\alpha^+,\delta_\alpha^-\}$ then $c(\delta_\alpha^+,\sigma)+c(\delta_\alpha^-,\sigma)=\langle\alpha^\vee,\sigma\rangle$, for all $\sigma\in\Sigma$;
\item[(A3)] $\mathbf A$ is the union of the $\mathbf A(\alpha)$'s, for $\alpha\in S\cap\Sigma$;
\item[($\Sigma$1)] if $2\alpha\in\Sigma\cap2S$, then $\langle\alpha^\vee,\sigma\rangle/2$ is a non-positive integer, for all $\sigma\in\Sigma\setminus\{2\alpha\}$;
\item[($\Sigma$2)] if $\alpha+\beta\in\Sigma$, with $\alpha,\beta\in S$ and $\alpha\perp\beta$, then $\langle\alpha^\vee,\sigma\rangle=\langle\beta^\vee,\sigma\rangle$ for all $\sigma\in\Sigma$.
\end{itemf}
If $X$ is any wonderful $G$-variety, then $\mathcal S_X=(\Sigma_X, S^p_X, \Delta^a_X)$ is a spherical system.
Let us explain here how all the colors and the full Cartan pairing can be recovered from the spherical system, since this will play a role in what follows. Let $\mathcal S=(\Sigma,S^p,\mathbf A)$ be a spherical system. Define $S^{2a}=\{\alpha\in S: 2\alpha\in\Sigma\cap2S\}$, $S^b=S\setminus(S^p\cup(S\cap\Sigma)\cup S^{2a})$ and $\Delta$ as the disjoint union $\mathbf A\cup S^{2a}\cup(S^b/\sim)$, where $\alpha,\beta$ are identified in $S^b$ if they are orthogonal and $\alpha+\beta\in\Sigma$. It is then clear how to define $c\colon\Delta\to(\mathbb Z\Sigma)^\ast$, that is, by the map $c$ already given on $\mathbf A$ and by the corresponding half-co-roots and co-roots on $S^{2a}$ and $S^b/\sim$. In this way one clearly recovers also the decomposition $\Delta=\cup_{\alpha\in S}\Delta(\alpha)$.
\subsection{The main question}\label{question}
Some years ago, the following question has been formulated:
\begin{question}[\cite{Lu01}]
Are wonderful varieties classified by their spherical systems?
\end{question}
This question has been answered positively in many cases
(\cite{W96,Lu01,BP05,Bra07,BC08,Cu08}). The ``uniqueness part'' (i.e.\
that two wonderful varieties having same spherical system are
$G$-isomorphic) has been proved last year in general (this follows
from results of I.V.~Losev in \cite{Lo07}). Although many spherical
systems have been geometrically realized (i.e.\ as spherical systems
of wonderful varieties), at present no general proof for this
``existence part'' exists in the literature (even for the case of type
$\mathsf F_4$).
\bigskip
\subsection{Spherical systems and localization}
Let $\mathcal S=(\Sigma, S^p, \mathbf A)$ be a spherical system.
For every $\Sigma'\subset\Sigma$, we define another spherical system by $\mathcal S_{\Sigma'}=(\Sigma',S^p,\mathbf A')$, where $\mathbf A'$ is the union of the $\mathbf A(\alpha)$'s, $\alpha\in S\cap\Sigma'$, and where the Cartan pairing $c\colon\mathbf A'\times\Sigma'\to\mathbb Z$ is obtained by restriction from $c\colon\mathbf A\times\Sigma\to\mathbb Z$. We will say that $\mathcal S_{\Sigma'}$ is obtained by \textit{localization} of $\mathcal S$ with respect to $\Sigma'$.
For every $S'\subset S$, we define still another spherical system $\mathcal S_{S'}=(\Sigma',(S^p)',\mathbf A')$, where
\begin{itemf}
\item[-] $\Sigma'=\Sigma\cap\mathbb NS'$;
\item[-] $(S^p)'=S^p\cap S'$;
\item[-] $\mathbf A'$ is the union of the $\mathbf A(\alpha)$'s, $\alpha\in S'\cap\Sigma$ and the Cartan pairing\\ $c\colon\mathbf A'\times\Sigma'\to\mathbb Z$ is obtained by restriction from $c\colon\mathbf A\times\Sigma\to\mathbb Z$.
\end{itemf}
We will say that $\mathcal S_{S'}$ is obtained by \textit{localization} of $\mathcal S$ with respect to $S'$. Notice that $\mathcal S_{S'}$ is a spherical system of the root system $R\cap\mathbb ZS'$.
Let $X$ be a wonderful $G$-variety having spherical system $\mathcal S=(\Sigma,S^p,\mathbf A)$. Let $Q$ be a parabolic subgroup of $G$ containing $B$, and let $S'$ be the set of $\alpha\in S$ such that $P_\alpha\subset Q$. Let us denote by $X_{S'}$ the localization $Z$ of $X$ at $Q$ introduced in \ref{localization}.
\begin{proposition}
\begin{itemf}
\item[]
\item[1)] The spherical system of $X_{\Sigma'}$ is $\mathcal S_{\Sigma'}$.
\item[2)] The spherical system of $X_{S'}$ is $\mathcal S_{S'}$.
\end{itemf}
\end{proposition}
\begin{sketche}
One uses the following interpretation of the Cartan pairing.
(*) Let $\alpha\in S\cap\Sigma$. Then $X_{\{\alpha\}}$, the localization of $X$ with respect to $P_\alpha$,
is isomorphic to $\mathbb P^1 \times \mathbb P^1$, where ${}^-\! P_\alpha$ acts via the natural morphism
${}^-\! P_\alpha\to \mathrm{PGL}(2)$. From this follows that $T$ has four fixed points in $X_{\{\alpha\}}$:
$z$, $s_\alpha.z$, and two other points $z_\alpha^+$, $z_\alpha^-$ exchanged by $s_\alpha$ (the involution
of $\mathrm{N}_G(T)/T$ associated to $\alpha$). For any $\sigma\in\Sigma\setminus\{\alpha\}$, the two points $z_\alpha^\pm$
are contained in $D^\sigma$, and one can show that the weight of $T$ in the
normal bundles of $D^\sigma$ in $X$ at $z_\alpha^\pm$ is given by $\sigma - c(D_\alpha^\pm , \sigma)\alpha$.
1) All assertions on the spherical system of $X_{\Sigma'}$ are easy, except those
concerning the Cartan pairing, which follow from (*).
2) One can identify the set of roots of ${}^-\! Q/{}^-\! Q^r$ to $R\cap\mathbb ZS'$, which is also the
set of $\beta\in R$ trivial on $M^r$. The variety $Z$ can also be characterized as the
connected component of the set of points of $X$ fixed by $M^r$, containing $z$
(the unique point of $X$ fixed by ${}^-\! B$). From this follows easily that the
spherical roots of the wonderful ${}^-\! Q/{}^-\! Q^r$-variety $Z$ are those of $\Sigma\cap \mathbb NS'$.
The other assertions are either easy, or follow from (*).
\end{sketche}
\bigskip
\textit{Remarks}
1) From the proposition follows: if a spherical system is geometrically realizable, then so are all its localizations.
2) The conjecture in \ref{question} can be reformulated in more geometrical terms.
Let us define the \textit{essential skeleton} of a wonderful $G$-variety,
as the union
of all wonderful $G$-subvarieties which either have rank 1, or have rank 2
and contain at least one simple root as spherical root. Part 1) of the proposition implies that the
essential skeleton determines the spherical system; conversely, since
wonderful varieties of rank 1 and 2 are known (\cite{W96}), by gluing some of
these together, one can associate to each spherical system its essential
skeleton. Then one can ask: does every essential skeleton come from a (unique) wonderful
variety?
\bigskip
\subsection{Diagrams}\label{sssdiagrams}
We will now introduce (spherical) \textit{diagrams}, which are a way to visualize
spherical systems (exactly as Dynkin diagrams allow to visualize root
systems). They are obtained by adding information to the Dynkin diagram
of the root system $R$.
Here is our way to represent spherical roots on the Dynkin diagram (see also \ref{ssswonderfulvarietiesofrank1}):
\begin{center}
\begin{tabular}{cl}
diagram&spherical root\\
\hline
\begin{picture}(900,2100)\put(0,0){\usebox{\aone}}\end{picture}&\begin{picture}(6000,1800)\put(0,600){$\alpha_1$}\end{picture}\\
\begin{picture}(900,1800)\put(0,0){\usebox{\aprime}}\end{picture}&\begin{picture}(6000,1800)\put(0,600){$2\alpha_1$}\end{picture}\\
\begin{picture}(3000,1800)\put(300,0){\line(1,0){2400}}\multiput(0,0)(2400,0){2}{\put(300,0){\line(0,1){600}}\put(300,900){\circle{600}}\put(300,900){\circle*{150}}}\end{picture}&\begin{picture}(6000,1800)\put(0,600){$\alpha_1+\alpha_1'$}\end{picture}\\
\begin{picture}(6000,1800)\put(0,600){\usebox{\mediumam}}\end{picture}&\begin{picture}(6000,1800)\put(0,600){$\alpha_1+\ldots+\alpha_r$}\end{picture}\\
\begin{picture}(3600,1800)\put(0,600){\usebox{\dthree}}\end{picture}&\begin{picture}(6000,1800)\put(0,600){$\alpha_1+2\alpha_2+\alpha_3$}\end{picture}\\
\begin{picture}(7500,1800)\put(0,600){\usebox{\shortbm}}\end{picture}&\begin{picture}(6000,1800)\put(0,600){$\alpha_1+\ldots+\alpha_r$}\end{picture}\\
\begin{picture}(7500,1800)\put(0,600){\usebox{\shortbprimem}}\end{picture}&\begin{picture}(6000,1800)\put(0,600){$2\alpha_1+\ldots+2\alpha_r$}\end{picture}\\
\begin{picture}(3900,1800)\put(0,600){\usebox{\bthirdthree}}\end{picture}&\begin{picture}(6000,1800)\put(0,600){$\alpha_1+2\alpha_2+3\alpha_3$}\end{picture}\\
\begin{picture}(9000,1800)\put(0,600){\usebox{\shortcm}}\end{picture}&\begin{picture}(6000,1800)\put(0,600){$\alpha_1+2\alpha_2+\ldots+2\alpha_{r-1}+\alpha_r$}\end{picture}\\
\begin{picture}(6900,2400)\put(0,0){\usebox{\shortdm}}\end{picture}&\begin{picture}(14400,2400)\put(0,900){$2\alpha_1+\ldots+2\alpha_{r-2}+\alpha_{r-1}+\alpha_r$}\end{picture}\\
\begin{picture}(5700,1800)\put(0,600){\usebox{\ffour}}\end{picture}&\begin{picture}(6000,1800)\put(0,600){$\alpha_1+2\alpha_2+3\alpha_3+2\alpha_4$}\end{picture}\\
\begin{picture}(2400,1800)\put(0,600){\usebox{\gsecondtwo}}\end{picture}&\begin{picture}(6000,1800)\put(0,600){$\alpha_1+\alpha_2$}\end{picture}\\
\begin{picture}(2400,1800)\put(0,600){\usebox{\gtwo}}\end{picture}&\begin{picture}(6000,1800)\put(0,600){$2\alpha_1+\alpha_2$}\end{picture}\\
\begin{picture}(2400,1800)\put(0,600){\usebox{\gprimetwo}}\end{picture}&\begin{picture}(6000,1800)\put(0,600){$4\alpha_1+2\alpha_2$}\end{picture}\\
\end{tabular}
\end{center}
\bigskip
Let us now explain how to represent a spherical system $\mathcal S = (\Sigma , S^p , \mathbf A)$. One begins by representing all its spherical roots. Then one represents $S^p$ by adding some (not shadowed) circles around vertices in such a way that $S^p$ becomes exactly the set of vertices having no circles around, below or above. If $S\cap\Sigma = \emptyset$, we are done: the result is what we call a diagram, which allows one to visualize spherical systems of the form $\mathcal S = (\Sigma , S^p , \emptyset)$.
\bigskip
\begin{examplenumber}
\[\begin{picture}(5700,1200)\multiput(300,900)(1800,0){2}{\usebox{\segm}}\put(3900,900){\usebox{\rightbisegm}}\multiput(0,0)(1800,0){2}{\usebox{\aprime}}\put(3600,600){\usebox{\GreyCircleTwo}}\end{picture}\]
Here $G$ is of type $\mathsf B_4$. There are 3 spherical roots, $\sigma_1=2\alpha_1$, $\sigma_2=2\alpha_2$ and $\sigma_3=2\alpha_3+2\alpha_4$. Notice that, for $\alpha_1$ and $\alpha_2$, Axiom~$\Sigma$1 is satisfied. Moreover, $S^p=\{\alpha_4\}$.
\end{examplenumber}
\bigskip
\begin{examplenumber}
\[\begin{picture}(6000,1200)\multiput(300,900)(1800,0){2}{\usebox{\segm}}\put(3900,900){\usebox{\leftbisegm}}\multiput(300,900)(5400,0){2}{\circle{600}}\multiput(300,0)(5400,0){2}{\line(0,1){600}}\put(300,0){\line(1,0){5400}}\put(1800,600){\usebox{\atwo}}\end{picture}\]
Here $G$ is of type $\mathsf C_4$. There are 2 spherical roots, $\sigma_1=\alpha_1+\alpha_4$ and $\sigma_2=\alpha_2+\alpha_3$. Notice that, for $\alpha_1$ and $\alpha_4$, Axiom~$\Sigma$2 is satisfied. Here $S^p=\emptyset$.
\end{examplenumber}
If $S\cap\Sigma\neq\emptyset$, more information is needed. The set $S\cap\Sigma$ corresponds to the set of vertices which have circles above and below. For each $\alpha\in S\cap\Sigma$, we identify these two circles with the elements of $\mathbf A(\alpha)$, the circle above to $\delta_\alpha^+$, where $\delta_\alpha^+$ is chosen such that $c(\delta_\alpha^+,\sigma)\in\{1,0,-1\}$, for every spherical root $\sigma$. Then we join circles in different $\mathbf A(\alpha)$'s by lines, if they correspond to the same element in $\mathbf A$. Finally, for every spherical root $\sigma$ not orthogonal to $\alpha$ such that $c(\delta_\alpha^+, \sigma) = -1$, we add an arrow of the form $<$ or $>$, starting from the circle corresponding to $\delta_\alpha^+$, and pointing toward $\sigma$. This can always be done, and the set $\mathbf A$ and the Cartan pairing $c\colon \mathbf A \times\Sigma\to \mathbb Z$ is then determined by Axiom~A2.
\bigskip
\begin{examplenumber}
\[\begin{picture}(6000,2700)\multiput(300,1350)(1800,0){3}{\usebox{\segm}}\multiput(0,450)(1800,0){2}{\usebox{\aone}}\put(5400,450){\usebox{\aone}}\put(3900,1350){\circle{600}}\multiput(300,2700)(5400,0){2}{\line(0,-1){450}}\put(300,2700){\line(1,0){5400}}\multiput(2100,0)(3600,0){2}{\line(0,1){450}}\put(2100,0){\line(1,0){3600}}\put(700,1750){\usebox{\toe}}\end{picture}\]
Here $G$ is of type $\mathsf A_4$. There are 3 spherical roots, all of them are simple roots: $\Sigma=\{\alpha_1,\alpha_2,\alpha_4\}$; $S^p=\emptyset$.
The set $\mathbf A$ has 4 elements: $\delta_{\alpha_1}^+=\delta_{\alpha_4}^+\in\mathbf A(\alpha_1)\cap\mathbf A(\alpha_4)$, $\delta_{\alpha_1}^-\in\mathbf A(\alpha_1)$, $\delta_{\alpha_2}^+\in\mathbf A(\alpha_2)$, $\delta_{\alpha_2}^-=\delta_{\alpha_4}^-\in\mathbf A(\alpha_2)\cap\mathbf A(\alpha_4)$.
Since $\delta_{\alpha_1}^+$ belongs to $\mathbf A(\alpha_1)\cap\mathbf A(\alpha_4)$, $c(\delta_{\alpha_1}^+,\alpha_1)=c(\delta_{\alpha_1}^+,\alpha_4)=1$; since there is an arrow from $\delta_{\alpha_1}^+$ to $\alpha_2$, $c(\delta_{\alpha_1}^+,\alpha_2)=-1$. Then $c(\delta_{\alpha_1}^-)$ is determined by Axiom~A2, since $\{\delta_{\alpha_1}^+,\delta_{\alpha_1}^-\}=\mathbf A(\alpha_1)$ and $c(\delta_{\alpha_1}^+,-)+c(\delta_{\alpha_1}^-,-)=\langle\alpha_1^\vee,-\rangle$. Analogously, $c(\delta_{\alpha_4}^-)$ is determined since $\delta_{\alpha_1}^+=\delta_{\alpha_4}^+$. Finally, once $c(\delta_{\alpha_4}^-)$ is determined, so is $c(\delta_{\alpha_2}^+)$. The (restricted) Cartan pairing is then as follows:
\[\begin{array}{r|rrr}c(-,-)&\alpha_1&\alpha_2&\alpha_4\\\hline \delta_{\alpha_1}^+&1&-1&1\\\delta_{\alpha_1}^-&1&0&-1\\\delta_{\alpha_2}^+&0&1&-1\\\delta_{\alpha_2}^-&-1&1&1\\
\end{array}\]
\end{examplenumber}
\bigskip
\begin{examplenumber}
\[\begin{picture}(6000,1800)\put(300,900){\usebox{\dynkinf}}\put(3600,600){\usebox{\atwo}}\put(0,0){\usebox{\aone}}\put(1800,600){\usebox{\GreyCircle}}\end{picture}\]
Here $G$ is of type $\mathsf F_4$. There are 3 spherical roots, $\sigma_1=\alpha_1$, $\sigma_2=\alpha_2+\alpha_3$ and $\sigma_3=\alpha_3+\alpha_4$, $S^p=\emptyset$, $\mathbf A=\{\delta_{\alpha_1}^+,\delta_{\alpha_1}^-\}$. Since there is no arrow in the diagram, $c(\delta_{\alpha_1}^+,\sigma_i)=0$, for $i=2,3$.
\end{examplenumber}
At this stage, we should remark that diagrams not only allow to visualize spherical systems, but directly the set of all colors $\Delta$ and the (full) Cartan
pairing \mbox{$c\colon \Delta\times\Sigma\to\mathbb Z$}. Indeed, the way we have defined them, there is a natural bijection between the set $\Delta$ and the set of equivalence classes of circles of a diagram (two circles are equivalent if they are joined by a line); moreover, the Cartan pairing can be read off easily from the diagram.
Let us look again at the examples above.
In Example 1, there are 3 colors, 2 of type $2a$, $\delta_{2\alpha_1}\in\Delta(\alpha_1)$, $\delta_{2\alpha_2}\in\Delta(\alpha_2)$, and 1 of type $b$, $\delta_{\alpha_3}\in\Delta(\alpha_3)$. By \ref{ssscartan}, one has $c(\delta_{2\alpha_i},-)={1\over 2}\langle\alpha_i^\vee,-\rangle$, $i=1,2$, and $c(\delta_{\alpha_3},-)=\langle\alpha_3^\vee,-\rangle$. So the Cartan pairing is as follows:
\[\begin{array}{r|rrr}c(-,-)&\sigma_1&\sigma_2&\sigma_3\\\hline \delta_{2\alpha_1}&2&-1&0\\\delta_{2\alpha_2}&-1&2&-1\\\delta_{\alpha_3}&0&-2&2\end{array}\]
In Example 4, there are 5 colors, 2 of type $a$, $\delta_{\alpha_1}^+,\delta_{\alpha_1}^-\in\Delta(\alpha_1)$, and 3 of type $b$, $\delta_{\alpha_i}\in\Delta(\alpha_i)$, $i=2,3,4$. For $\delta_{\alpha_i}$ of type $b$, $c(\delta_{\alpha_i},-)=\langle\alpha_i^\vee,-\rangle$. The full Cartan pairing is then as follows:
\[\begin{array}{r|rrr}c(-,-)&\sigma_1&\sigma_2&\sigma_3\\\hline \delta_{\alpha_1}^+&1&0&0\\\delta_{\alpha_1}^-&1&-1&0\\\delta_{\alpha_2}&-1&1&-1\\\delta_{\alpha_3}&0&0&1\\\delta_{\alpha_4}&0&-1&1\end{array}\]
The reader is invited to determine colors and Cartan pairings for the two other diagrams above. In Chapter~3, we will often leave this to the reader.
As another exercise, the reader should also determine, for every spherical system $\mathcal S$ given by one of the diagrams above, all the diagrams corresponding to the different localizations of $\mathcal S$.
\vspace{6ex}\section{Type $\mathsf F_4$}\label{sstype}
In this section, unless otherwise stated, $G$ will denote a simple group of
type $\mathsf F_4$. This group is unique (up to isomorphism), since adjoint and
simply connected groups of type $\mathsf F_4$ coincide, and has rank 4 and
dimension 52.
The Dynkin diagram of $G$ is
\[\begin{picture}(5400,600)\multiput(0,300)(3600,0){2}{\usebox{\segm}}\put(1800,300){\usebox{\rightbisegm}}\end{picture}\]
and the 24 positive roots of $G$ are
\begin{itemize}
\item[] \mbox{$\alpha_1$},\quad \mbox{$\alpha_2$},\quad \mbox{$\alpha_3$},\quad \mbox{$\alpha_4$},\quad \mbox{$\alpha_1+\alpha_2$},\quad \mbox{$\alpha_2+\alpha_3$},\quad \mbox{$\alpha_3+\alpha_4$},\quad \mbox{$\alpha_1+\alpha_2+\alpha_3$},\quad\\ \mbox{$\alpha_2+\alpha_3+\alpha_4$},\quad \mbox{$\alpha_1+\alpha_2+\alpha_3+\alpha_4$},
\item[] \mbox{$\alpha_2+2\alpha_3$},\quad \mbox{$\alpha_1+\alpha_2+2\alpha_3$},\quad \mbox{$\alpha_1+2\alpha_2+2\alpha_3$},
\item[] \mbox{$\alpha_2+2\alpha_3+\alpha_4$},\quad \mbox{$\alpha_2+2\alpha_3+2\alpha_4$},
\item[] \mbox{$\alpha_1+\alpha_2+2\alpha_3+\alpha_4$},\quad \mbox{$\alpha_1+2\alpha_2+2\alpha_3+\alpha_4$},\quad \mbox{$\alpha_1+2\alpha_2+3\alpha_3+\alpha_4$},\quad\\ \mbox{$\alpha_1+\alpha_2+2\alpha_3+2\alpha_4$},\quad \mbox{$\alpha_1+2\alpha_2+2\alpha_3+2\alpha_4$},\quad \mbox{$\alpha_1+2\alpha_2+3\alpha_3+2\alpha_4$},\quad\\ \mbox{$\alpha_1+2\alpha_2+4\alpha_3+2\alpha_4$},\quad \mbox{$\alpha_1+3\alpha_2+4\alpha_3+2\alpha_4$},\quad \mbox{$2\alpha_1+3\alpha_2+4\alpha_3+2\alpha_4$}.
\end{itemize}
Since $G$ is adjoint and simply connected, the fundamental weights belong
to the root lattice. They are
\begin{itemf}
\item[] $\omega_1=2\alpha_1+3\alpha_2+4\alpha_3+2\alpha_4$,
\item[] $\omega_2=3\alpha_1+6\alpha_2+8\alpha_3+4\alpha_4$,
\item[] $\omega_3=2\alpha_1+4\alpha_2+6\alpha_3+3\alpha_4$,
\item[] $\omega_4=\alpha_1+2\alpha_2+3\alpha_3+2\alpha_4$.
\end{itemf}
The corresponding fundamental representations have dimensions
respectively 52, 1274, 273 and 26.
\subsection{Structure of the maximal parabolic subgroups}\label{ssstructure}
For a moment, let $G$ again be an arbitrary semisimple group.
Let $Q$ be a maximal parabolic subgroup of $G$ containing $B$. Let us write (as before) ${}^-\! Q$ for the opposite parabolic subgroup with respect to $T$, and $M=Q\cap{}^-\! Q$ for the common Levi subgroup of $Q$ and ${}^-\! Q$.
Then $\mathrm{Lie}(Q^u)$ is graded
\[\mathrm{Lie}(Q^u)=\bigoplus_{i=1}^s\mathfrak n_i\]
by the action of $M^r$, which is $\cong\mathbb C^\times$. Moreover, the $\mathfrak n_i$'s are simple $M$-modules and one has $[\mathfrak n_i,\mathfrak n_j]\subset\mathfrak n_{i+j}$, so in particular
\[\mathrm{Lie}((Q^u,Q^u))=\bigoplus_{i=2}^s\mathfrak n_i.\]
Furthermore,
\[\mathrm{Lie}({}^-\! Q^u)=\bigoplus_{i=1}^s\mathfrak n_{-i},\]
where the $\mathfrak n_{-i}$ are isomorphic to the duals $\mathfrak n_i^\ast$ as $M$-modules.
The maximal parabolic subgroups of $G$ can be indexed by simple roots in the following way. To every $\alpha\in S$, one associates $Q_\alpha$, the unique maximal parabolic subgroup of $G$ containing $B$ such that $P_\alpha\not\subset Q_\alpha$. Then the unique color of $G/{}^-\! Q_\alpha$ is $D_\alpha$ (a color of type $b$).
Let us return now to the case when $G$ is of type $\mathsf F_4$. The general picture above particularizes then to the data given in the following table.
\begin{center}
\begin{tabular}{l|cccccc}
&semisimple type of $M$&$s$&$\dim \mathfrak n_1$&$\dim \mathfrak n_2$&$\dim \mathfrak n_3$&$\dim \mathfrak n_4$\\
\hline
$Q_{\alpha_1}$&$\mathsf C_3$&2&14&1\\
$Q_{\alpha_2}$&$\mathsf A_1\times\mathsf A_2$&3&12&6&2\\
$Q_{\alpha_3}$&$\mathsf A_2\times\mathsf A_1$&4&6&9&2&3\\
$Q_{\alpha_4}$&$\mathsf B_3$&2&8&7
\end{tabular}
\end{center}
\begin{remark}
When $G$ is of type $\mathsf F_4$, every parabolic subgroup $Q$ is conjugated to ${}^-\! Q$. But since we want to present the case of type $\mathsf F_4$ as example of the general case, we will not use this to simplify our notations.
\end{remark}
\subsection{Spherical roots of type $\mathsf F_4$}\label{ssssphericalroots}
Using the table in \ref{ssswonderfulvarietiesofrank1}, one obtains the list of all spherical roots of type $\mathsf F_4$:
\begin{itemize}
\item[] $\alpha_1$,\quad $\alpha_2$,\quad $\alpha_3$,\quad $\alpha_4$,\quad $2\alpha_1$,\quad $2\alpha_2$,\quad $2\alpha_3$,\quad $2\alpha_4$,
\item[] $\alpha_1+\alpha_3$,\quad $\alpha_1+\alpha_4$,\quad $\alpha_2+\alpha_4$,
\item[] $\alpha_1+\alpha_2$,\quad $\alpha_3+\alpha_4$,
\item[] $\alpha_2+\alpha_3$,\quad $2\alpha_2+2\alpha_3$,
\item[] $\alpha_1+\alpha_2+\alpha_3$,\quad $2\alpha_1+2\alpha_2+2\alpha_3$,\quad $\alpha_1+2\alpha_2+3\alpha_3$,
\item[] $\alpha_2+2\alpha_3+\alpha_4$,
\item[] $\alpha_1+2\alpha_2+3\alpha_3+2\alpha_4$.
\end{itemize}
Since compatible couples $(\sigma,S^p)$ are in bijective correspondence with spherical systems of rank 1, we refer to Tables~\ref{tr1a1}--\ref{tr1f4} below (where we list all these systems of type $\mathsf F_4$) for an explicit description of these couples.
\subsection{Spherical systems of type $\mathsf F_4$}\label{ssssphericalsystems}
In the tables below, we will give the complete list of the diagrams of all spherical systems $(\Sigma,S^p,\mathbf A)$ of type $\mathsf F_4$, ordered by their rank and by the support of $\Sigma$. Recall that the spherical systems of rank 0 correspond to the generalized flag varieties, and those of rank 1 to compatible couples.
The spherical systems of rank 4 are subdivided into two tables: Table~\ref{tr4ss} for spherical systems admitting a morphism to the spherical system of the full flag variety ($\emptyset$, $\emptyset$, $\emptyset$), called \textit{strongly solvable} (see \ref{sssquotient2}); Table~\ref{tr4o} for the remaining cases.
\input{tables.1}
\chapter{Further notions concerning wonderful varieties}
In this chapter, we will go beyond the basic notions of Chapter~1. We will introduce the notion of wonderful subgroup, which allows a Lie theory point of view on wonderful varieties. Then, after mentioning briefly facts on equivariant automorphisms, we will introduce and study a natural notion of morphism between wonderful varieties. This will be our main tool for analyzing the examples of type $\mathsf F_4$ in Chapter~3. Finally, we will examine the relations which spherical orbits in simple projective spaces have with wonderful varieties.
In what follows, $X$ will always denote a wonderful $G$-variety, $\mathcal S=(\Sigma, S^p, \mathbf A)$ its spherical system, $\Delta$ its set of colors and $H$ will be an isotropy group of $G$ at a point in the open orbit of $X$. The Cartan pairing $c\colon \Delta\times\Sigma\to\mathbb Z$ will also be considered as a $\mathbb Z$-bilinear pairing $c\colon\mathbb Z\Delta\times\mathbb Z\Sigma\to\mathbb Z$.
We put $d(\mathcal S)=\mathrm{card}(\Delta)-\mathrm{card}(\Sigma)$, an integer which we will call the \textit{defect} of $\mathcal S$; this integer is also equal to the rank of $\Xi(H)$.
\vspace{6ex}\section{Wonderful subgroups}
We will call \textit{wonderful subgroups} of $G$ those subgroups which are isotropy groups at points in wonderful $G$-varieties.
If $H$ is a wonderful subgroup of $G$, a wonderful $G$-variety whose open orbit is isomorphic to $G/H$ is called a \textit{wonderful completion} of $G/H$. This wonderful completion exists (by definition) and is unique up to $G$-isomorphism.
Every wonderful subgroup $H$ of $G$ is a spherical subgroup of $G$ (i.e.\ $B$ has an open orbit in $G/H$) and $\mathrm{N}_G(H)/H$ is finite. The converse is not true in general; but every spherical subgroup equal to its normalizer is wonderful (this result is due to F.~Knop \cite{Kn96}).
\smallskip
Let us place here some general considerations. We have now three (equivalent) levels in our subject of study:
\begin{itemf}
\item[-] wonderful varieties, which is the level of highest geometric content; but these varieties, with the exception of some of low rank, can rarely be described and studied explicitly, since they appear in a natural way only as subvarieties of very high dimensional projective spaces;
\item[-] wonderful subgroups, which is the level of Lie theory; these subgroups, more precisely their conjugacy classes, are more accessible as we will see in the examples of Chapter~3;
\item[-] spherical systems, which is the combinatorial level; this is the most accessible level, since these invariants can be described most easily, as we have seen already in Chapter~1.
\end{itemf}
The difficulty is to go from one level to another. For instance, from a point $x$ on the open orbit of $G$ in a wonderful variety $X$ (where one is near $H=G_x$, a wonderful subgroup), the closed orbit of $G$ in $X$ looks to be very far, ``at infinity''. But the spherical system of $X$ can be read off most easily at the points $z$ and $z_\alpha^\pm$ ($\alpha\in\Sigma\cap S$), which are on or near the closed orbit (see Chapter~1). How to go (in general) from the spherical system to the wonderful subgroup (and back), is for the moment not completely understood.
When $H$ is a wonderful subgroup, by \textit{its} spherical system we will of course mean the spherical system of the wonderful completion of $G/H$. Conversely, when we talk about the wonderful subgroup of (or associated to) a spherical system $\mathcal S$, we will mean of course the generic isotropy group of the wonderful variety having $\mathcal S$ as spherical system. Similar remarks apply to wonderful subgroups and diagrams.
Here are some (first) relations between properties of $H$ and properties of $\mathcal S$:
\begin{itemf}
\item[-] $H$ is reductive if and only if there exists $\sigma\in\mathbb N\Sigma$ such that $c(\delta,\sigma)>0$ for all $\delta\in\Delta$.
\item[-] $H$ is very reductive in $G$ if and only if $d(\mathcal S)=0$
\end{itemf}
($H$ \textit{very reductive} in $G$ means that $H$ is not contained in any proper parabolic subgroup of $G$; very reductive implies reductive, and even semisimple if $H$ is connected).
\bigskip
\begin{example}\label{exa2}
Consider the following diagrams:
\[\begin{picture}(2400,1800)\put(300,900){\usebox{\segm}}\multiput(0,0)(1800,0){2}{\usebox{\aprime}}\end{picture}\quad\quad
\begin{picture}(2400,1800)\put(0,600){\usebox{\atwo}}\end{picture}\quad\quad
\begin{picture}(2400,1800)\put(300,900){\usebox{\segm}}\multiput(0,0)(1800,0){2}{\usebox{\aone}}\end{picture}\quad\quad
\begin{picture}(2400,2250)\put(300,900){\usebox{\segm}}\multiput(0,0)(1800,0){2}{\usebox{\aone}}\multiput(300,1800)(1800,0){2}{\line(0,1){450}}\put(300,2250){\line(1,0){1800}}\end{picture}\]
Here $G=\mathrm{SL}(3)$, and corresponding wonderful subgroups are (from left to right):
\begin{itemf}
\item[-] $H=\mathrm{SO}(3)\cdot C_G$ (where $C_G$ denotes the center of $G$); this $H$ is very reductive in $G$;
\item[-] $H=\mathrm{GL}(2)$, which is reductive but not very reductive in $G$;
\item[-] $H=T\,U_{\alpha}$, where $\alpha$ is any root and $U_{\alpha}$ denotes the corresponding unipotent subgroup of dimension 1; these $H$'s are conjugated in $G$;
\item[-] $H=L\,H^u$, where $L$ is a subtorus of dimension 1 of $T$ and $H^u$ is a unipotent subgroup of dimension 2 of $G$ such that $L=\mathrm N_T(H^u)$; these $H$'s are conjugated in $G$.
\end{itemf}
\end{example}
\vspace{6ex}\section{Equivariant automorphisms}\label{sssequivariantautomorphisms}
Remember that $X$ denotes a wonderful $G$-variety, $\mathcal S = (\Sigma, S^p , \mathbf A)$ its spherical system and $\Delta$ its set of colors. Let us choose a point $x$ in the open orbit of $G$ in $X$, and put $H = G_x$.
We will say that a spherical root $\sigma\in\Sigma$ is \textit{loose} in $\mathcal S$, if
\begin{itemf}
\item[-] either $\sigma=\alpha\in S\cap\Sigma$ and $c(\delta^+_\alpha,\sigma')=c(\delta^-_\alpha,\sigma')$, for all $\sigma'\in\Sigma$;
\item[-] or $\sigma\in\Sigma\setminus S$, $2\sigma\in\Sigma(G)$ and the couple $(2\sigma, S^p)$ is compatible (in the sense of spherical systems, see \ref{ssswonderfulvarietiesofrank1}).
\end{itemf}
We denote by $\Sigma_\ell(\mathcal S)$ the set of spherical roots that are loose in $\mathcal S$.
Let us denote by $\Gamma=\Gamma_X=\mathrm{Aut}_G(X)$ the group of $G$-automorphisms of $X$.
Restriction to $G/H=G.x\subset X$ induces an isomorphism between $\Gamma$ and $\mathrm{Aut}_G(G/H)=(\mathrm{N}_G(H)/H)^{\mathrm{opp}}$.
For every $\sigma\in\Sigma_\ell(\mathcal S)$, there exists a unique element $\gamma(\sigma)$ of order 2 in $\Gamma$ that fixes the points of the divisor $D^\sigma$ (this follows from the conjecture of \ref{question}; a direct proof has been given by I.~Losev in \cite{Lo07}).
Moreover, these $\gamma(\sigma)$, $\sigma\in\Sigma_\ell(\mathcal S)$, commute and generate $\Gamma$. For every $\sigma\in\Sigma_\ell(\mathcal S)$, the variety $X/\sigma$ is again wonderful, and its spherical roots are $\{2\sigma\}\cup(\Sigma\setminus\{\sigma\})$.
As we will see in the following section, the presence of nontrivial equivariant automorphisms introduces some complications. But it is clear by the analysis above, that for a majority of wonderful varieties $X$, $\Gamma_X$ is reduced to the identity (i.e.\ $\Sigma_\ell(\mathcal S)=\emptyset$).
Here are some simple examples of type $\mathsf B_3$ with $\Sigma_\ell(\mathcal S)\neq\emptyset$ (we will give $\mathcal S$ by its diagram):
\[\begin{picture}(4200,1800)\put(300,900){\usebox{\segm}}\put(2100,900){\usebox{\rightbisegm}}\put(300,900){\circle{600}}\put(1800,0){\usebox{\aone}}\put(2500,1300){\usebox{\toe}}\put(3600,0){\usebox{\aprime}}\end{picture}\qquad\begin{picture}(7200,1800)\put(0,600){$\Sigma_\ell=\{\alpha_2\}$}\end{picture}\]
\[\begin{picture}(4200,1800)\put(300,900){\usebox{\segm}}\put(2100,900){\usebox{\rightbisegm}}\put(0,0){\usebox{\aone}}\put(1800,600){\usebox{\GreyCircle}}\end{picture}\qquad\begin{picture}(7200,1800)\put(0,600){$\Sigma_\ell=\{\alpha_2+\alpha_3\}$}\end{picture}\]
\[\begin{picture}(4200,1800)\put(300,900){\usebox{\segm}}\put(2100,900){\usebox{\rightbisegm}}\put(0,600){\usebox{\atwo}}\put(1800,600){\usebox{\GreyCircle}}\put(3600,0){\usebox{\aone}}\put(3100,1300){\usebox{\tow}}\end{picture}\qquad\begin{picture}(7200,1800)\put(0,600){$\Sigma_\ell=\{\alpha_3\}$}\end{picture}\]
\vspace{6ex}\section{Wonderful morphisms}
1) A $G$-morphism $\phi\colon X\to \,\grave{} X$ will be called \textit{wonderful} if $X$ and $\,\grave{} X$ are wonderful $G$-varieties, and if $\phi$ is dominant (i.e.\ surjective) and has connected fibers.
2) An inclusion $H\subset \,\grave{} H$ of wonderful subgroups of $G$ is called \textit{co-connected} if $\,\grave{} H/H$ is connected.
3) Let $\mathcal S=(\Sigma, S^p, \mathbf A)$ be a spherical system and $\Delta$ its set of colors. A subset $\Delta^\ast$ of $\Delta$ is called \textit{distinguished} in $\Delta$, if there exists $\delta\in\mathbb N_{>0}\Delta^\ast$ such that $c(\delta,\sigma)\geq0$, for all $\sigma\in\Sigma$.
For every distinguished subset $\Delta^\ast$ of $\Delta$, the \textit{quotient system}\\ $\mathcal S/\Delta^\ast=(\Sigma/\Delta^\ast, S^p/\Delta^\ast, \mathbf A/\Delta^\ast)$ is defined as follows:
\begin{itemf}
\item[-] $\Sigma/\Delta^\ast$ is the set of minimal generators of the (free) semigroup\\ $\{\sigma\in\mathbb N\Sigma,\, c(\delta,\sigma)=0\mathrm{\ for\ all\ }\delta\in\Delta^\ast\}$;
\item[-] $S^p/\Delta^\ast=S^p\cup\{\alpha\in S,\, \Delta(\alpha)\subset\Delta^\ast\}$;
\item[-] $\mathbf A/\Delta^\ast$ is the union of the $\mathbf A(\alpha)$'s, $\alpha\in S\cap(\Sigma/\Delta^\ast)$, and the Cartan pairing $\mathbf A/\Delta^\ast\times\Sigma/\Delta^\ast\to\mathbb Z$ is obtained from $\mathbf A\times\Sigma\to\mathbb Z$ in the obvious way.
\end{itemf}
In this section we will study these three notions and their interrelations.
\subsection{Quotient systems and wonderful subgroups}\label{sssquotient1}
Let $H\subset \,\grave{} H$ be two wonderful subgroups of $G$. Denote by $\phi\colon G/H\to G/\,\grave{} H$ the natural map, and $\Delta_\phi$ the set of $D\in\Delta=\Delta_{G/H}$ such that $\phi(D)$ is dense in $G/\,\grave{} H$. Then
\begin{itemf}
\item[-] $\Delta_\phi$ is distinguished in $\Delta$;
\item[-] conversely, for every distinguished subset $\Delta^\ast$ of $\Delta$, there exists a unique wonderful subgroup $\,\grave{} H$ having the following properties: $H\subset \,\grave{} H$, $\,\grave{} H/H$ is connected, and $\Delta_\phi=\Delta^\ast$;
\item[-] moreover, if $\,\grave{} H/H$ is connected, the spherical system of $\,\grave{} H$ is given by the quotient system $\mathcal S/\Delta_\phi$ (where $\mathcal S$ is the spherical system of $H$)
\end{itemf}
(these results are close to statements of F.~Knop in \cite{Kn91}).
\subsection{Quotient systems and wonderful morphisms}\label{sssquotient2}
Remember that $X$ denotes a wonderful $G$-variety, $\mathcal S=(\Sigma,S^p,\mathbf A)$ its spherical system and $\Delta$ its set of colors. The group $\Gamma_X$ acts naturally in the set of distinguished subsets of $\Delta$. Let $\,\grave{} X$ be another wonderful variety. The group $\Gamma_X\times\Gamma_{\,\grave{} X}$ acts naturally in the set of wonderful morphisms $\phi\colon X\to \,\grave{} X$. If $\phi$ is such a morphism, denote by $\Delta_\phi$ the set of $D\in\Delta$ such that $\phi(D)=\,\grave{} X$. Then
\begin{itemf}
\item[-] $\Delta_\phi$ is distinguished in $\Delta$;
\item[-] conversely, for every orbit $\Gamma_X.\Delta^\ast$ of distinguished subsets in $\Delta$, there exist wonderful morphisms $\phi\colon X\to \,\grave{} X$, defined (and unique) up to isomorphism, such that $\Delta_\phi\in\Gamma_X.\Delta^\ast$;
\item[-] moreover, the spherical system of $\,\grave{} X$ is given by the quotient system $\mathcal S/\Delta_\phi$.
\end{itemf}
This statement follows from \ref{sssquotient1} and from the fact that if $x\in X$ and $\,\grave{} x\in \,\grave{} X$ are such that $H=G_x\subset \,\grave{} H=G_{\,\grave{} x}$, then $\phi\colon G/H=G.x\to G/\,\grave{} H=G.\,\grave{} x$ extends always to a $G$-morphism $\phi\colon X\to \,\grave{} X$ such that $\phi(x)=\,\grave{} x$.
Notice that there may exist morphisms $\psi\colon X\to \,\grave{} X$ which are not obtained by $\psi=\,\grave{} \gamma\circ\phi\circ\gamma$, $\gamma\in\Gamma_X$ and $\,\grave{} \gamma\in\Gamma_{\,\grave{} X}$ (they correspond to the existence of subgroups $K$ of $G$ containing $H$ and conjugated to $\,\grave{} H$, but such that the two inclusions $H\subset \,\grave{} H$ and $H\subset K$ cannot be conjugated simultaneously).
\begin{remark}
Most of the wonderful varieties we will see in the examples in Chapter~3 have trivial automorphism groups; in that case the statements above have simpler form.
\end{remark}
Here are some relations between properties on different levels:
\begin{itemf}
\item[-]
the variety $\,\grave{} X$ is homogeneous (in other words $\,\grave{} H$ is a parabolic subgroup of $G$), if and only if $\Sigma/\Delta^\ast=\emptyset$; $\,\grave{} H$ is then conjugated in $G$ to ${}^-\! Q$, where $Q$ is the parabolic subgroup of $G$ containing $B$ and corresponding to $S^p/\Delta^\ast$; and there exists a ${}^-\! Q$-variety $Y$ such that $X\cong G\ast_{{}^-\! Q}Y$;
\item[-]
moreover, ${}^-\! Q^r$ acts trivially on $Y$ (so that $X$ is obtained by parabolic induction from the wonderful ${}^-\! Q/{}^-\! Q^r$-variety $Y$), if and only if $\mathrm{supp}(\Sigma)\subset S^p/\Delta^\ast$.
\item[-]
in particular, $X$ cannot be obtained by nontrivial parabolic induction if and only if $\mathrm{supp}(\Sigma)\cup S^p=S$.
\item[-]
the group $H$ is \textit{strongly solvable} in $G$ (i.e.\ $H$ is contained in a Borel subgroup of $G$) if and only if there exists a distinguished subset $\Delta^\ast$ such that $\mathcal S/\Delta^\ast=(\emptyset,\emptyset,\emptyset)$ (this system being the spherical system of $G/B$).
\end{itemf}
\subsection{Quotient systems and diagrams}\label{qsdiagrams}
Let us compute explicitly some quotient systems. Consider the spherical system $\mathcal S=(\Sigma,S^p,\mathbf A)$ of $G=\mathrm{SL}(4)$ having diagram:
\[\begin{picture}(4200,2250)\multiput(300,900)(1800,0){2}{\usebox{\segm}}\multiput(0,0)(1800,0){3}{\usebox{\aone}}\multiput(300,1800)(3600,0){2}{\line(0,1){450}}\put(300,2250){\line(1,0){3600}}\put(700,1300){\usebox{\toe}}\put(3100,1300){\usebox{\tow}}\end{picture}\]
Here $\Sigma=S$, $S^p=\emptyset$, $\mathrm{card}(\mathbf A)=5$ and the Cartan pairing is:
\[\begin{array}{r|rrr}c(-,-)&\alpha_1&\alpha_2&\alpha_3\\\hline\delta_{\alpha_1}^+&1&-1&1\\\delta_{\alpha_1}^-&1&0&-1\\\delta_{\alpha_2}^+&0&1&0\\\delta_{\alpha_2}^-&-1&1&-1\\\delta_{\alpha_3}^-&-1&0&1\end{array}\]
The following subsets of colors are distinguished in $\Delta$:
\[\Delta^{1}=\{\delta_{\alpha_1}^+,\delta_{\alpha_2}^-\},\quad\Delta^{2}=\{\delta_{\alpha_1}^-,\delta_{\alpha_3}^-\},\quad\Delta^{3}=\{\delta_{\alpha_2}^+\},\]\[\Delta^{1,2}=\Delta^{1}\cup\Delta^{2},\quad\Delta^{2,3}=\Delta^{2}\cup\Delta^{3}.\]
Indeed, $c(\delta_{\alpha_1}^++\delta_{\alpha_2}^-,\sigma)=0$, for all $\sigma\in\Sigma$, so $\Delta^{1}$ is distinguished. Similarly, $c(\delta_{\alpha_1}^-+\delta_{\alpha_3}^-,\sigma)=0$, $\sigma\in\Sigma$. Since $\Delta^{1}$ and $\Delta^{2}$ are distinguished, so is their union $\Delta^{1,2}$. Again, $c(\delta_{\alpha_2}^+,\sigma)\geq0$, $\sigma\in\Sigma$, so $\Delta^{3}$ and $\Delta^{2,3}$ are distinguished.
Let us now compute the quotients. Set $\sigma=m_1\alpha_1+m_2\alpha_2+m_3\alpha_3\in\mathbb N\Sigma$. Let us start with $\Delta^{1}$: $c(\delta_{\alpha_1}^+,\sigma)=0$ is equivalent to $m_2=m_1+m_3$ and $c(\delta_{\alpha_2}^-,\sigma)=0$ gives a proportional equation, so $\sigma=m_1(\alpha_1+\alpha_2)+m_3(\alpha_2+\alpha_3)$ and $\Sigma/\Delta^{1}=\{\alpha_1+\alpha_2,\alpha_2+\alpha_3\}$. For $\Delta^{2}$ one has $m_1=m_3$, so $\Sigma/\Delta^{2}=\{\alpha_1+\alpha_3,\alpha_2\}$. For $\Delta^{3}$ one has $m_2=0$, so $\Sigma/\Delta^{3}=\{\alpha_1,\alpha_3\}$. Similarly for $\Delta^{1,2}$ and $\Delta^{2,3}$. One gets the quotient spherical systems corresponding to the following diagrams:
\[\begin{picture}(18600,14550)
\put(7200,12300){
\multiput(300,900)(1800,0){2}{\usebox{\segm}}
\multiput(0,0)(1800,0){3}{\usebox{\aone}}
\multiput(300,1800)(3600,0){2}{\line(0,1){450}}
\put(300,2250){\line(1,0){3600}}
\put(700,1300){\usebox{\toe}}
\put(3100,1300){\usebox{\tow}}
}
\put(14400,5850){
\multiput(300,900)(1800,0){2}{\usebox{\segm}}
\multiput(0,0)(3600,0){2}{\usebox{\aone}}
\multiput(300,1800)(3600,0){2}{\line(0,1){450}}
\put(300,2250){\line(1,0){3600}}
\put(2100,900){\circle{600}}
}
\put(7200,5850){
\multiput(300,900)(1800,0){2}{\usebox{\segm}}
\multiput(300,900)(3600,0){2}{\circle{600}}
\multiput(300,-450)(3600,0){2}{\line(0,1){1050}}
\put(300,-450){\line(1,0){3600}}
\put(1800,0){\usebox{\aone}}
}
\put(0,5850){
\multiput(0,600)(1800,0){2}{\usebox{\atwo}}
}
\put(11100,0){
\multiput(300,900)(1800,0){2}{\usebox{\segm}}
\multiput(300,900)(1800,0){3}{\circle{600}}
\multiput(300,0)(3600,0){2}{\line(0,1){600}}
\put(300,0){\line(1,0){3600}}
}
\put(3600,0){
\multiput(300,900)(1800,0){2}{\usebox{\segm}}
\put(1800,600){\usebox{\GreyCircle}}
}
\put(11400,11700){\vector(1,-1){3000}}
\multiput(9300,11700)(0,-600){5}{\line(0,-1){300}}
\put(9300,8700){\vector(0,-1){300}}
\put(7200,11700){
\multiput(0,0)(-400,-400){7}{\multiput(0,0)(-20,-20){10}{\line(-1,0){30}}}
\put(-2800,-2800){\vector(-1,-1){200}}
}
\put(3150,4800){
\multiput(0,0)(300,-600){5}{\multiput(0,0)(15,-30){10}{\line(-1,0){30}}}
\put(1500,-3000){\vector(1,-2){150}}
}
\multiput(8250,4800)(7200,0){2}{
\multiput(0,0)(-300,-600){5}{\multiput(0,0)(-15,-30){10}{\line(-1,0){30}}}
\put(-1500,-3000){\vector(-1,-2){150}}
}
\put(10350,4800){\vector(1,-2){1500}}
\end{picture}\]
An arrow between two diagrams denotes that the target arises as a quotient of the source. For ``minimal'' quotients we sometimes use a dashed arrow to give more information about the ``type'' of the quotient, as it will be explained in \ref{minimal}.
In Chapter~3, quotients of spherical systems by distinguished subsets of colors will be often represented only by arrows between diagrams. Indeed, the distinguished subset of colors can usually be recovered easily, given only the two spherical systems.
To explain this, let $\mathcal S$ be a spherical system with set of colors $\Delta$, and $\Delta^\ast$ be a distinguished subset of $\Delta$. Denote by $\,\grave{} \mathcal S=\mathcal S/\Delta^\ast$ the quotient system, and by $\,\grave{} \Delta$ its set of colors. Then $\,\grave{} \Delta$ can be identified with $\Delta\setminus\Delta^\ast$, and for every $\alpha\in S$, one has $\,\grave{} \Delta(\alpha)\subset\Delta(\alpha)$. So if one knows $\Delta$ and $\,\grave{} \Delta$, one can usually recover $\Delta^\ast$ (the only problem is to understand what happens for $\alpha\in S\cap\Sigma$).
Consider for instance the diagrams
\[\begin{picture}(11400,9000)
\put(3600,6750){
\put(300,900){\usebox{\segm}}
\put(2100,900){\usebox{\rightbisegm}}
\multiput(0,0)(1800,0){3}{\usebox{\aone}}
\multiput(300,1800)(1800,0){2}{\line(0,1){450}}
\put(300,2250){\line(1,0){1800}}
\multiput(300,0)(3600,0){2}{\line(0,-1){450}}
\put(300,-450){\line(1,0){3600}}
\put(2500,1300){\usebox{\toe}}
}
\put(7200,0){
\put(300,900){\usebox{\segm}}
\put(2100,900){\usebox{\rightbisegm}}
\put(3600,600){\usebox{\GreyCircle}}
}
\put(0,0){
\put(300,900){\usebox{\segm}}
\put(2100,900){\usebox{\rightbisegm}}
\multiput(300,900)(1800,0){3}{\circle{600}}
\multiput(300,0)(3600,0){2}{\line(0,1){600}}
\put(300,0){\line(1,0){3600}}
}
\put(4800,5400){\vector(-1,-2){1575}}
\put(3600,300){\multiput(3000,5100)(300,-600){5}{\multiput(0,0)(15,-30){10}{\line(0,-1){30}}}
\put(4500,2100){\vector(1,-2){150}}}
\end{picture}\]
These are diagrams of type $\mathsf B_3$. Denote by $\mathcal S$ the spherical system of the diagram on top, and by ${}^{(1)}\!\mathcal S$ and ${}^{(2)}\!\mathcal S$ the spherical system of those on the bottom line. The Cartan pairing of $\mathcal S$ is
\[\begin{array}{r|rrr}c(-,-)&\alpha_1&\alpha_2&\alpha_3\\\hline\delta_{\alpha_1}^+&1&1&-1\\\delta_{\alpha_1}^-&1&-2&1\\\delta_{\alpha_2}^-&-2&1&0\\\delta_{\alpha_3}^+&-1&0&1\end{array}\]
Then the two distinguished subsets of colors $\Delta^{(1)}$ and $\Delta^{(2)}$ of $\Delta$ such that $\mathcal S/\Delta^{(i)}={}^{(i)}\!\mathcal S$, $i=1,2$, are clearly given by $\Delta^{(1)}=\{\delta_{\alpha_1}^+,\delta_{\alpha_3}^+\}$ and $\Delta^{(2)}=\Delta\setminus\{\delta_{\alpha_3}^+\}$ (the reader is invited to check this).
As another exercise, the reader is invited to make explicit, in the same way, the quotients given by the following figure:
\[\begin{picture}(11400,8400)
\put(7200,-600){
\put(300,900){\usebox{\segm}}
\put(2100,900){\usebox{\rightbisegm}}
\put(0,600){\usebox{\atwo}}
\put(1800,600){\usebox{\GreyCircle}}
\put(3900,900){\circle{600}}
}
\put(0,-600){
\put(300,900){\usebox{\segm}}
\put(2100,900){\usebox{\rightbisegm}}
\put(3600,600){\usebox{\GreyCircle}}
}
\put(3600,6150){
\put(300,900){\usebox{\segm}}
\put(2100,900){\usebox{\rightbisegm}}
\put(0,600){\usebox{\atwo}}
\put(1800,600){\usebox{\GreyCircle}}
\put(3600,0){\usebox{\aone}}
}
\put(-3600,0){\multiput(8400,5100)(-300,-600){5}{\multiput(0,0)(-15,-30){10}{\line(0,-1){30}}}
\put(6900,2100){\vector(-1,-2){150}}}
\put(6600,5100){\vector(1,-2){1575}}
\end{picture}\]
This way of describing quotients of spherical systems is not without ambiguity, since it may happen that there are several distinguished subsets of colors giving the same quotient system. For instance the arrow
\[\begin{picture}(9000,1800)\multiput(300,900)(7200,0){2}{\usebox{\segm}}\multiput(0,0)(1800,0){2}{\usebox{\aone}}\multiput(7500,900)(1800,0){2}{\circle{600}}\put(3300,900){\vector(1,0){3000}}\end{picture}\]
may come from $\{\delta_{\alpha_1}^+,\delta_{\alpha_2}^+\}$, $\{\delta_{\alpha_1}^+,\delta_{\alpha_2}^-\}$ or $\{\delta_{\alpha_1}^-,\delta_{\alpha_2}^+\}$.
\subsection{Generic fibers of wonderful morphisms}
Let $\phi\colon X\to \,\grave{} X$ be a wonderful morphism, and let $\,\grave{} x$ be a point in the open orbit of $G$ in $\,\grave{} X$. The fiber $Y=\phi^{-1}(\,\grave{} x)$ is called a \textit{generic fiber} of $\phi$. It follows from the Theorem of Sard that $Y$ is a complete and smooth $\,\grave{} H$-variety, where we have put $\,\grave{} H=G_{\,\grave{} x}$ (this group is not necessarily reductive nor connected). Let us denote by $\Sigma_\phi$ the set of spherical roots of $X$ which contribute to spherical roots of $\,\grave{} X$ (recall that the second are sums of the first). For every $\Sigma'\subset \Sigma_X$, remember that $X_{\Sigma'}$ denotes the wonderful subvariety of $X$ having $\Sigma'$ as set of spherical roots.
\begin{proposition}
\begin{itemf}
\item[]
\item[1)] One has $\phi(X_{\Sigma'})=\,\grave{} X$ if and only if $\Sigma'\supset\Sigma_\phi$.
\item[2)] The $\,\grave{} H$-variety $Y$ is wonderful, its rank is $\mathrm{card}(\Sigma\setminus\Sigma_\phi)$, and $Y$ is spherical under the action of any Levi subgroup of $\,\grave{} H$.
\end{itemf}
\end{proposition}
\begin{sketche}\makebox{}\\
1) A $G$-morphism $\psi\colon Z\to \,\grave{} Z$ between spherical $G$-varieties is dominant if and only if $\Xi(\psi)\colon\Xi(\,\grave{} Z)\to\Xi(Z)$ is injective. From this, (1) follows, since $\Sigma_{\,\grave{} X}$ is a basis of $\Xi(\,\grave{} X)$.
2) The intersection of $Y$ with each orbit of $G$ in $X$ is either empty, or is an orbit of $\,\grave{} H$ in $Y$. This implies that, for every $\sigma\in\Sigma\setminus\Sigma_\phi$, $Y\cap D^\sigma$ is a divisor stable by $\,\grave{} H$. One checks easily that these divisors have all the properties of the definition of wonderful varieties. The last assertion (although less easy) is left to the reader.
\end{sketche}
\subsection{Notions of minimality}\label{minimal}
Let $H\subset \,\grave{} H$ be a co-connected inclusion of wonderful subgroups of $G$. We will say that this inclusion is \textit{minimal} if there exists no proper intermediate wonderful subgroup $K$ with $K/H$ connected.
\begin{proposition}
If $H\subset \,\grave{} H$ is such a minimal inclusion, then three possibilities can occur:
\begin{itemf}
\item[($\mathcal P$)] either $H^u$ contains $\,\grave{} H^u$ strictly; then $H$ is a maximal parabolic subgroup of $\,\grave{} H$;
\item[($\mathcal R$)] or $H^u=\,\grave{} H^u$; then $H/H^r$ is very reductive and maximal in $\,\grave{} H/\,\grave{} H^r$;
\item[($\mathcal L$)] or $H^u$ is strictly contained in $\,\grave{} H^u$; then $\mathrm{Lie}(\,\grave{} H^u)/\mathrm{Lie}(H^u)$ is a simple $H$-module, $H=\mathrm{N}_{\,\grave{} H}(H^u)$, and the Levi factors of $H$ and $\,\grave{} H$ differ only by their connected centers.
\end{itemf}
\end{proposition}
\vspace{-4pt}
\begin{sketche}
Let $H=L\,H^u$ and $\,\grave{} H=\,\grave{} L\,\,\grave{} H^u$ be Levi decompositions such that $L\subset \,\grave{} L$. Denote by $q\colon H\to \,\grave{} H/\,\grave{} H^r$ the natural map. If $q$ is not surjective, it follows by minimality that $\,\grave{} H^u$ is contained in $H^u$. Then either $q(H)$ is contained in a parabolic subgroup of $\,\grave{} H/\,\grave{} H^r$ and we have $\mathcal P$; or $q(H)$ is very reductive in $\,\grave{} H/\,\grave{} H^r$, which implies $H^u=\,\grave{} H^u$, and we have $\mathcal R$. If $q$ is surjective, then $L$ and $\,\grave{} L$ differ only by their connected centers, and we have $\mathcal L$.
\end{sketche}
Let $H$ be a wonderful subgroup of $G$. Assume that $Q$ is a parabolic subgroup of $G$, containing $B$, such that ${}^-\! Q$ is minimal among the parabolic subgroups of $G$ containing $H$. Then there exists a sequence of co-connected inclusions of wonderful subgroups
\vspace{-4pt}
\[H=H_0\subset H_1\subset\ldots\subset H_{m-1}\subset H_m\subset {}^-\! Q\]
\vspace{-4pt}
having the following properties:
\begin{itemf}
\item[-] $H_{i-1}\subset H_i$ is minimal of type $\mathcal L$ ($i=1,\ldots,m$);
\item[-] $H_m^r={}^-\! Q^r$ and $H_m/H_m^r$ is very reductive in ${}^-\! Q/{}^-\! Q^r$
\end{itemf}
\vspace{-4pt}
(this is a reformulation of statements in \cite{Lo07}).
\bigskip
A wonderful morphism $\phi\colon X\to \,\grave{} X$ will be called \textit{minimal} if it cannot be written as composition of two wonderful morphisms, both of which are not isomorphisms.
Let $\,\grave{} x$ be a point in the open orbit of $G$ in $\,\grave{} X$, and consider the generic fiber $Y=\phi^{-1}(\,\grave{} x)$ introduced in the preceding section. According to the analysis above, minimal wonderful morphisms are divided into the following three types:
\begin{itemf}
\item[($\mathcal P$)] $G_{\,\grave{} x}$ acts transitively on $Y$;
\item[($\mathcal R$)] $G_{\,\grave{} x}$ does not act transitively on $Y$, but $G_{\,\grave{} x}^r$ acts trivially on $Y$;
\item[($\mathcal L$)] $G_{\,\grave{} x}^r$ has an open dense orbit in $Y$.
\end{itemf}
Let $\mathcal S=(\Sigma,S^p,\mathbf A)$ be the spherical system of $X$, and $\Delta$ its set of colors. It is clear that $\phi$ is minimal if and only if $\Delta_\phi$ is minimal among the distinguished subsets of $\Delta$.
Let us call a color \textit{negative} if its values on $\Sigma$ (under the Cartan pairing) are $\leq0$. All negative colors are of the form $\delta_\alpha$, for $\alpha\in S^b$ (which is uniquely determined by the color). A negative color $\delta_{\alpha}$ will be called \textit{interior} if $\alpha\in\mathrm{supp}(\Sigma)$, and \textit{exterior} if $\alpha\not\in\mathrm{supp}(\Sigma)$.
For every $\alpha\in S$, let us denote by $Q_\alpha$ a maximal parabolic associated to $\alpha$ (containing $B$). The following characterization of negative exterior colors follows from the properties of parabolic inductions: $D_{\alpha}$ is a negative exterior color for a wonderful variety $X$ if and only if there exists a generic isotropy group $H$ of $X$ verifying: ${}^-\! Q_\alpha^u\subset H\subset {}^-\! Q_\alpha$.
Is the following statement true in general? Let $D_\alpha$ be a color of type $b$ for a wonderful variety $X$. Then $D_\alpha$ is negative interior if and only if the derived subgroup $({}^-\! Q_\alpha^u,{}^-\! Q_\alpha^u)$ is not trivial, and there exists a generic isotropy group $H$ of $X$, verifying: $({}^-\! Q_\alpha^u,{}^-\! Q_\alpha^u)\subset H\subset {}^-\! Q_\alpha$ but ${}^-\! Q_\alpha^u\not\subset H$.
Let $X$ be a wonderful variety with spherical system $\mathcal S=(\Sigma,S^p,\mathbf A)$. Let $\phi\colon X\to \,\grave{} X$ be a minimal wonderful morphism, and put $\Delta^\ast=\Delta_\phi$. Remember that the spherical system of $\,\grave{} X$ is given by the quotient system $\mathcal S/\Delta^\ast=(\Sigma/\Delta^\ast,S^p/\Delta^\ast,\mathbf A/\Delta^\ast)$. How can one see the different types of $\phi$ on the combinatorial data?
From the preceding section follows that $\phi$ is of type $\mathcal P$ if and only if $\Sigma_\phi=\Sigma$.
The following facts are easy to check:
\begin{itemf}
\item[-] $d(\mathcal S)>d(\mathcal S/\Delta^\ast)$ if and only if we are in type $\mathcal P$;
\item[-] if $d(\mathcal S)<d(\mathcal S/\Delta^\ast)$, then we are in type $\mathcal L$;
\item[-] if $d(\mathcal S)=d(\mathcal S/\Delta^\ast)$ and a new exterior negative color appears in $\mathcal S/\Delta^\ast$, then we are in type $\mathcal L$.
\end{itemf}
The following statement is true in all examples we know: if $d(\mathcal S)=d(\mathcal S/\Delta^\ast)$ and no new negative color appears in $\mathcal S/\Delta^\ast$, then we are in type $\mathcal R$.
If $d(\mathcal S)=d(\mathcal S/\Delta^\ast)$ and the only new negative color which appears in $\mathcal S/\Delta^\ast$ is interior, we can either be in type $\mathcal L$ or in type $\mathcal R$ (the first happens very often, see for instance Example~2 below, or Section~\ref{ss1521}; for the second see Section~\ref{notf4}, Example~7).
\bigskip
\textit{Examples}
1) Let $\mathcal S=(\Sigma,S^p,\mathbf A)$ be any spherical system and $\Delta$ its set of colors. A color $\delta\in\mathbf A$ is called \textit{projective}, if $c(\delta,\sigma)\geq0$, for all $\sigma\in\Sigma$. If $\delta$ is any projective color, the set $\{\delta\}$ is clearly distinguished in $\Delta$, and the corresponding quotient is of type $\mathcal L$.
2)
\[\begin{picture}(18600,9000)
\put(3600,6750){
\put(300,900){\usebox{\segm}}
\put(2100,900){\usebox{\rightbisegm}}
\multiput(0,0)(1800,0){3}{\usebox{\aone}}
\multiput(300,1800)(1800,0){2}{\line(0,1){450}}
\put(300,2250){\line(1,0){1800}}
\multiput(300,0)(3600,0){2}{\line(0,-1){450}}
\put(300,-450){\line(1,0){3600}}
\put(2500,1300){\usebox{\toe}}
}
\put(7200,0){
\put(300,900){\usebox{\segm}}
\put(2100,900){\usebox{\rightbisegm}}
\put(3600,600){\usebox{\GreyCircle}}
}
\put(0,0){
\put(300,900){\usebox{\segm}}
\put(2100,900){\usebox{\rightbisegm}}
\multiput(300,900)(1800,0){3}{\circle{600}}
\multiput(300,0)(3600,0){2}{\line(0,1){600}}
\put(300,0){\line(1,0){3600}}
}
\put(4800,5700){\vector(-1,-2){1575}}
\put(3600,600){\multiput(3000,5100)(300,-600){5}{\multiput(0,0)(15,-30){10}{\line(0,-1){30}}}
\put(4500,2100){\vector(1,-2){150}}}
\put(7200,600){
\put(7200,-600){
\put(300,900){\usebox{\segm}}
\put(2100,900){\usebox{\rightbisegm}}
\put(0,600){\usebox{\atwo}}
\put(1800,600){\usebox{\GreyCircle}}
\put(3900,900){\circle{600}}
}
\put(3600,6150){
\put(300,900){\usebox{\segm}}
\put(2100,900){\usebox{\rightbisegm}}
\put(0,600){\usebox{\atwo}}
\put(1800,600){\usebox{\GreyCircle}}
\put(3600,0){\usebox{\aone}}
}
\put(-3600,0){\multiput(8400,5100)(-300,-600){5}{\multiput(0,0)(-15,-30){10}{\line(0,-1){30}}}
\put(6900,2100){\vector(-1,-2){150}}}
\put(6600,5100){\vector(1,-2){1575}}
}
\end{picture}\]
These diagrams have already been analyzed in \ref{qsdiagrams}. Each arrow represents a minimal quotient. As a general rule we put dashed arrows to represent minimal quotients with strictly decreasing defect, and continuous arrows otherwise. Indeed, the two spherical systems with diagram on the top line have defect 1, both having $\mathrm{card}(\Delta)=4$ and $\mathrm{card}(\Sigma)=3$. For those on the second line, the defect is respectively $2-1=1$, $1-1=0$, $3-2=1$.
3) Here is an example of type $\mathcal R$:
\[\begin{picture}(13200,1800)\multiput(300,900)(9000,0){2}{\usebox{\segm}}\multiput(2100,900)(9000,0){2}{\usebox{\rightbisegm}}\put(0,600){\usebox{\atwo}}\put(1800,600){\usebox{\GreyCircle}}\put(3600,0){\usebox{\aprime}}\put(9000,600){\usebox{\GreyCircleTwo}}\put(5100,900){\vector(1,0){3000}}\end{picture}\]
\vspace{6ex}\section{Spherical closure}\label{sssphericalclosure}
We will call \textit{simple projective $G$-spaces} the $G$-varieties of the form $\mathbb P(V)$, where $V$ is any simple (rational) $G$-module. In this section we will see that spherical orbits in simple projective spaces play an important role in the theory of wonderful varieties.
\subsection{Definition and first properties}\label{sphericalclosure1}
Let $H$ be any spherical subgroup of $G$. The group $\mathrm{N}_G(H)$ acts naturally on $\Delta_{G/H}$, the set of colors of $G/H$. We call \textit{spherical closure} of $H$ in $G$ (denoted by $\overline H^{sph}$) the kernel of the action of $\mathrm{N}_G(H)$ on $\Delta_{G/H}$; if $H=\overline H^{sph}$, we say that $H$ is \textit{spherically closed} in $G$.
Spherical subgroups equal to their normalizer are of course spherically closed. Spherically closed subgroups are wonderful (result due to F.~Knop \cite{Kn96}).
Let $X$ be a wonderful $G$-variety, $\mathcal S=(\Sigma,S^p,\mathbf A)$ its spherical system, $\Delta$ its set of colors, and let $H$ be a generic isotropy group of $G$ in $X$. Here are some properties relating these notions:
\begin{itemf}
\item[-] $H$ is spherically closed if and only if $\Sigma_\ell(\mathcal S)\subset S$;
\item[-] in particular, $H=\mathrm{N}_G(H)$ if and only if $\Sigma_\ell(\mathcal S)=\emptyset$;
\item[-] the variety $X$ is strict if and only if $S\cap\Sigma=\emptyset=\Sigma_\ell(\mathcal S)$ (\textit{strict} means that $\mathrm{N}_G(G_x)=G_x$, for all $x\in X$);
\item[-] from the combinatorial characterization above follows in particular that, if a generic isotropy group of $X$ is spherically closed, then so are all the
other isotropy groups;
\item[-] if $H$ is spherically closed, then $\mathrm{N}_G(H)/H=\Gamma_X$ can be identified to the group $\Gamma=\Gamma_{\mathcal S}$ of permutations of $\Delta$ stabilizing each $\Delta(\alpha)$, $\alpha\in S$, and leaving invariant the Cartan pairing (i.e.\ by those permutations which may exchange the 2 colors of $\Delta(\alpha)$, when $\alpha\in\Sigma_\ell(\mathcal S)$, but fix all other colors).
\end{itemf}
The wonderful subgroups not spherically closed are somewhat exceptional.
In type $\mathsf F_4$, only three spherical systems have wonderful subgroups which are not spherically closed:
\[\begin{picture}(24000,1800)
\put(0,0){
\put(300,900){\usebox{\dynkinf}}
\multiput(300,900)(5400,0){2}{\circle{600}}
\put(1800,600){\usebox{\GreyCircle}}
}
\put(9000,0){
\put(300,900){\usebox{\dynkinf}}
\put(5700,900){\circle{600}}
\put(0,600){\usebox{\GreyCircle}}
}
\put(18000,0){
\put(300,900){\usebox{\dynkinf}}
\put(5700,900){\circle{600}}
\put(1800,600){\usebox{\GreyCircle}}
\put(0,0){\usebox{\aone}}
}
\end{picture}\]
\subsection{Further properties of the spherical closure}\label{sphericalclosure2}
Let $H$ be a spherical subgroup of $G$. It is well known that $H$ fixes at most a finite number of points in every simple projective $G$-space $\mathbb P(V)$ (otherwise one would be able to construct nonconstant $B$-invariant rational functions on $G/H$, which is impossible, since $B$ has an open orbit in $G/H$).
Let us denote by $q_H \colon G \to G/H$ the canonical map. The following lemma gives alternative characterizations of the spherical closure.
\begin{lemma}
Let $g\in G$. The following three conditions are equivalent:
\begin{itemf}
\item[(1)] $g\in\overline H^{sph}$;
\item[(2)] $q_H^{-1}(D).g=q_H^{-1}(D)$, for every $D\in\Delta_{G/H}$;
\item[(3)] $g$ fixes all points in simple projective $G$-spaces that are fixed by $H$.
\end{itemf}
\end{lemma}
\begin{proof}
$(1)\Rightarrow (2)$ is clear.
$(2)\Rightarrow (3)$ Let $V$ be a simple $G$-module, and let $[x]\in\mathbb P(V)$ be a point fixed by $H$ (where $x$ denotes a point in $V\setminus\{0\}$ ``over'' $[x]$); such an $x$ is an eigenvector of $H$. Choose an eigenvector $v$ of $B$ in $V^\ast$. Then $v\otimes x$ gives, by means of the map $V^\ast\otimes V\to \mathbb C[G]$ corresponding to the natural map $G\to \mathrm{End}(V)=V\otimes V^\ast$, a function $f\in\mathbb C[G]$ which is an eigenvector of $B$ (acting by left translations), and an eigenvector of $H$ (acting by right translations). Since we assume $G$ simply connected, $\mathbb C[G]$ is factorial, so $f$ can be written in a unique way as $\prod_{D\in\Delta}f_D^{n(f,D)}$, where the $f_D$'s are equations of the $q_H^{-1}(D)$'s, $D\in\Delta_{G/H}$. The assumption (2) implies then that the $f_D$'s (and so also $f$) are again eigenvectors of $g$, which gives $g.[x]=[x]$.
$(3)\Rightarrow (1)$ One has to show that $g\in\mathrm{N}_G(H)$. Since $H$ is an algebraic group, there exist $G$-modules $U$ (which are in general not simple) and $H$-eigenvectors $u\in U$, such that $H=G_{[u]}$. We can assume that $U$ has a direct sum decomposition into simple $G$-modules $U=V_1\oplus\ldots\oplus V_n$ such that $u$ can be written $u=v_1+\ldots+v_n$, where the $v_i$ are $H$-eigenvectors in $V_i$ ($i=1,\ldots,n$). Since by (3) these $v_i$ are also eigenvectors of $g$, one can find $a\in\mathrm{Aut}_G(U)$ such that $g.[u]=a.[u]$. From this follows that $G_{g.[u]}=G_{[u]}=H$, which implies $g\in\mathrm{N}_G(H)$.
\end{proof}
\begin{corollarynumber}
If $H\subset \,\grave{} H$ are spherical subgroups of $G$, then $\overline H^{sph}\subset \overline {\,\grave{} H}^{sph}$.
\end{corollarynumber}
\begin{proof}
This follows for instance immediately from the property (3) of the preceding lemma.
\end{proof}
\begin{corollarynumber}\label{corollarysc}
A spherical subgroup $H$ of $G$ is spherically closed if and only if it occurs as isotropy group in some simple projective $G$-space.
\end{corollarynumber}
\begin{proof}
Let $V$ be a simple $G$-module and let $x\in V\setminus\{0\}$ be such that $H=G_{[x]}$ is spherical in $G$. Then the property (3) of the preceding lemma implies $\overline H^{sph}\subset G_{[x]}=H$, so $H$ is spherically closed.
Conversely, let $H$ be any spherically closed subgroup of $G$. Choose pairwise different integers $n(D)>0$, $D\in\Delta_{G/H}$ and put $f=\prod_{D\in\Delta}f_D^{n(D)}$. Then there exists a simple $G$-module $V$ and $[x]\in\mathbb P(V)^H$, such that $f$ is obtained from $x$ as in the proof $(2)\Rightarrow (3)$ above. We know already that $\,\grave{} H=G_{[x]}$ is spherically closed in $G$. By definition, $H\subset \,\grave{} H$. Because of the choice of $f$, the natural map $G/H\to G/\,\grave{} H$ induces a bijection $\Delta_{G/H}\to\Delta_{G/\,\grave{} H}$. So the sets $\{q_H^{-1}(D), D\in\Delta_{G/H}\}$ and $\{q_{\,\grave{} H}^{-1}(D), D\in\Delta_{G/\,\grave{} H}\}$ are the same, which implies $H=\,\grave{} H$.
\end{proof}
The following result will be used several times in Chapter~3.
\begin{corollarynumber}\label{argument}
Let $K$ be a spherically closed subgroup of $G$, and let $H$ be a subgroup of $K$ such that $\mathrm{N}_K(H) = H$. Then $H$ is wonderful in $G$ if (and only if) it is spherical in $G$.
\end{corollarynumber}
\begin{proof}
If $H$ is spherical in $G$, Corollary~\ref{corollarysc} implies $\overline H^{sph}\subset\overline K^{sph} = K$. So we have $\overline H^{sph}\subset\mathrm{N}_G(H)\cap K=\mathrm{N}_K(H)$ which is equal to $H$ by assumption. This implies $H$ spherically closed in $G$, so $H$ is wonderful in $G$.
\end{proof}
\subsection{Spherical orbits in simple projective spaces}\label{ssorbits}
Let $V$ be any simple (rational) $G$-module. It is known that only finitely many orbits of $G$ in $\mathbb P(V)$ are spherical. In what follows, we will explain how one can classify these with the help of spherical systems.
For any $\delta\in\mathbb N\Delta$, we write $n(\delta,D)$ for the coefficient of $\delta$ at $D\in\Delta$, and define $\mathrm{supp}_\Delta(\delta)$ as the set of colors $D$ such that $n(\delta,D)>0$.
We will say that a couple ($\mathcal S$,$\delta$) -- where $\mathcal S$ is a spherical system and $\delta\in\mathbb N\Delta$ -- is \textit{faithful}, if the following conditions are fulfilled:
\begin{itemf}
\item[] (1)\hspace{6pt}$\mathcal S$ is spherically closed;
\item[] (2)\hspace{6pt}any (nonempty) distinguished subset of $\Delta$ meets $\mathrm{supp}_\Delta(\delta)$;
\item[] (3)\hspace{6pt}$n(\delta,D^+_\alpha)\neq n(\delta,D^-_\alpha)$, for all $\alpha\in\Sigma_\ell(\mathcal S)$.
\end{itemf}
Remember the natural map $\omega\colon\mathbb N\Delta\to\mathbb N\Omega$ (where $\mathbb N\Omega$ is the set of dominant weights) introduced in \ref{ssscartan}. On the combinatorial level, $\omega$ is given as follows:
\begin{itemf}
\item[] -\hspace{6pt}if $\delta\in\Delta^a\cup\Delta^b$, then $\omega(\delta)$ is the sum of those fundamental weights $\omega_{\alpha}$\\
\rule{10pt}{0pt}such that $\delta\in\Delta(\alpha)$;
\item[] -\hspace{6pt}if $\delta\in\Delta^{2a}$, then $\omega(\delta) = 2\omega_\alpha$ where $\delta\in\Delta(\alpha)$.
\end{itemf}
Remember also that we have introduced above in \ref{sphericalclosure1} a permutation group $\Gamma=\Gamma_{\mathcal S}$ of $\Delta$, which is acting also in $\mathbb N\Delta$. It is clear that for every faithful couple ($\mathcal S$,$\delta$) and every $\gamma\in\Gamma$, the couple ($\mathcal S$,$\gamma.\delta$) is again faithful, and that $\omega(\gamma.\delta)=\omega(\delta)$.
If a simple $G$-module $V$ has a dominant weight $\pi$, we denote by $\pi^\ast$ the dominant weight of its dual module $V^\ast$.
\begin{proposition}
Assume the conjecture of \ref{question} true for $G$. Let $V_\pi$ be a simple $G$-module of highest weight $\pi$. Then there exists a natural bijection between the set of spherical orbits of $G$ in $\mathbb P(V_\pi)$ and the set of $\Gamma$-orbits of faithful couples \mbox{($\mathcal S$, $\Gamma.\delta$)} such that $\omega(\delta)=\pi^\ast$.
\end{proposition}
\begin{proof}
Let ($\mathcal S$, $\delta$) be a faithful couple such that $\omega(\delta)=\pi^\ast$. Let $H$ be a spherically closed subgroup of $G$ having $\mathcal S$ as spherical system. Denote by $f_D\in\mathbb C[G]$ equations of the $q^{-1}(D)$'s, $D\in\Delta_{G/H}$, and put $f=\prod_{D\in\Delta} f_D^{n(\delta,D)}$, where $\delta=\sum_{D\in\Delta} n(\delta,D)D$. Let $V$ be a simple $G$-module and $x$ be an eigenvector of $H$ in $V$, such that $f$ is obtained from $x$ as in the previous subsection. The character $\omega(\delta)$ is also the $B$-character of $f$, which is the dominant weight of $V^\ast$. So $V$ has dominant weight $\pi$ and we have associated to the couple ($\mathcal S$, $\delta$) a spherical orbit $G.[x]$ in $\mathbb P(V_\pi)$.
Conversely, let $G.[x]$ be a spherical orbit in $\mathbb P(V_\pi)$. By Corollary~\ref{corollarysc} of \ref{sphericalclosure2}, $H=G_{[x]}$ is a spherically closed subgroup of $G$. Denote by $\mathcal S$ its spherical system, and by $f=\prod_{D\in\Delta}f_D^{n(f,D)}$ the function on $G$ given by $x$ as in the previous section. Put $\delta=\sum_{D\in\Delta}n(f,D)D$. By construction, the couple ($\mathcal S$,$\delta$) is faithful (otherwise the natural map $G/H\to G.[x]$ would not be injective), and one has $\omega(\delta)=\pi^\ast$.
If ($\mathcal S',\delta'$) is another couple going to the same (spherical) orbit in $\mathbb P(V_\pi)$, then $\mathcal S'=\mathcal S$ (since this system depends only on the orbit). But $\delta$ does depend on the choice of the point on the orbit fixed by $H$, points which are exchanged by $\mathrm{N}_G(H)$, so $\delta'\in\Gamma.\delta$.
\end{proof}
\begin{example}
Let $G = \mathrm{SL}(4)$ and let $\pi = \omega_1+\omega_3$ be the dominant weight of the adjoint representation $\mathfrak{sl}(4)$. The following table contains all faithful couples \mbox{($\mathcal S$, $\delta$)} such that $\omega(\delta) = \omega_1+\omega_3$:
\[\begin{array}{c|ccccc}
\begin{picture}(1800,2700)\put(300,1050){$\mathcal S$}\end{picture} &
\begin{picture}(6000,2700)\put(900,0){\multiput(300,1350)(1800,0){2}{\usebox{\segm}}\multiput(300,1350)(3600,0){2}{\circle{600}}}\end{picture} &
\begin{picture}(6000,2700)\put(900,0){\multiput(300,1350)(1800,0){2}{\usebox{\segm}}\multiput(300,1350)(1800,0){3}{\circle{600}}\multiput(300,1050)(3600,0){2}{\line(0,-1){600}}\put(300,450){\line(1,0){3600}}}\end{picture} &
\begin{picture}(6000,2700)\put(900,0){\multiput(300,1350)(1800,0){2}{\usebox{\segm}}\multiput(0,450)(1800,0){3}{\usebox{\aone}}\multiput(300,2250)(3600,0){2}{\line(0,1){450}}\put(300,2700){\line(1,0){3600}}\put(2500,1750){\usebox{\toe}}}\end{picture} &
\begin{picture}(6000,2700)\put(900,0){\multiput(300,1350)(1800,0){2}{\usebox{\segm}}\multiput(300,1350)(3600,0){2}{\circle{600}}\put(0,1050){\multiput(300,300)(25,25){13}{\circle*{70}}\put(600,600){\multiput(0,0)(300,0){10}{\multiput(0,0)(25,-25){7}{\circle*{70}}}\multiput(150,-150)(300,0){10}{\multiput(0,0)(25,25){7}{\circle*{70}}}}\multiput(3900,300)(-25,25){13}{\circle*{70}}}}\end{picture} &
\begin{picture}(6000,2700)\put(900,0){\multiput(300,1350)(1800,0){2}{\usebox{\segm}}\multiput(300,1350)(3600,0){2}{\circle{600}}\multiput(300,1050)(3600,0){2}{\line(0,-1){1050}}\put(300,0){\line(1,0){3600}}\put(1800,450){\usebox{\aprime}}}\end{picture} \\
\rule{0pt}{3ex}\delta & \delta_{\alpha_1}+\delta_{\alpha_3} & \delta_{\alpha_1} & \delta_{\alpha_1}^+ & \delta_{\alpha_1}+\delta_{\alpha_3} & \delta_{\alpha_1}
\end{array}\]
Notice that the group $\Gamma$ is trivial for all systems in this table. The first three couples correspond to nilpotent orbits, the last two couples to semisimple orbits. Notice that the third couple corresponds to the nilpotent matrices having Jordan form
\[\left(\begin{array}{rrrr}0&1&0&0\\0&0&1&0\\0&0&0&0\\0&0&0&0\end{array}\right)\]
an orbit which is spherical in $\mathbb P(\mathfrak{sl}(4))$, but not in $\mathfrak{sl}(4)$.
\end{example}
\bigskip
\textit{Remarks}
1) Spherical orbits in general adjoint representations are well known (see \cite{Pa94,Pa03,CCC05,Co08}).
2) Let us use again the notation of the above proposition. G.~Pezzini has shown in \cite{Pe07} that the closure of $G.[x]$ in $\mathbb P(V_\pi)$ is wonderful if and only if $\mathcal S$ is strict and $\mathrm{supp}_\Delta(\delta)=\Delta$.
\bigskip
As a nontrivial exercise, the reader is invited to characterize the inclusion relations of spherical orbit closures in simple projective spaces, in terms of faithful couples.
\clearpage
\chapter{Examples of type $\mathsf F_4$}
We have already listed in Chapter~1 the 266 spherical systems of type $\mathsf F_4$. We will now give more information on the corresponding wonderful varieties. The non-trivial ones have dimension between 15 (the smallest generalized flag variety) and 28 (the dimension of the Borel subgroup), and are not very accessible to study. One knows that they can be theoretically realized as closures of orbits in some high dimensional projective spaces, but even the set-theoretical description of these closures seems in general to be out of reach. So we have to be content mainly with describing their isotropy groups.
In some of the following sections, we start with a spherical system $\mathcal S$ and obtain the structure of the generic isotropy group $H$ of the corresponding wonderful variety $X$. Then we list all spherical systems $\mathcal S_i$ that have $\mathcal S$ as minimal quotient, and determine the corresponding generic isotropy groups $H_i$ as subgroups of $H$. Even if in this way we do not get a direct picture of the wonderful varieties themselves, we obtain some understanding of the wonderful subgroups of type $\mathsf F_4$, and of their co-connected minimal inclusions. These can be seen as explicit examples of minimal wonderful morphisms.
We also give an explicit (but abstract) classification of the spherical orbits in the projective fundamental representations of type $\mathsf F_4$.
Finally, in the last section, we discuss some examples not of type $\mathsf F_4$, for the additional information they contain.
\subsection*{Contents of Chapter 3}
\begin{itemf}
\item[\ref{ss1521}] A remarkable example of rank 2
\item[\ref{ss811}] A remarkable example of rank 1
\item[\ref{projectivecsss}] Projective colors and strongly solvable systems
\item[\ref{sb3}] Examples coming from type $\mathsf B_3$
\item[\ref{ss711}] Examples coming from type $\mathsf C_3$
\item[\ref{ss1514}] Another remarkable example of rank 2
\item[\ref{constant}] $\mathcal L$-type minimal morphisms with constant defect
\item[\ref{increasing}] $\mathcal L$-type minimal morphisms with strictly increasing defect
\item[\ref{fibered}] An example of a fiber product
\item[\ref{ssfundamental}] Spherical orbits in fundamental representations
\item[\ref{notf4}] Examples not of type $\mathsf F_4$
\end{itemf}
\vspace{6ex}\section{A remarkable example of rank 2}\label{ss1521}
\[\begin{picture}(6000,1200)
\put(300,900){\usebox{\dynkinf}}
\multiput(300,900)(5400,0){2}{\circle{600}}
\multiput(300,0)(5400,0){2}{\line(0,1){600}}
\put(300,0){\line(1,0){5400}}
\put(3900,900){\circle{600}}
\put(1800,600){\usebox{\GreyCircle}}
\end{picture}\]
The corresponding spherical system $\mathcal S=(\Sigma,S^p,\mathbf A)$ has $\Sigma=\{\alpha_1+\alpha_4,\alpha_2+\alpha_3\}$, $S^p=\emptyset$ and the following Cartan pairing:
\[\begin{array}{r|rr}c(-,-)&\sigma_1&\sigma_2\\\hline\delta_{\alpha_1}&2&-1\\\delta_{\alpha_2}&-1&1\\\delta_{\alpha_3}&-1&0\end{array}\]
Let us briefly explain how one can prove that this $\mathcal S$ comes from a
wonderful variety $X$, unique up to isomorphism.
If $X$ exists, since $S^p = \emptyset$, it should have dimension $\mathrm{card}(\Sigma) + \dim(B^u) = 2 + 24 = 26$; in particular, a generic stabilizer $H$ should have again dimension $26 = 52 - 26 = \dim(G) - \dim(X)$. The spherical system $\mathcal S$ has a (unique) distinguished subset $\{\delta_{\alpha_1},\delta_{\alpha_2}\}$. Indeed, $c(\delta_{\alpha_1}+\delta_{\alpha_2},\sigma)\geq0$, for both $\sigma\in\Sigma$. Then $\Sigma/\{\delta_{\alpha_1},\delta_{\alpha_2}\}=\emptyset$ and the quotient $\mathcal S /\{\delta_{\alpha_1},\delta_{\alpha_2}\}$ has the following diagram:
\[\begin{picture}(5400,600)
\put(0,300){\usebox{\dynkinf}}
\put(3600,300){\circle{600}}
\end{picture}\]
So this quotient is homogeneous, and the corresponding wonderful variety is isomorphic to $G/^-Q$, where $Q=Q_{\alpha_3}$ (with notation as in \ref{ssstructure}); this $Q$ is of semisimple type $\mathsf A_2\times \mathsf A_1$. Moreover, the corresponding minimal wonderful morphism $X\to G/^-Q$ would be of type $\mathcal L$. Indeed, the defect remains constant and the negative color $\delta_{\alpha_3}$, which is interior in $\mathcal S$, becomes exterior in the quotient. Since the dimension of $Q$ is 32, it follows that if we choose $H$ included in $^-Q$, then we can suppose that it has the same Levi part as $^-Q$, say $M$, and $H^u\subset {}^-Q^u$ has codimension 6. Since $\mathrm{Lie}(^-Q^u)/\mathrm{Lie}((^-Q^u, {}^-Q^u))$ is a simple $M$-module of dimension 6 (see Section~\ref{ssstructure}), it follows that $H$ is necessarily of the form $H = (^-Q^u, {}^-Q^u)\,M$. So $H$ is determined up to conjugation in $G$, which gives the uniqueness of $X$ (up to $G$-isomorphism).
Conversely, let $H$ be a subgroup of $G$ given by $H = (^-Q^u,{}^-Q^u)\,M$ as above. One can check that $H$ is spherical (by using for instance Corollary~1.4 in \cite{Pa94}). Since $\mathrm{N}_{{}^-\! Q}(H)=H$, $H$ is wonderful in $G$ by Corollary~\ref{argument} of \ref{sphericalclosure2}. The wonderful completion $X'$ of $G/H$ has dimension 26 and comes with a (minimal) wonderful morphism $X'\to G/^-Q$. This implies that the spherical system of $X'$ is equal to $\mathcal S$ (there is simply no other system having the corresponding properties on the combinatorial level), which proves the existence of $X$.\hspace{\stretch{1}}$\square$
\bigskip
Henceforth, in Chapter 3, we will admit that all spherical systems considered are geometrically realizable.
With a little combinatorial effort, one obtains all spherical systems having $\mathcal S$ as quotient:
\[\strtwoffourtwoone\]
The reader is expected to make explicit on his own the corresponding
spherical roots, Cartan pairings, distinguished subsets of
colors\ldots\ for all the diagrams in the figure above, and to check that these (minimal) quotients are well defined. In what follows we will explain only part of this information.
Let $\mathcal S_i$, $i=1,2,3,4$, be the spherical systems corresponding to the diagrams on the second line of the figure (numbered from left to right), and by $H_i$ the generic isotropy groups of the corresponding wonderful varieties (we admit their existence). One can choose the $H_i$'s contained in $H$ (the generic isotropy group of the wonderful variety $X$ introduced above).
The group $H$ is connected of semisimple type $\mathsf A_2\times\mathsf A_1$. One checks easily that $H$ has (up to conjugation) exactly three parabolic subgroups of dimension $\geq24=\dim(B^u)$ (two having semisimple type $\mathsf A_1\times\mathsf A_1$, and the last having semisimple type $\mathsf A_2$). Since $d(\mathcal S_i)=2$ ($i=1,2,3$) and $d(\mathcal S)=1$, the quotients $\mathcal S_i\to\mathcal S$ ($i=1,2,3$) are of type $\mathcal P$. It follows that the $H_i$ ($i=1,2,3$) are the parabolic subgroups of $H$ mentioned above. We will come back to these systems $\mathcal S_i$ ($i=1,2,3$) in Section~\ref{constant}.
Let us now consider the spherical system $\mathcal S_4$. Here $d(\mathcal S_4) = d(\mathcal S) = 1$, and the only new negative color $\delta_{\alpha_3}$ which appears in $\mathcal S$ is interior. So a priori we don't know if the quotient $\mathcal S_4\to\mathcal S$ is of type $\mathcal L$ or of type $\mathcal R$. But we have seen above that $H = ({}^-\! Q^u,{}^-\! Q^u)M$, where $Q = Q_{\alpha_3} = M\,Q^u$, and one can check that $H_4$ has codimension 2 in $H$. Since $(M,M)$ does not contain any semisimple subgroup of codimension 2, the quotient must be of type $\mathcal L$. We know that $\mathrm{Lie}({}^-\! Q^u)$ decomposes into simple $M$-modules
\[\mathrm{Lie}({}^-\! Q^u) = \mathfrak n_{-1} +\mathfrak n_{-2} +\mathfrak n_{-3} +\mathfrak n_{-4}\]
having dimensions 6, 9, 2, 3 respectively (cf.\ \ref{ssstructure}). Since $H_4$ has codimension 2 in $H$, it follows that $H_4 = H_4^uM$, where $\mathrm{Lie}(H_4^u) = \mathfrak n_{-2} + \mathfrak n_{-4}$.
\bigskip
There is another minimal distinguished subset in $\Delta_4$ (the set of colors of $\mathcal S_4$), namely $\Delta_4\setminus\{\delta_{\alpha_4}^-\}$. This subset defines a minimal quotient (of type $\mathcal P$)
\[\begin{picture}(16800,2700)
\put(0,0){
\put(300,1350){\usebox{\dynkinf}}
\multiput(0,450)(3600,0){2}{\usebox{\aone}}
\put(5400,450){\usebox{\aone}}
\multiput(300,2250)(5400,0){2}{\line(0,1){450}}
\put(300,2700){\line(1,0){5400}}
\multiput(300,0)(3600,0){2}{\line(0,1){450}}
\put(300,0){\line(1,0){3600}}
\put(700,1750){\usebox{\toe}}
\put(4900,1750){\usebox{\tow}}
\put(1800,1050){\usebox{\GreyCircle}}
}
\multiput(6600,1350)(600,0){5}{\line(1,0){300}}
\put(9600,1350){\vector(1,0){300}}
\put(11100,1350){\usebox{\dynkinf}}
\put(16200,1050){\usebox{\GreyCircle}}
\end{picture}\]
which allows us a good transition to the next section.
\vspace{6ex}\section{A remarkable example of rank 1}\label{ss811}
\[\begin{picture}(5700,600)
\put(0,300){\usebox{\dynkinf}}
\put(5100,0){\usebox{\GreyCircle}}
\end{picture}\]
The spherical system of the diagram above is $\mathcal S=(\Sigma,S^p,\mathbf A)$ where $\Sigma=\{\sigma\}$ with $\sigma=\alpha_1+2\alpha_2+3\alpha_3+2\alpha_4$, and $S^p=\mathbf A=\emptyset$.
This system has only one color (of type $b$), $\delta_{\alpha_4}$, and $c(\delta_{\alpha_4},\sigma)=1$.
The corresponding wonderful variety is of rank 1 and has dimension
16. Since the defect of $\mathcal S$ is 0, the corresponding generic
isotropy group $H$ is very reductive, and of dimension 36. There is
only one such subgroup in $G$ (up to conjugation), which is simple,
simply connected and of type $\mathsf B_4$ (so $H$ is isomorphic to $\mathrm{Spin}(9)$), a well known symmetric subgroup of $G$. Since $\mathrm{N}_G(H)=H$, $H$ is spherically closed in $G$.
Among the 16 parabolic subgroups of $H$, only 5 have dimension $\geq
24 = \dim(B^u)$, the 4 maximal parabolic subgroups of $H$ (which have
semisimple types respectively $\mathsf B_3$, $\mathsf A_1\times
\mathsf B_2$, $\mathsf A_2\times\mathsf A_1$, $\mathsf A_3$) and
another one (which has semisimple type $\mathsf B_2$).
With a little (combinatorial) effort, one obtains all spherical systems having as quotient the system $\mathcal S$ (arrows correspond to minimal quotients):
\[\stroneffouroneone\]
The reader is invited to make explicit the corresponding spherical roots, Cartan pairings, distinguished subsets of colors\ldots\ of all the diagrams above, and to check that all these quotients are well defined and minimal.
Let us denote by $\mathcal S_{1,2}$ the system corresponding to the
diagram on the first line of the figure, and by $\mathcal S_i$
($i=1,2,3,4$) the systems of those on the second line. Let us denote
by $H_1$, $H_2$, $H_3$, $H_4$, $H_{1,2}$ generic isotropy groups of
the corresponding wonderful varieties (which we assume to exist). They
have dimensions respectively 29, 25, 24, 26, 24. Since $d(\mathcal
S_{1,2}) = 2$, $d(\mathcal S_i) =1$ ($i=1,2,3,4$) and $d(\mathcal S )=
0$, all arrows above are of type $\mathcal P$. Since there are exactly
5 systems having $\mathcal S$ as quotient of type $\mathcal P$, we obtain that $H_1$, $H_2$, $H_3$, $H_4$, $H_{1,2}$ are exactly the 5 parabolic subgroups of $H$ we
have considered above (having semisimple types respectively $\mathsf B_3$, $\mathsf A_1\times \mathsf B_2$, $\mathsf A_2\times\mathsf A_1$, $\mathsf A_3$ and $\mathsf B_2$).
We will come back to these 5 systems in Section~\ref{constant}.
For the convenience of the reader, let us give some details in the case of the system $\mathcal S_{1,2}=(\Sigma_{1,2},S^p_{1,2},\mathbf A_{1,2})$. In this case $\Sigma_{1,2}=\{\alpha_1+\alpha_2,\alpha_2+\alpha_3,\alpha_3,\alpha_4\}$, $S^p_{1,2}=\emptyset$ and the Cartan pairing is as follows:
\[\begin{array}{r|rrrr}c(-,-)&\sigma_1&\sigma_2&\sigma_3&\sigma_4\\\hline\delta_{\alpha_1}&1&-1&0&0\\\delta_{\alpha_2}&1&1&-1&0\\\delta_{\alpha_3}^+&0&0&1&-1\\\delta_{\alpha_3}^-&-2&0&1&0\\\delta_{\alpha_4}^+&0&0&-1&1\\\delta_{\alpha_4}^-&0&-1&0&1\end{array}\]
Since the diagram of $\mathcal S_{1,2}$ might be ambiguous, let us remark that $c(\delta_{\alpha_4}^+,\alpha_3)=-1$ but $c(\delta_{\alpha_4}^+,\alpha_2+\alpha_3)=0$.
The subset of colors $\Delta_{1,2}'=\{\delta_{\alpha_1},\delta_{\alpha_2},\delta_{\alpha_3}^-\}$ is minimal distinguished.
Moreover, for any $\sigma\in\mathbb N\Sigma_{1,2}$, say $\sigma=\sum_{i=1}^4m_i\sigma_i$, $c(\delta,\sigma)=0$ for all $\delta\in\Delta_{1,2}'$ implies $2m_1=2m_2=m_3$. Therefore, one gets $\sigma=m_3(\alpha_1+2\alpha_2+3\alpha_3)+m_4(\alpha_4)$ and obtains as quotient the spherical system $\mathcal S_1$, with defect 1.
Another minimal distinguished subset of colors is $\Delta_{1,2}''=\{\delta_{\alpha_3}^+,\delta_{\alpha_4}^+\}$.
Moreover, for any $\sigma=\sum_{i=1}^4m_i\sigma_i$, $c(\delta_{\alpha_3}^+,\sigma)=0$ implies $m_3=m_4$. Therefore, one gets $\sigma=m_1(\alpha_1+\alpha_2)+m_2(\alpha_2+\alpha_3)+m_3(\alpha_3+\alpha_4)$ and obtains as quotient the spherical system $\mathcal S_2$, with defect 1.
\bigskip
There is another well known algebraic subgroup of $G$ whose dimension
is $\geq24$, the symmetric subgroup $K$ of $H$ which is simple of type
$\mathsf D_4$. This $K$ has dimension 28, but is \textit{not}
spherical in $G$. Indeed, if $K$ were spherical in $G$, it would be
wonderful in $G$ (by Corollary~3 of \ref{sphericalclosure2}). But
there is no spherical system of type $\mathsf F_4$ having as quotient
the system $\mathcal S$ and corresponding to a wonderful subgroup
which is reductive. The fact that $K$ is not spherical in $G$ has been known for a long time, in the context of multiplicity-free homogeneous spaces of compact Lie groups (see for instance \cite{Kr79}). This also follows from the fact that $N_G(K)/K$, being isomorphic to the exterior automorphism group of $K$, is not commutative.
\vspace{6ex}\section{Projective colors and strongly solvable\\ systems}\label{projectivecsss}
In this section we will explain some facts about strongly solvable spherical systems, and illustrate them with examples of type $\mathsf F_4$.
Let us begin with some details on projective colors.
\subsection{Projective colors}\label{projective}
Let $X$ be a wonderful variety, $\mathcal S=(\Sigma,S^p,\mathbf A)$ its spherical system and $\Delta$ its set of colors. Remember also that a color $\delta\in\mathbf A$ is called \textit{projective} if $c(\delta,\sigma)\geq0$, for all $\sigma\in\Sigma$. If $\delta$ is a projective color, the set $\{\delta\}$ is clearly distinguished in $\Delta$. Let us denote by $\phi\colon X\to \,\grave{} X$ a wonderful morphism associated to $\{\delta\}$. The term projective color comes from the fact that $\phi$ is a \textit{projective fibration} (i.e.\ is smooth and its fibers are isomorphic to $\mathbb P^n$'s).
Denote by $S_\delta$ the set of $\alpha\in S$ such that $\delta\in\Delta(\alpha)$. Then $\delta$ is also called an \textit{$n$-comb}, where $n=\mathrm{card}(S_\delta)$. Here is an example with 2-comb $\delta_{\alpha_1}^+=\delta_{\alpha_3}^+$:
\[\begin{picture}(6000,2250)\put(300,900){\usebox{\dynkinf}}\multiput(0,0)(3600,0){2}{\usebox{\aone}}\multiput(300,1800)(3600,0){2}{\line(0,1){450}}\put(300,2250){\line(1,0){3600}}\put(1800,600){\usebox{\GreyCircle}}\put(5400,0){\usebox{\aone}}\put(4900,1300){\usebox{\tow}}\end{picture}\]
Wonderful morphisms given by a projective color are the simplest examples of $\mathcal L$-type minimal morphisms. All colors $\delta_\alpha$, $\alpha\in S_\delta$, appear as negative colors in $\,\grave{} X$, and the difference $d(\,\grave{} X)-d(X)$ is equal to $n-1$.
We will now explain how one can reduce, in some sense, $n$-combs to 1-combs. This will prepare a similar reduction of $\mathcal L$-type minimal morphisms of strictly increasing defect, to those of constant defect (see Section~\ref{increasing}).
Let $\delta$ be an $n$-comb. Remember that $S_\delta\subset\Sigma$. For each $\alpha\in S_\delta$, denote by $\mathcal S_{\alpha}$ the spherical system obtained from $\mathcal S$ by localization with respect to $(\Sigma\setminus S_\delta)\cup\{\alpha\}$. The corresponding variety is a wonderful $G$-subvariety of $X$ of codimension $n-1$, which we will denote by $X_{\alpha}$. The restriction of $\phi$ to $X_{\alpha}$ gives wonderful morphisms $\phi\colon X_{\alpha}\to \,\grave{} X$ which come from a 1-comb on $X_{\alpha}$, for all $\alpha\in S_\delta$. In our example above, $S_\delta=\{\alpha_1,\alpha_3\}$, and on the diagram level, the (two) morphisms $\phi\colon X_\alpha\to\,\grave{} X$ are as follows:
\[\begin{picture}(16800,7800)
\put(0,6000){
\put(300,900){\usebox{\dynkinf}}\multiput(0,0)(5400,0){2}{\usebox{\aone}}\put(1800,600){\usebox{\GreyCircle}}\put(3900,900){\circle{600}}
}
\put(5400,0){
\put(300,900){\usebox{\dynkinf}}\multiput(300,900)(3600,0){2}{\circle{600}}\put(1800,600){\usebox{\GreyCircle}}\put(5400,0){\usebox{\aone}}
}
\put(10800,6000){
\put(300,900){\usebox{\dynkinf}}\multiput(3600,0)(1800,0){2}{\usebox{\aone}}\put(1800,600){\usebox{\GreyCircle}}\put(300,900){\circle{600}}\put(4900,1300){\usebox{\tow}}
}
\put(3600,5400){\vector(1,-1){3000}}
\put(13200,5400){\vector(-1,-1){3000}}
\end{picture}\]
We will now explain how one can understand $\phi\colon X\to \,\grave{} X$ if one knows the $\phi\colon X_{\alpha}\to \,\grave{} X$, $\alpha\in S_\delta$. Let us choose generic stabilizers $H_{\alpha}$ and $\,\grave{} H$ in $X_{\alpha}$ and $\,\grave{} X$. One can assume these $H_{\alpha}$ contained in $\,\grave{} H$, and that all these groups have a same Levi subgroup $\,\grave{} L$; moreover, for each $\alpha\in S_\delta$, $\mathrm{Lie}(H_{\alpha}^u)$ is a subspace of codimension 1 in $\mathrm{Lie}(\,\grave{} H^u)$, containing $\mathrm{Lie}(\,\grave{} H^u,\,\grave{} H^u)$, and stable by $\,\grave{} L$. This gives in particular characters $\tilde\alpha$ of $\,\grave{} L$ (since $\,\grave{} L$ acts on the 1-dimensional space $\mathrm{Lie}(\,\grave{} H^u)/\mathrm{Lie}(H_{\alpha}^u)$).
Then one can choose $H$ in the following form: $H=L\,H^u$, where $L$ is the subgroup of $\,\grave{} L$ where all the characters $\tilde\alpha$, $\alpha\in S_\delta$, coincide; and $\mathrm{Lie}(H^u)$ is a codimension 1 subspace of $\mathrm{Lie}(\,\grave{} H^u)$, which contains the intersection of all $\mathrm{Lie}(H_{\alpha}^u)$, but otherwise is in general position (all these $\mathrm{Lie}(H^u)$ are conjugated under $\,\grave{} L$).
On the other hand, if one introduces $H$ by means of this definition, one can check that $H$ is wonderful, and that the wonderful completion of $G/H$ has spherical system $\mathcal S$.
In other words, in order to prove the uniqueness and existence for geometric realizations of $\mathcal S$, it is necessary and sufficient to do it for the systems $\mathcal S_{\alpha}$, $\alpha\in S_\delta$.
\subsection{Strongly solvable systems}
Remember that $\mathcal S$ is called strongly solvable, if there exists a distinguished subset of colors $\,\grave{} \Delta$ such that $\mathcal S/\,\grave{} \Delta=(\emptyset,\emptyset,\emptyset)$ (which is the spherical system of the flag variety $G/B$). By definition, $\mathcal S$ is strongly solvable if and only if a generic stabilizer $H$ of $G$ in $X$ is contained in $B$. In Table~\ref{tr4o}, one can find all 38 strongly solvable rank 4 spherical systems of type $\mathsf F_4$.
If $\mathcal S$ is strongly solvable, then $\Sigma\subset S$ and $S^p=\emptyset$, but the converse is not true in general. The following conditions are equivalent:
\begin{itemf}
\item[(1)] $\mathcal S$ is strongly solvable;
\item[(2)] there exists a sequence of successive quotients by projective colors starting at $\mathcal S$ and ending at $(\emptyset,\emptyset,\emptyset)$;
\item[(3)] any sequence of successive quotients by projective colors starting at $\mathcal S$ can be prolonged until $(\emptyset,\emptyset,\emptyset)$.
\end{itemf}
It has been known for some time that every strongly solvable spherical system (for any semisimple group) is the spherical system of a wonderful variety, uniquely determined up to isomorphism, see \cite{Lu93}.
Here are two examples of strongly solvable systems of type $\mathsf F_4$ with sequences of successive quotients by projective colors:
\[\stronglysolvabletwo\]
\[\stronglysolvablethree\]
\vspace{6ex}\section{Examples coming from type $\mathsf B_3$}\label{sb3}
Remember that $G$ denotes in this chapter a simple group of type
$\mathsf F_4$. In this section, let $Q = Q_{\alpha_4}$ be the parabolic subgroup of $G$ containing $B$ and having semisimple type $\mathsf B_3$. Then ${}^-\! Q/{}^-\! Q^r = \underline G$ is a simple group of type $\mathsf B_3$. For every wonderful $\underline G$-variety $\underline X$, remember that $X = G \ast_{{}^-\! Q} \underline X$ is the wonderful $G$-variety obtained from $\underline X$ by parabolic induction by means of $Q$ (see \ref{ssparabolic}).
On the combinatorial level, this means that the spherical system $\mathcal S=(\Sigma,S^p,\mathbf A)$ of $X$ has an exterior negative color at $\alpha_4$. In the tables of Chapter~1, one can easily find all spherical systems having this property; for instance, Table~\ref{tr3b3} contains all such spherical systems of rank 3.
In this section, we will give some examples of wonderful subgroups
$\underline H$ of $\underline G$ (essentially two reductive spherical
subgroups of $\underline G$, together with those of their parabolic subgroups which are wonderful in $\underline G$), and explicit the corresponding spherical systems of type $\mathsf B_3$.
\subsection{}\label{ss611}
Consider the following figure of diagrams of type $\mathsf B_3$
\[\bthreeone\]
The reader is invited to make explicit the spherical roots, Cartan pairings, \ldots\ of all the spherical systems corresponding to the diagrams in this figure, and to check that all arrows are associated to well defined minimal quotients of type $\mathcal P$.
The spherical system of the diagram on the bottom line of the figure, together with its double
\[\begin{picture}(3900,1200)
\put(0,-600){
\put(300,900){\usebox{\segm}}
\put(2100,900){\usebox{\rightbisegm}}
\put(0,600){\usebox{\GreyCircle}}
}
\end{picture}
\hspace{2cm}
\begin{picture}(3900,1200)
\put(0,-600){
\put(300,900){\usebox{\segm}}
\put(2100,900){\usebox{\rightbisegm}}
\put(0,600){\usebox{\GreyCircleTwo}}
}
\end{picture}\]
are well known spherical systems of rank 1, with spherical roots $\sigma= \alpha_1+\alpha_2+\alpha_3$ and $2\sigma$. Their corresponding wonderful subgroups $\underline K$ and $\mathrm{N}_{\underline G}(\underline K)$ are symmetric subgroups of $\underline G$, of type $\mathsf D_3 = \mathsf A_3$.
The spherical system of the diagram on the top line of the figure
\[\begin{picture}(3900,1800)
\put(300,900){\usebox{\segm}}
\put(2100,900){\usebox{\rightbisegm}}
\multiput(0,0)(1800,0){3}{\usebox{\aone}}
\multiput(1300,1300)(1800,0){2}{\usebox{\tow}}
\end{picture}\]
is strongly solvable. It follows that Borel subgroups $B(\underline K)$ of $\underline K$ are wonderful subgroups in $\underline G$, and have this system as spherical system.
This implies that all parabolic subgroups of $\underline K$ are wonderful in $\underline G$, and that their spherical systems are those of the diagrams in the figure above (there are only 6 systems and 8 parabolic subgroups of $\underline K$ containing $B(\underline K)$, because two couples of the latter set are conjugated in $\underline G$).
The normalizer of a parabolic subgroup of $\underline K$ is not always co-connected in $\mathrm{N}_{\underline G}(\underline K)$. The following figure gives the diagrams of those which are:
\[\stronebthreeonethree\]
\subsection{}\label{ss614}
\[\begin{picture}(3600,600)
\put(-300,-600){
\put(300,900){\usebox{\segm}}
\put(2100,900){\usebox{\rightbisegm}}
\put(3600,600){\usebox{\GreyCircle}}
}
\end{picture}\]
is the diagram of another well known spherical system of rank 1 of type $\mathsf B_3$, with spherical root $\sigma = \alpha_1+ 2\alpha_2+ 3\alpha_3$. The corresponding wonderful subgroup $\underline H$ of $\underline G$ is simple of type $\mathsf G_2$ (and not symmetric).
The two maximal parabolic subgroups of $\underline H$ have dimension $9 = \dim(B^u(\underline G))$. On the other hand, the following figure contains all spherical systems having as quotient (of type $\mathcal P$) the spherical system of $\underline H$:
\[\stronebthreeonefour\]
Since we assume the spherical systems above geometrically realizable, it follows that these two parabolic subgroups of $\underline H$ are wonderful in $\underline G$.
\textit{Exercise.} These two parabolic subgroups have the same dimension and the same semisimple type, but are not isomorphic as groups. Which subgroup corresponds to which diagram?
\vspace{6ex}\section{Examples coming from type $\mathsf C_3$}\label{ss711}
This section is analogous to Section~\ref{sb3}: the role of $\mathsf B_3$ is now played by $\mathsf C_3$. So now $Q=Q_{\alpha_1}$ will be the parabolic subgroup of $G$ having semisimple type $\mathsf C_3$, and we will set ${}^-\! Q/{}^-\! Q^r=\underline G$, which is now a simple group of type $\mathsf C_3$. The contrast between sections \ref{sb3} and \ref{ss711} is striking: although simple groups of type $\mathsf B_3$ and type $\mathsf C_3$ are very similar (they are dual groups in the sense of Langlands), the structure of their subgroups is quite different.
We will draw the Dynkin diagram of type $\mathsf C_3$ as follows
\[\begin{picture}(3600,600)
\put(0,300){\usebox{\rightbisegm}}
\put(1800,300){\usebox{\segm}}
\end{picture}\]
keeping the numbering of simple roots $\alpha_2,\alpha_3,\alpha_4$ induced by $\mathsf F_4$.
Consider the following figure of diagrams of type $\mathsf C_3$
\[\sevenoneone\]
The reader is invited to make explicit the spherical roots, Cartan pairings, \ldots\ of all the spherical systems corresponding to the diagrams in this figure, and to check that all arrows come from well defined minimal quotients of type $\mathcal P$.
The spherical system of the diagram on the bottom line has a (unique) spherical root $\sigma = \alpha_2+ 2\alpha_3+ \alpha_4$. It corresponds to a well known symmetric subgroup $\underline K$ of $\underline G$, which is connected and of semisimple type $\mathsf B_2\times\mathsf A_1$.
All parabolic subgroups of $\underline K$, with the exception of the Borel subgroups, have dimension $\geq 9 = \dim(B^u(\underline G))$. Since the figure above contains 7 spherical systems having as quotient of type $\mathcal P$ the system of $\underline K$, and since we assume all these spherical systems geometrically realizable, all parabolic subgroups of $\underline K$, except the Borel subgroups, are wonderful in $\underline G$, and their diagrams are those of the figure above.
\vspace{6ex}\section{Another remarkable example of rank 2}\label{ss1514}
Consider the figure
\[\begin{picture}(16800,600)\put(300,300){\usebox{\dynkinf}}\multiput(0,0)(3600,0){2}{\usebox{\GreyCircle}}\put(5700,300){\circle{600}}\put(6900,300){\vector(1,0){3000}}\put(10800,0){\put(300,300){\usebox{\dynkinf}}\put(0,0){\usebox{\GreyCircle}}\put(5700,300){\circle{600}}}\end{picture}\]
The first diagram comes from a spherical system $\mathcal S$ given by $\Sigma=\{\alpha_1+\alpha_2+\alpha_3,\alpha_2+2\alpha_3+\alpha_4\}$, $S^p=\{\alpha_2\}$ and $\mathbf A=\emptyset$.
The distinguished set of colors corresponding to the arrow is
$\{\delta_{\alpha_3}\}$, and this quotient is of type $\mathcal
L$. The second diagram has already been analyzed in \ref{ss611}. Let $Q = Q_{\alpha_4}$ be the parabolic subgroup of $G$, and $\underline K$ the subgroup of $\underline G = {}^-\! Q/{}^-\! Q^r$, as in Section~\ref{sb3}. Put $L = M\cap q^{-1}(\underline K)$, where $M$ is the Levi factor of $Q$ containing $T$, and where $q\colon {}^-\! Q\to \underline G$ denotes the canonical map. We know that
\[\mathrm{Lie}({}^-\! Q^u) = \mathfrak n_{-1}+ \mathfrak n_{-2}\]
where $\mathfrak n_{-1}$ and $\mathfrak n_{-2}$ are simple $M$-modules of dimension 8 and 7 (see \ref{ssstructure}). Under the action of $L$, $\mathfrak n_{-1} = \mathfrak n_{-1}'\oplus \mathfrak n_{-1}''$ splits into two $L$-submodules of dimension 4.
Let us define $H$ by $H=H^uL$, where $\mathrm{Lie}(H^u)$ is either $\mathfrak n_{-1}'+ \mathfrak n_{-2}$ or $\mathfrak n_{-1}'' +\mathfrak n_{-2}$ (these two choices are conjugated in $G$). It is not difficult to check that $H$ is a wonderful subgroup of $G$ having $\mathcal S$ as spherical system.
There are only three spherical systems admitting $\mathcal S$ as quotient, with diagrams as follows:
\[\strtwoffouronefour\]
Let us denote them $\mathcal S_i$, $i=1,2,3$ from left to right.
The first quotient $\mathcal S_1\to\mathcal S$ is a quotient by a projective color. The other two quotients $\mathcal S_i\to\mathcal S$ ($i=2,3$) are of type $\mathcal P$. It follows that the two maximal parabolic subgroups of $H$ having semisimple type $\mathsf A_2$ and dimension $24 = \dim(B^u)$, are wonderful in $G$ and have as spherical systems the $\mathcal S_i$ ($i=2,3$) (in particular they are not conjugated in $G$).
\vspace{6ex}\section{$\mathcal L$-type minimal morphisms with constant defect}\label{constant}
In this section we gather examples of minimal wonderful morphisms $\phi\colon X\to \,\grave{} X$ of type $\mathcal L$, such that $d(\,\grave{} X) = d(X)$.
Remember that this is equivalent to the existence of generic isotropy groups $\,\grave{} H$ of $\,\grave{} X$ and $H$ of $X$, having a common Levi subgroup $L$, such that $H^u \subset \,\grave{} H^u$ and $\mathrm{Lie}(\,\grave{} H^u)/\mathrm{Lie}(H^u)$ is a simple $L$-module.
We will introduce these wonderful varieties and morphisms only by their diagrams, without going into details. The reader is of course invited to make explicit the corresponding spherical roots, Cartan pairings, distinguished subsets of
colors, and to verify that these morphisms are indeed minimal, of type $\mathcal L$ and have constant defect.
We have already seen several such examples, in Section~\ref{ss1521}:
\[\constantone\]
and in Section~\ref{ss1514}:
\[\constanttwo\]
In what follows, we will give similar diagrams in particular for all the spherical systems we have come across in Sections \ref{ss1521}, \ref{ss811} and \ref{ss1514} (arrows going from left to right will always correspond to minimal wonderful morphisms of type $\mathcal L$ with constant defect).
\subsection{}
Here is an example close to the spherical system of Section~\ref{ss1521} (having an $\mathcal L$-type minimal quotient which is homogeneous):
\[\constantthree\]
Notice that the wonderful variety corresponding to the diagram of rank 2 above has isotropy groups having semisimple type $\mathsf B_3$, $\mathsf G_2$ and $\mathsf B_2$.
\subsection{}
Here are examples admitting $\mathcal L$-type minimal wonderful morphisms onto wonderful varieties coming from type $\mathsf B_3$:
\[\constantfour\]
We have met already the diagrams of rank 4 above in Section~\ref{ss1514}, and that of rank 3 on the top line in Section~\ref{ss1521}.
\subsection{}
Here are examples admitting $\mathcal L$-type minimal wonderful morphisms onto wonderful varieties coming from type $\mathsf C_3$:
1)
\[\constantfive\]
The vertical arrows (corresponding to morphisms of type $\mathcal P$) have appeared already in sections \ref{ss811} and \ref{ss711}.
2)
\[\constantsix\]
Of the two diagrams of rank 4 above, one appeared already in Section~\ref{ss1521}, while the other is new in Chapter~3.
\newpage
\begin{picture}(90000,25500)\put(3000,0){\thebigone}\end{picture}
\bigskip
Here is some information on the quotients of the last figure on the previous page (notice that this figure appears also as part of the ``big'' figure above), two quotients which seem to be closely similar. Let us choose wonderful subgroups $H_i$ ($i=1,2$) associated to the two diagrams of rank 4, and $H$ a wonderful subgroup associated to their common quotient of rank 2, all three having a common Levi factor $L$. Then $\mathrm{Lie}(H^u)/\mathrm{Lie}(H_i^u)$ ($i=1,2$) are simple $L$-modules of dimension 2. What happens here is that $L$, which is of semisimple type $\mathsf A_1\times \mathsf A_1$, acts on the two $\mathrm{Lie}(H^u)/\mathrm{Lie}(H_i^u)$ ($i=1,2$) via homomorphisms $L\to \mathrm{GL}(2)$ which are trivial on different semisimple factors of $L$.
\bigskip
Let us mention also two other minimal $\mathcal L$-type quotients:
\[\constantseven\]
\[\constanteight\]
We have met the second diagram of rank 4 already in Section~\ref{ss1521}.
\newpage
\begin{picture}(0,25500)\put(-53466,0){\thebigone}\end{picture}
\subsection{}
In the ``big'' two-pages figure above we have assembled $\mathcal P$-type minimal wonderful morphisms introduced in Sections \ref{ss1521}, \ref{ss811} and \ref{ss1514}, together with some of the $\mathcal L$-type minimal wonderful morphisms seen in this section.
\vspace{6ex}\section{$\mathcal L$-type minimal morphisms with strictly\\ increasing defect}\label{increasing}
In the preceding section we have given examples of $\mathcal L$-type minimal (wonderful) morphisms, with same defect on source and target. In this section, we give examples of $\mathcal L$-type minimal morphisms where the defect increases by 1. To each of these examples, we will attach other $\mathcal L$-type minimal morphisms with constant defects, in a way somewhat similar to the one we have used in Section~\ref{projective} to reduce $n$-combs to 1-combs.
Remember that $X$ denotes a wonderful variety, that $\mathcal S=(\Sigma,S^p,\mathbf A)$ is its spherical system and $\Delta$ its set of colors.
\subsection{}
Let us start with the example of the spherical system $\mathcal S$ given by the following diagram:
\[\begin{picture}(6000,2850)
\put(300,1350){\usebox{\dynkinf}}
\multiput(0,450)(1800,0){4}{\usebox{\aone}}
\multiput(300,2250)(3600,0){2}{\line(0,1){600}}
\multiput(2100,2250)(3600,0){2}{\line(0,1){300}}
\put(300,2850){\line(1,0){3600}}
\put(2100,2550){\line(1,0){1700}}
\put(4000,2550){\line(1,0){1700}}
\multiput(300,0)(5400,0){2}{\line(0,1){450}}
\put(300,0){\line(1,0){5400}}
\put(1300,1750){\usebox{\tow}}
\put(4300,1750){\usebox{\toe}}
\end{picture}\]
and with the morphisms $\phi:X\to \,\grave{} X$ given by the distinguished subset of colors $\Delta^\ast=\{\delta_{\alpha_1}^+,\delta_{\alpha_2}^+\}$. Here $d(X)=1$ and $d(\,\grave{} X)=2$. Let us denote by $S^\ast$ the set of $\alpha\in S$ such that $\delta_{\alpha}$ is a negative exterior color of $\,\grave{} X$ (but not of $X$). In this case, we have $S^\ast=\{\alpha_2,\alpha_3\}\subset\Sigma$. For each $\alpha\in S^\ast$, denote by $\mathcal S_{\alpha}$ the spherical system obtained from $\mathcal S$ by localization with respect to $(\Sigma\setminus S^\ast)\cup\{\alpha\}$. The corresponding variety is a wonderful $G$-subvariety of $X$ of codimension $1$, which we will denote by $X_{\alpha}$. For every $\alpha\in S^\ast$, the restriction of $\phi$ to $X_{\alpha}$ gives a wonderful morphism $\phi\colon X_{\alpha}\to \,\grave{} X$ which is minimal of type $\mathcal L$ with constant defect.
\[\increasingone\]
As in Section~\ref{projective}, let us choose generic stabilizers $H_{\alpha}$ and $\,\grave{} H$ in $X_{\alpha}$ and $\,\grave{} X$. One can assume these $H_{\alpha}$ contained in $\,\grave{} H$, and that all these groups have a same Levi subgroup $\,\grave{} L$; moreover, for each $\alpha\in S^\ast$, $\mathrm{Lie}(H_{\alpha}^u)$ is here a subspace of codimension 2 in $\mathrm{Lie}(\,\grave{} H^u)$, containing $\mathrm{Lie}(\,\grave{} H^u,\,\grave{} H^u)$ and stable by $\,\grave{} L$, and the quotient $\mathrm{Lie}(\,\grave{} H^u)/\mathrm{Lie}(H_{\alpha}^u)$ is a simple $\,\grave{} L$-module. This last fact gives in particular a character $\tilde\alpha$ of $\ \,\grave{} L^r$ (which is the connected center of $\ \,\grave{} L$). What is also important here is that the two $(\,\grave{} L,\,\grave{} L)$-modules $\mathrm{Lie}(\,\grave{} H^u)/\mathrm{Lie}(H^u_\alpha)$, $\alpha\in S^\ast$, are isomorphic.
Then one can choose $H$ in the following form: $H=\,\grave{} T(\,\grave{} L,\,\grave{} L)\,H^u$, where $\,\grave{} T$ is the subgroup of $\,\grave{} L^r$ where the characters $\tilde\alpha$, $\alpha\in S^\ast$, coincide; and $\mathrm{Lie}(H^u)$ is a codimension 2 subspace of $\mathrm{Lie}(\,\grave{} H^u)$, stable by $(\,\grave{} L,\,\grave{} L)$, which contains the intersection of the two $\mathrm{Lie}(H_{\alpha}^u)$, but otherwise is in general position (all these $\mathrm{Lie}(H^u)$ are conjugated by $\,\grave{} L$).
On the other hand, if one introduces $H$ by means of this definition, one can check that $H$ is wonderful, and that the wonderful completion of $G/H$ has spherical system $\mathcal S$.
\subsection{}
Here are two other similar examples of type $\mathsf F_4$:
\[\increasingtwo\]
\[\increasingthree\]
\bigskip
$\mathcal L$-type minimal morphisms with strictly increasing defect also exist when $\Sigma\cap S=\emptyset$. To show this let us leave type $\mathsf F_4$. Consider for instance the following example of type $\mathsf B_5$:
\[\increasingfour\]
\vspace{6ex}\section{An example of a fiber product}\label{fibered}
Let $\mathcal S$ be a spherical system, $\Delta$ its set of colors, and $X$ a corresponding wonderful variety.
Let $\Delta_1$ and $\Delta_2$ be two distinguished subsets of $\Delta$. We set $\Delta_{1\,2} = \Delta_1\cup\Delta_2$, a subset which is again distinguished in $\Delta$. These three distinguished subsets of $\Delta$ give a (commutative) diagram of wonderful morphisms:
\[\begin{picture}(9600,8700)
\put(4500,7800){$X$}
\put(0,3900){$X_1$}
\put(8400,3900){$X_2$}
\put(3900,0){$X_{1\,2}$}
\multiput(1800,3300)(4200,3900){2}{\vector(1,-1){1800}}
\multiput(7800,3300)(-4200,3900){2}{\vector(-1,-1){1800}}
\end{picture}\]
This diagram induces a $G$-morphism $\psi\colon X\to
X_1\times_{X_{1\,2}} X_2$. In general, this fiber product has no
reason to be a wonderful variety, and $\psi$ has no reason to be an
isomorphism. But sometimes this happens, and then we say that $\Delta_1$ and $\Delta_2$ decompose $\mathcal S$. We will not discuss here the combinatorial conditions on $\mathcal S$, $\Delta_1$, $\Delta_2$ corresponding to this notion of decomposition. We will give only a simple example of type $\mathsf F_4$.
Consider the spherical system $\mathcal S = (\Sigma,S^p,\emptyset)$, where $\Sigma = \{\alpha_1+\alpha_2,\alpha_3+\alpha_4\}$.
Here $\Delta = \{\delta_{\alpha_1},\delta_{\alpha_2},\delta_{\alpha_3},\delta_{\alpha_4}\}$. If we choose $\Delta_1=\{\delta_{\alpha_1}\}$ and $\Delta_2 = \{\delta_{\alpha_4}\}$, on the combinatorial level we get the following figure:
\[\begin{picture}(18000,9600)
\put(6000,300){
\put(300,300){\usebox{\dynkinf}}
\multiput(2100,300)(1800,0){2}{\circle{600}}
}
\put(0,4500){
\put(300,300){\usebox{\dynkinf}}
\put(3600,0){\usebox{\atwo}}
\put(2100,300){\circle{600}}
}
\put(12000,4500){
\put(300,300){\usebox{\dynkinf}}
\put(0,0){\usebox{\atwo}}
\put(3900,300){\circle{600}}
}
\put(6000,8700){
\put(300,300){\usebox{\dynkinf}}
\multiput(0,0)(3600,0){2}{\usebox{\atwo}}
}
\multiput(5100,3600)(6600,4200){2}{\vector(1,-1){1800}}
\multiput(12900,3600)(-6600,4200){2}{\vector(-1,-1){1800}}
\end{picture}\]
In this case the variety $X_{1\,2}$ is homogeneous, isomorphic to $G/{}^-\! Q$, where $Q$ is a parabolic subgroup of $G$ generated by $P_{\alpha_1}$ and $P_{\alpha_4}$, and one checks easily that
\[X\to X_1\times_{G/{}^-\! Q}X_2\]
is an isomorphism.
\vspace{6ex}\section{Spherical orbits in fundamental\\ representations}\label{ssfundamental}
In this section we will apply results of Chapter~2 to describe all spherical orbits in the projective fundamental representations of type $\mathsf F_4$.
We will use the terminology and the notation of Section~\ref{ssorbits}. Remember that $\omega_i$ ($i=1,2,3,4$) are the fundamental weights (of type $\mathsf F_4$).
\begin{proposition}
The faithful couples $(\mathcal S , \delta )$ such that $\omega(\delta) = \omega_i$ are:
if $i=1$
\[\begin{picture}(34800,1800)
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(300,900){\circle{600}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_1}\ \Big)$}
\put(12000,0){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(0,600){\usebox{\GreyCircleTwo}}
\put(5700,900){\circle{600}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_1}\ \Big)$}
}
\put(24000,0){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(0,600){\usebox{\atwo}}
\multiput(3600,0)(1800,0){2}{\usebox{\aprime}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_1}\ \Big)$}
}
\end{picture}\]
if $i=2$
\[\begin{picture}(34800,10800)
\put(0,9000){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(2100,900){\circle{600}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_2}\ \Big)$}
}
\put(12000,9000){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\multiput(300,900)(5400,0){2}{\circle{600}}
\put(1800,600){\usebox{\GreyCircleTwo}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_2}\ \Big)$}
}
\put(24000,9000){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\multiput(300,900)(5400,0){2}{\circle{600}}
\multiput(300,0)(5400,0){2}{\line(0,1){600}}
\put(300,0){\line(1,0){5400}}
\put(1800,600){\usebox{\GreyCircle}}
\put(3900,900){\circle{600}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_2}\ \Big)$}
}
\put(0,6000){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(0,0){\usebox{\aprime}}
\put(1800,600){\usebox{\GreyCircleTwo}}
\put(5700,900){\circle{600}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_2}\ \Big)$}
}
\put(12000,6000){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(0,600){\usebox{\atwo}}
\put(1800,600){\usebox{\GreyCircle}}
\multiput(3900,900)(1800,0){2}{\circle{600}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_2}\ \Big)$}
}
\put(24000,6000){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\multiput(0,600)(3600,0){2}{\usebox{\atwo}}
\put(1800,600){\usebox{\GreyCircle}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_2}\ \Big)$}
}
\put(0,3000){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(0,600){\usebox{\atwo}}
\put(1800,600){\usebox{\GreyCircle}}
\put(3600,0){\usebox{\aprime}}
\put(5700,900){\circle{600}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_2}\ \Big)$}
}
\put(12000,3000){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(300,900){\circle{600}}
\put(1800,0){\usebox{\aone}}
\multiput(3600,0)(1800,0){2}{\usebox{\aprime}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_2}^+\ \Big)$}
}
\put(24000,3000){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\multiput(0,0)(1800,0){4}{\usebox{\aone}}
\multiput(300,1800)(5400,0){2}{\line(0,1){450}}
\put(300,2250){\line(1,0){5400}}
\multiput(300,0)(3600,0){2}{\line(0,-1){450}}
\put(300,-450){\line(1,0){3600}}
\multiput(3100,1300)(1800,0){2}{\usebox{\tow}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_2}^+\ \Big)$}
}
\put(0,0){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(0,600){\usebox{\atwo}}
\put(1800,600){\usebox{\GreyCircle}}
\multiput(3600,0)(1800,0){2}{\usebox{\aone}}
\multiput(3100,1300)(1800,0){2}{\usebox{\tow}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_2}\ \Big)$}
}
\end{picture}\]
if $i=3$
\[\begin{picture}(34800,7800)
\put(0,6000){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(3900,900){\circle{600}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_3}\ \Big)$}
}
\put(12000,6000){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(5700,900){\circle{600}}
\put(3600,600){\usebox{\GreyCircle}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_3}\ \Big)$}
}
\put(24000,6000){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(300,900){\circle{600}}
\put(3600,600){\usebox{\GreyCircle}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_3}\ \Big)$}
}
\put(0,3000){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(5400,0){\usebox{\aone}}
\put(4900,1300){\usebox{\tow}}
\put(3600,600){\usebox{\GreyCircle}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_3}\ \Big)$}
}
\put(12000,3000){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(5700,900){\circle{600}}
\multiput(0,600)(3600,0){2}{\usebox{\GreyCircle}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_3}\ \Big)$}
}
\put(24000,3000){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(300,900){\circle{600}}
\multiput(2100,900)(3600,0){2}{\circle{600}}
\multiput(2100,600)(3600,0){2}{\line(0,-1){1050}}
\put(2100,-450){\line(1,0){3600}}
\put(3600,0){\usebox{\aone}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_3}^+\ \Big)$}
}
\put(0,0){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\multiput(0,0)(1800,0){4}{\usebox{\aone}}
\multiput(300,1800)(5400,0){2}{\line(0,1){450}}
\put(2100,1800){\line(0,1){450}}
\put(300,2250){\line(1,0){5400}}
\multiput(300,0)(3600,0){2}{\line(0,-1){450}}
\put(300,-450){\line(1,0){3600}}
\put(2500,1300){\usebox{\toe}}
\put(4900,1300){\usebox{\tow}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_3}^+\ \Big)$}
}
\put(12000,0){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(0,600){\usebox{\atwo}}
\put(1800,600){\usebox{\GreyCircle}}
\multiput(3600,0)(1800,0){2}{\usebox{\aone}}
\put(4900,1300){\usebox{\tow}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_3}^+\ \Big)$}
}
\end{picture}\]
if $i=4$
\[\begin{picture}(34800,1800)
\put(0,0){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(5700,900){\circle{600}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_4}\ \Big)$}
}
\put(12000,0){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(5400,600){\usebox{\GreyCircle}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_4}\ \Big)$}
}
\put(24000,0){
\put(0,600){$\Big($}
\put(1200,0){
\put(300,900){\usebox{\dynkinf}}
\put(3600,600){\usebox{\GreyCircle}}
\put(5400,0){\usebox{\aone}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_4}^+\ \Big)$}
}
\end{picture}\]
\end{proposition}
All spherical systems mentioned in this proposition have trivial $\Gamma$.
\begin{sketche}
From the combinatorial definition of the map $\omega\colon\mathbb N\Delta\to\mathbb N\Omega$ given in \ref{ssorbits} follows that $\omega(\delta) = \omega_i$ implies either $\delta = \delta_{\alpha_i}$ or $\delta = \delta_{\alpha_i}^+$. A careful combinatorial analysis then gives the lists above.
\end{sketche}
\bigskip
\textit{Remarks}
1) The representation $V(\omega_1)$ is the adjoint representation. It is well known that there are exactly 3 spherical adjoint orbits, all of them nilpotent (see \cite{Pa03} and \cite{CCC05}).
2) Let us give explicitly the spherical orbit in $\mathbb P(V(\omega_2))$ associated to the faithful couple
\[\begin{picture}(10800,1800)
\put(0,600){$\Big(\ $}
\put(1200,-450){
\put(300,1350){\usebox{\dynkinf}}
\multiput(300,1350)(5400,0){2}{\circle{600}}
\put(3900,1350){\circle{600}}
\multiput(300,450)(5400,0){2}{\line(0,1){600}}
\put(300,450){\line(1,0){5400}}
\put(1800,1050){\usebox{\GreyCircle}}
}
\put(7200,600){$\ ,\ \delta_{\alpha_2}\ \Big)$}
\end{picture}\]
Consider the vector
\[v = X_{-(\alpha_1+2\alpha_2+3\alpha_3+\alpha_4)} \wedge X_{-(\alpha_1+2\alpha_2+3\alpha_3+2\alpha_4)}\in\bigwedge^2 V(\omega_1)\]
(where for every root $\beta$, $X_\beta$ denotes an associated root vector). If $Q$ is a maximal parabolic subgroup of $G$ associated to $\alpha_3$ and containing $B$, it is not difficult to check that $H = (^-Q^u, ^-Q^u)\,M$ stabilizes the line $\mathbb Cv$. We know that this $H$ is a generic isotropy group of the wonderful variety having the above diagram (see Section~\ref{ss1521}). Since \[\bigwedge^2 V(\omega_1) \cong V(\omega_2) \oplus V(\omega_1),\]
and since $(\alpha_1+2\alpha_2+3\alpha_3+\alpha_4)+(\alpha_1+2\alpha_2+3\alpha_3+2\alpha_4)$ is not a root of $G$, necessarily $v\in V(\omega_2)$. It follows that the spherical orbit in $\mathbb P(V(\omega_2))$ given by $G.[v]$ is the one associated to the above faithful couple.
\vspace{6ex}\section{Examples not of type $\mathsf F_4$}\label{notf4}
In this section, we give several examples of general type to illustrate phenomena which do not occur or cannot be well illustrated in type $\mathsf F_4$.
\subsection{}
There are not so many examples of minimal wonderful $\mathcal R$-type morphisms
when $G$ is of type $\mathsf F_4$. In what follows, we give more examples for other types:
\begin{itemf}
\item[1)] $G$ of type $\mathsf B_{2m}$
\[\begin{picture}(27600,1200)\put(0,0){\begin{picture}(11400,1350)\put(0,600){\multiput(0,0)(1800,0){2}{\usebox{\atwo}}\put(7200,0){\usebox{\atwo}}\put(3900,300){\usebox{\susp}}\multiput(3900,300)(25,25){13}{\circle*{70}}\multiput(4200,600)(2400,0){2}{\multiput(0,0)(300,0){2}{\multiput(0,0)(25,-25){7}{\circle*{70}}}\multiput(150,-150)(300,0){2}{\multiput(0,0)(25,25){7}{\circle*{70}}}}\multiput(7500,300)(-25,25){13}{\circle*{70}}}\put(9000,600){\usebox{\btwo}}\put(10800,0){\usebox{\aprime}}\end{picture}}
\put(12300,900){\vector(1,0){3000}}
\put(16200,0){
\multiput(300,900)(1800,0){2}{\usebox{\segm}}
\put(3900,900){\usebox{\susp}}
\put(7500,900){\usebox{\segm}}
\put(9300,900){\usebox{\rightbisegm}}
\put(0,600){\usebox{\GreyCircle}}
}
\end{picture}
\]
\item[2)] $G$ of type $\mathsf D_4$
\[\begin{picture}(12000,3000)
\put(0,0){\begin{picture}(3600,3000)\put(300,1500){\usebox{\segm}}\put(2100,300){\usebox{\bifurc}}\put(300,1500){\circle{600}}\multiput(3300,300)(0,2400){2}{\circle{600}}\multiput(300,1500)(25,25){13}{\circle*{70}}\multiput(600,1800)(300,0){5}{\multiput(0,0)(25,-25){7}{\circle*{70}}}\multiput(750,1650)(300,0){4}{\multiput(0,0)(25,25){7}{\circle*{70}}}\multiput(300,1500)(25,-25){13}{\circle*{70}}\multiput(600,1200)(300,0){5}{\multiput(0,0)(25,25){7}{\circle*{70}}}\multiput(750,1350)(300,0){4}{\multiput(0,0)(25,-25){7}{\circle*{70}}}\thicklines\put(3300,2700){\line(-1,0){400}}\multiput(2900,2700)(-200,-200){5}{\line(0,-1){200}}\multiput(2900,2500)(-200,-200){4}{\line(-1,0){200}}\multiput(2100,1700)(-30,-10){5}{\line(-1,0){30}}\put(3300,300){\line(-1,0){400}}\multiput(2900,300)(-200,200){5}{\line(0,1){200}}\multiput(2900,500)(-200,200){4}{\line(-1,0){200}}\multiput(2100,1300)(-30,10){5}{\line(-1,0){30}}\multiput(3300,300)(0,2000){2}{\line(0,1){400}}\multiput(3300,700)(-200,200){4}{\line(-1,0){200}}\multiput(3100,700)(-200,200){4}{\line(0,1){200}}\multiput(3300,2300)(-200,-200){4}{\line(-1,0){200}}\multiput(3100,2300)(-200,-200){4}{\line(0,-1){200}}\end{picture}}
\put(4500,1500){\vector(1,0){3000}}
\put(8700,1500){\usebox{\segm}}
\put(10500,300){\usebox{\bifurc}}
\put(8400,1200){\usebox{\GreyCircle}}
\end{picture}\]
\item[3)] $G$ of type $\mathsf C_l\times\mathsf C_m\times\mathsf C_n$
\[\left.
\begin{picture}(11700,3900)(0,3300)
\multiput(600,0)(0,2700){3}{
\multiput(300,900)(1800,0){2}{\usebox{\segm}}
\put(3900,900){\usebox{\susp}}
\put(7500,900){\usebox{\segm}}
\put(9300,900){\usebox{\leftbisegm}}
\put(1800,600){\usebox{\GreyCircle}}
\put(0,0){\usebox{\aone}}
}
\multiput(0,300)(0,6600){2}{\line(1,0){600}}
\put(0,300){\line(0,1){6600}}
\multiput(0,0)(0,2700){2}{
\multiput(300,1500)(0,1500){2}{\line(1,0){300}}
\put(300,1500){\line(0,1){1500}}
}
\end{picture}
\quad\right\}
\begin{picture}(4800,3900)(0,3300)
\put(900,3600){\vector(1,0){3000}}
\end{picture}
\left\{\quad
\begin{picture}(11700,3900)(0,3300)
\multiput(600,0)(0,2700){3}{
\multiput(300,900)(1800,0){2}{\usebox{\segm}}
\put(3900,900){\usebox{\susp}}
\put(7500,900){\usebox{\segm}}
\put(9300,900){\usebox{\leftbisegm}}
\put(1800,600){\usebox{\GreyCircle}}
}
\multiput(900,900)(0,2700){2}{\circle{600}}
\multiput(0,900)(0,2700){2}{\line(1,0){600}}
\put(0,900){\line(0,1){2700}}
\end{picture}
\right.\]
\end{itemf}
All the examples above have defect 0, so correspond to (very) reductive wonderful subgroups, which are well known (see \cite{B87}). The following examples are related to wonderful model varieties (see \cite{Lu07}), they have defect 1.
\begin{itemf}
\item[4)] $G$ of type $\mathsf A_{2m}$
\[\begin{picture}(27600,600)\put(0,0){\begin{picture}(11400,600)\multiput(0,0)(7200,0){2}{\multiput(0,0)(1800,0){2}{\usebox{\atwo}}}\put(3900,300){\usebox{\susp}}\multiput(3900,300)(25,25){13}{\circle*{70}}\multiput(4200,600)(2400,0){2}{\multiput(0,0)(300,0){2}{\multiput(0,0)(25,-25){7}{\circle*{70}}}\multiput(150,-150)(300,0){2}{\multiput(0,0)(25,25){7}{\circle*{70}}}}\multiput(7500,300)(-25,25){13}{\circle*{70}}\end{picture}}
\put(12300,300){\vector(1,0){3000}}
\put(16200,0){
\multiput(0,0)(7200,0){2}{\multiput(300,300)(1800,0){2}{\usebox{\segm}}}
\put(3900,300){\usebox{\susp}}
\multiput(300,300)(10800,0){2}{\circle{600}}
\multiput(300,300)(25,25){13}{\circle*{70}}
\multiput(600,600)(5700,0){2}{\multiput(0,0)(300,0){15}{\multiput(0,0)(25,-25){7}{\circle*{70}}}\multiput(150,-150)(300,0){15}{\multiput(0,0)(25,25){7}{\circle*{70}}}}
\multiput(5400,600)(300,0){3}{\circle*{70}}
\multiput(11100,300)(-25,25){13}{\circle*{70}}
}
\end{picture}
\]
\item[5)] $G$ of type $\mathsf B_{2m}$
\[\begin{picture}(27600,600)\put(0,0){\begin{picture}(11400,750)\multiput(0,0)(1800,0){2}{\usebox{\atwo}}\put(7200,0){\usebox{\atwo}}\put(9000,0){\usebox{\bsecondtwo}}\put(3900,300){\usebox{\susp}}\multiput(3900,300)(25,25){13}{\circle*{70}}\multiput(4200,600)(2400,0){2}{\multiput(0,0)(300,0){2}{\multiput(0,0)(25,-25){7}{\circle*{70}}}\multiput(150,-150)(300,0){2}{\multiput(0,0)(25,25){7}{\circle*{70}}}}\multiput(7500,300)(-25,25){13}{\circle*{70}}\end{picture}}
\put(12300,300){\vector(1,0){3000}}
\put(16200,0){
\multiput(300,300)(1800,0){2}{\usebox{\segm}}
\put(3900,300){\usebox{\susp}}
\put(7500,300){\usebox{\segm}}
\put(9300,300){\usebox{\rightbisegm}}
\put(0,0){\usebox{\GreyCircle}}
\put(11100,300){\circle{600}}
}
\end{picture}\]
\item[6)] $G$ of type $\mathsf D_{2m+1}$
\[\begin{picture}(26400,3000)\put(0,0){\begin{picture}(10800,3000)\multiput(0,1200)(1800,0){2}{\usebox{\atwo}}\put(7200,1200){\usebox{\atwo}}\put(3900,1500){\usebox{\susp}}\put(9300,300){\usebox{\bifurc}}\multiput(10500,300)(0,2400){2}{\circle{600}}\multiput(3900,1500)(25,25){13}{\circle*{70}}\multiput(4200,1800)(2400,0){2}{\multiput(0,0)(300,0){2}{\multiput(0,0)(25,-25){7}{\circle*{70}}}\multiput(150,-150)(300,0){2}{\multiput(0,0)(25,25){7}{\circle*{70}}}}\multiput(7500,1500)(-25,25){13}{\circle*{70}}\thicklines\put(9300,1500){\line(0,1){400}}\multiput(9300,1900)(200,200){4}{\line(1,0){200}}\multiput(9500,1900)(200,200){4}{\line(0,1){200}}\put(10500,2700){\line(-1,0){400}}\put(9300,1500){\line(1,0){400}}\multiput(9700,1500)(200,-200){4}{\line(0,-1){200}}\multiput(9700,1300)(200,-200){4}{\line(1,0){200}}\put(10500,300){\line(0,1){400}}\end{picture}}
\put(11700,1500){\vector(1,0){3000}}
\put(15600,0){
\multiput(300,1500)(1800,0){2}{\usebox{\segm}}
\put(3900,1500){\usebox{\susp}}
\put(7500,1500){\usebox{\segm}}
\put(9300,300){\usebox{\bifurc}}
\put(300,1500){\circle{600}}
\multiput(10500,300)(0,2400){2}{\circle{600}}
\multiput(300,1500)(25,25){13}{\circle*{70}}
\put(600,1800){\multiput(0,0)(300,0){15}{\multiput(0,0)(25,-25){7}{\circle*{70}}}\multiput(150,-150)(300,0){15}{\multiput(0,0)(25,25){7}{\circle*{70}}}}
\multiput(5400,1800)(300,0){3}{\circle*{70}}
\put(6300,1800){\multiput(0,0)(300,0){10}{\multiput(0,0)(25,-25){7}{\circle*{70}}}\multiput(150,-150)(300,0){9}{\multiput(0,0)(25,25){7}{\circle*{70}}}}
\multiput(300,1500)(25,-25){13}{\circle*{70}}
\put(600,1200){\multiput(0,0)(300,0){15}{\multiput(0,0)(25,25){7}{\circle*{70}}}\multiput(150,150)(300,0){15}{\multiput(0,0)(25,-25){7}{\circle*{70}}}}
\multiput(5400,1200)(300,0){3}{\circle*{70}}
\put(6300,1200){\multiput(0,0)(300,0){10}{\multiput(0,0)(25,25){7}{\circle*{70}}}\multiput(150,150)(300,0){9}{\multiput(0,0)(25,-25){7}{\circle*{70}}}}
\thicklines
\put(3600,0){\put(6900,2700){\line(-1,0){400}}\multiput(6500,2700)(-200,-200){5}{\line(0,-1){200}}\multiput(6500,2500)(-200,-200){4}{\line(-1,0){200}}\multiput(5700,1700)(-30,-10){5}{\line(-1,0){30}}\put(6900,300){\line(-1,0){400}}\multiput(6500,300)(-200,200){5}{\line(0,1){200}}\multiput(6500,500)(-200,200){4}{\line(-1,0){200}}\multiput(5700,1300)(-30,10){5}{\line(-1,0){30}}
}
}
\end{picture}\]
\end{itemf}
In the following example, the three quotients are of type $\mathcal L$, $\mathcal R$ and $\mathcal P$, respectively. Notice that a new interior negative color appears in the quotient of type $\mathcal R$.
\begin{itemf}
\item[7)] $G$ of type $\mathsf A_1\times\mathsf B_2$
\[\begin{picture}(16200,9000)
\put(0,3150){
\put(0,450){\usebox{\aone}}
\put(3000,1350){\usebox{\rightbisegm}}
\multiput(2700,450)(1800,0){2}{\usebox{\aone}}
\multiput(300,2700)(4500,0){2}{\line(0,-1){450}}
\put(300,2700){\line(1,0){4500}}
\multiput(300,0)(2700,0){2}{\line(0,1){450}}
\put(300,0){\line(1,0){2700}}
\put(4000,1750){\usebox{\tow}}
}
\put(11100,7800){
\put(300,900){\circle*{150}}
\put(3000,900){\usebox{\rightbisegm}}
\multiput(300,900)(2700,0){2}{\circle{600}}
\put(4800,900){\circle{600}}
\multiput(300,0)(2700,0){2}{\line(0,1){600}}
\put(300,0){\line(1,0){2700}}
}
\put(11100,4200){
\put(300,300){\circle*{150}}
\put(3000,300){\usebox{\rightbisegm}}
\put(2700,0){\usebox{\GreyCircle}}
\put(4800,300){\circle{600}}
}
\put(11100,0){
\put(300,900){\circle*{150}}
\put(3000,900){\usebox{\rightbisegm}}
\multiput(300,900)(4500,0){2}{\circle{600}}
\put(2700,600){\usebox{\GreyCircle}}
\multiput(300,0)(4500,0){2}{\line(0,1){600}}
\put(300,0){\line(1,0){4500}}
}
\put(6450,5850){\vector(2,1){3300}}
\put(6600,4500){\vector(1,0){3000}}
\put(6450,3150){
\multiput(0,0)(600,-300){5}{\multiput(0,0)(30,-15){10}{\line(1,0){30}}}
\put(3000,-1500){\vector(2,-1){300}}
}
\end{picture}\]
\end{itemf}
\subsection{}
When $G$ is of type $\mathsf F_4$ (or $\mathsf B_3$, or $\mathsf C_3$), and $K$ is a (connected) spherically closed subgroup of $G$, we have seen (in Sections \ref{ss1521}, \ref{ss811}, \ref{sb3}, \ref{ss711} and \ref{ss1514}) many examples where all parabolic subgroups $H$, of dimension $\geq\dim(B^u)$, are spherical in $G$.
In what follows, we give examples to show that this is not true in general. The argument will always be the same: if $H$ were spherical in $G$, $H$ would be spherically closed and hence wonderful in $G$ (see Corollary~\ref{argument} in \ref{sphericalclosure2}); but in each case, one can check that on the combinatorial level there is simply no spherical system (of right properties to come from $H$) having as quotient the spherical system corresponding to $K$.
\begin{itemf}
\item[1)]
If $G$ is of type $\mathsf D_4$, and $K$ ($\cong \mathrm{GL}(4)$) is the spherically closed subgroup of $G$ having diagram
\[\begin{picture}(3300,2400)
\put(300,1200){\usebox{\segm}}
\put(2100,0){\usebox{\bifurc}}
\put(0,300){\usebox{\aone}}
\put(700,1600){\usebox{\toe}}
\put(1800,900){\usebox{\GreyCircle}}
\end{picture}\]
then the parabolic subgroup $H$ of $K$ having semisimple type $\mathsf A_1 \times \mathsf A_1$ and dimension $12 = \dim(B^u)$, is not spherical in $G$. On the other hand, the two parabolic subgroups of $K$ having semisimple type $\mathsf A_2$ (and dimension 13), which are conjugated in $G$, are spherical and wonderful in $G$, and the corresponding diagram is
\[\begin{picture}(3600,3000)
\put(300,1500){\usebox{\segm}}
\put(2100,300){\usebox{\bifurc}}
\put(0,600){\usebox{\aone}}
\multiput(700,1900)(0,-1200){2}{\usebox{\toe}}
\put(2100,1500){\circle{600}}
\multiput(3300,300)(0,2400){2}{\circle{600}}
\thicklines\put(-7200,0){\put(9300,1500){\line(0,1){400}}\multiput(9300,1900)(200,200){4}{\line(1,0){200}}\multiput(9500,1900)(200,200){4}{\line(0,1){200}}\put(10500,2700){\line(-1,0){400}}\put(9300,1500){\line(1,0){400}}\multiput(9700,1500)(200,-200){4}{\line(0,-1){200}}\multiput(9700,1300)(200,-200){4}{\line(1,0){200}}\put(10500,300){\line(0,1){400}}}
\end{picture}\]
\item[2)]
If $G$ is of type $\mathsf A_5$ ($\cong\mathrm{SL}(6)$), and $K$ ($\cong (\mathrm{GL}(2)\!\times\!\mathrm{GL}(4))\cap\mathrm{SL}(6)$) is the spherically closed subgroup of $G$ having diagram
\[\begin{picture}(7800,1200)
\multiput(300,900)(1800,0){4}{\usebox{\segm}}
\multiput(300,900)(1800,0){2}{\multiput(0,0)(5400,0){2}{\circle{600}}}
\multiput(300,0)(7200,0){2}{\line(0,1){600}}
\put(300,0){\line(1,0){7200}}
\put(1800,600){\multiput(300,300)(25,25){13}{\circle*{70}}\put(600,600){\multiput(0,0)(300,0){10}{\multiput(0,0)(25,-25){7}{\circle*{70}}}\multiput(150,-150)(300,0){10}{\multiput(0,0)(25,25){7}{\circle*{70}}}}\multiput(3900,300)(-25,25){13}{\circle*{70}}}
\end{picture}\]
then the two parabolic subgroups of $K$ having semisimple type respectively $\mathsf A_2$ and $\mathsf A_1\times\mathsf A_1 \times \mathsf A_1$ and dimension $15 = \dim(B^u)$ are not spherical in $G$. On the other hand, the three (pairwise not $G$-conjugated) parabolic subgroups of $K$ having semisimple type $\mathsf A_3$ or $\mathsf A_1\times\mathsf A_2$ (and dimension greater than 15) are spherical and wonderful in $G$, and the corresponding diagrams are
\[\begin{picture}(29400,2700)
\multiput(300,1350)(1800,0){4}{\usebox{\segm}}
\multiput(0,450)(7200,0){2}{\usebox{\aone}}
\multiput(2100,1350)(3600,0){2}{\circle{600}}
\multiput(300,2700)(7200,0){2}{\line(0,-1){450}}
\put(300,2700){\line(1,0){7200}}
\put(700,1750){\usebox{\toe}}
\put(6700,1750){\usebox{\tow}}
\put(1800,1050){\multiput(300,300)(25,25){13}{\circle*{70}}\put(600,600){\multiput(0,0)(300,0){10}{\multiput(0,0)(25,-25){7}{\circle*{70}}}\multiput(150,-150)(300,0){10}{\multiput(0,0)(25,25){7}{\circle*{70}}}}\multiput(3900,300)(-25,25){13}{\circle*{70}}}
\put(10800,0){
\multiput(300,1350)(1800,0){4}{\usebox{\segm}}
\multiput(0,450)(7200,0){2}{\usebox{\aone}}
\put(1800,450){\usebox{\aone}}
\put(3600,1050){\usebox{\atwo}}
\multiput(300,2700)(7200,0){2}{\line(0,-1){450}}
\put(300,2700){\line(1,0){7200}}
\multiput(2100,0)(5400,0){2}{\line(0,1){450}}
\put(2100,0){\line(1,0){5400}}
\put(700,1750){\usebox{\toe}}
}
\put(21600,0){
\multiput(300,1350)(1800,0){4}{\usebox{\segm}}
\multiput(0,450)(7200,0){2}{\usebox{\aone}}
\put(5400,450){\usebox{\aone}}
\put(1800,1050){\usebox{\atwo}}
\multiput(300,2700)(7200,0){2}{\line(0,-1){450}}
\put(300,2700){\line(1,0){7200}}
\multiput(300,0)(5400,0){2}{\line(0,1){450}}
\put(300,0){\line(1,0){5400}}
\put(6700,1750){\usebox{\tow}}
}
\end{picture}\]
\item[3)]
If $G$ is of type $\mathsf E_6$, and $K$ (of type $\mathsf F_4$) is the spherically closed subgroup of $G$ having diagram
\[\begin{picture}(7800,2100)\multiput(300,1800)(1800,0){4}{\usebox{\segm}}\put(3900,0){\usebox{\vsegm}}\multiput(0,1500)(7200,0){2}{\usebox{\GreyCircle}}\end{picture}\]
then the parabolic subgroup $H$ of $K$ having semisimple type $\mathsf B_3$ and dimension $37 = \dim(B^u)+1$, is not spherical in $G$. On the other hand, the parabolic subgroup of $K$ having semisimple type $\mathsf C_3$ (and \textit{same} dimension 37) is spherical and wonderful in $G$, and the corresponding diagram is
\[\begin{picture}(7800,2400)\multiput(300,2100)(1800,0){4}{\usebox{\segm}}\put(3900,300){\usebox{\vsegm}}\multiput(0,1800)(1800,0){4}{\multiput(300,300)(1800,0){2}{\circle{600}}\multiput(300,300)(25,25){13}{\circle*{70}}\multiput(600,600)(300,0){4}{\multiput(0,0)(25,-25){7}{\circle*{70}}}\multiput(750,450)(300,0){4}{\multiput(0,0)(25,25){7}{\circle*{70}}}\multiput(2100,300)(-25,25){13}{\circle*{70}}}\put(3900,300){\circle{600}}\put(3600,0){\multiput(300,300)(25,25){13}{\circle*{70}}\multiput(600,600)(0,300){4}{\multiput(0,0)(-25,25){7}{\circle*{70}}}\multiput(450,750)(0,300){4}{\multiput(0,0)(25,25){7}{\circle*{70}}}\multiput(300,2100)(25,-25){13}{\circle*{70}}}\end{picture}\]
\end{itemf}
\section{Introduction}
In hierarchical galaxy evolution scenarios it is predicted that the gas flows associated
with the galaxy mergers that build massive galaxies will trigger both starbursts and
AGN activity \citep{kauffmann00,dimatteo05}. As the mergers proceed, the outflows driven
by the AGN eventually
become powerful enough to limit both the star formation in the host galaxies and any
further growth of the super-massive black holes \citep{dimatteo05}. In this context, there
is clearly an
interest in studying the co-evolution of AGN and their host galaxies. However, from an
observational perspective, separating those features of galaxies that are associated with
AGN, from those that are associated with star formation activity, has often proved
problematic. For example, deep surveys at sub-mm wavelengths have been successful at
detecting the redshifted far-IR emission from cool dust components in high redshift
galaxies that show evidence for AGN activity at X-ray \citep{alexander05},
optical \citep{priddey03} and radio \citep{archibald01,willott02}
wavelengths, but the interpretation of these results in terms of star formation activity
remains controversial because of uncertainties surrounding the heating mechanism for
the cool dust (e.g. Willott et al. 2002).
Although it is generally accepted that the warm dust emitting the mid-IR (3 --
30$\mu$m) continuum is situated relatively close to the AGN and heated by direct AGN
illumination \citep{pier92,vanbemmel03,rowan95}, the heating mechanism for the cooler, far-IR
(30 -- 150$\mu$m) emitting dust
is less certain because the distribution of the cool dust is unknown. There is plenty of
observational evidence that starbursts can produce prodigious far-IR and sub-mm
radiation. However, it is also possible to model the far-IR spectral energy distributions
(SEDs) solely in terms of AGN heating, provided that sufficient AGN energy is allowed to
escape to relatively large radii in order to heat a significant mass of dust to the requisite
cool temperatures \citep{nenkova02,vanbemmel03}.
A promising alternative to SED modelling is to use a statistical approach:
correlating the MFIR continuum properties with information about the level of both
AGN and starburst activity derived from observations at other wavelengths. However,
this approach has been hampered in the past by the low sensitivity of the available far-
IR satellites, and the well-known biases that can occur in luminosity-luminosity plots of
incomplete flux-limited samples. For example, the {\it Infrared Astronomical Satellite} (IRAS)
detected fewer
than 30\% of powerful 3C radio galaxies at MFIR wavelengths \citep{impey93,heckman94},
and the detection rate did not improve substantially in observations made by the
{\it Infrared Space Observatory} (ISO).
Therefore, while
some previous studies hinted at correlations between AGN and MFIR activity
\citep{impey93,heckman94,hes95,haas03},
none were definitive because of the incompleteness of the detections at far-IR
wavelengths. Moreover, based on IRAS results,
it was noted that some of the radio-loud AGN with the most luminous far-IR
emission are associated with prodigious recent star formation activity detected at
optical wavelengths, thus supporting the alternative starburst heating
mechanism \citep{hes95,tad02,wills02,wills04}.
The launch of the {\it Spitzer Space Telescope} \citep{werner04}, with its orders of magnitude
improved sensitivity at MFIR wavelengths compared with previous satellites, has
substantially enhanced our ability to make statistical studies of complete samples of
distant AGN. In this paper we report results from a deep survey with the {\it Spitzer} MIPS
instrument \citep{rieke04} of a complete sample of intermediate redshift radio galaxies.
These results have a direct bearing on our understanding of the dominant heating mechanism(s)
for the warm/cool dust in AGN.
\section{Sample Selection and Observations}
Our sample comprises all radio galaxies and steep-spectrum radio quasars
with intermediate redshifts ($0.05 < z < 0.7$)
from the sample of southern 2Jy radio sources ($S_{2.7GHz} > 2.0$~Jy) described
in \cite{tad93}, with the addition of PKS0345+07 which has since proved to
fulfill the same selection criteria \citep{diserego04}. This 2Jy sample (47 objects
in total) is unique
in the sense that deep optical spectra exist for all the sample objects which
can be used to derive accurate
emission line luminosities \citep{tad93,tad98},
and search for signs of optical starburst activity \citep{tad02,wills04}.
For the majority of objects in the sample (42) we made deep {\it Spitzer} observations with the
MIPS instrument at 24 and 70$\mu$m as part of a programme dedicated to understanding the dust
heating mechanism, with typical exposure times of 92 -- 180s at 24$\mu$m and
231 -- 545s at 70$\mu$m (depending on the brightness). For 4
further objects we used MIPS observations already present in the {\it Spitzer} archive, and
for the remaining object --- PKS1549-79 --- we used 25 and 60$\mu$m flux measurements
obtained by IRAS. The {\it Spitzer} data were reduced using the MOPEX software package, with additional
median filtering performed using contributed software. Flux measurements were made using
the aperture photometry option in the Starlink Gaia package, with typical aperture sizes
of 12 -- 30 arcseconds and 25 -- 50 arcseconds at 24$\mu$m and 70$\mu$m respectively.
In all cases appropriate corrections for aperture losses were made
using empirically-determined curves of growth determined from measurements of the
brighter sources
in our sample. Our
Spitzer observations detect 100\% of the sample at 24$\mu$m and 89\% of the sample at
70$\mu$m. Typical flux uncertainties range from $\sim$30\% in the case of the
faintest sources in our sample, to $\sim$10 -- 20\% for the brightest.
A more detailed presentation of the data and results will be
made in a forthcoming paper (Dicken et al., in preparation).
The continuum fluxes were converted to luminosities using $H_0 = 71$~km
s$^{-1}$ Mpc$^{-1}$, $\Omega_{m}=0.27$ and $\Omega_{\lambda}=0.73$,
along with spectral indices derived from the measured
F(70)/F(24) flux ratios.
\section{Results}
Previous studies have correlated the MFIR properties with the radio luminosities,
which are related to the mechanical powers of the relativistic jet components
(e.g. Hes et al. 1995; Shi et al. 2005). In
contrast, we prefer to investigate correlations with the [OIII]$\lambda$5007 emission line
luminosities ($L_{[OIII]}$), which provide a more direct indication of the intrinsic radiative powers of
the illuminating AGN \citep{rawlings91,tad98,simpson98}. The main results are shown in Figures 1 and 2, which
demonstrate that strong correlations exist between $L_{[OIII]}$ and both the mid-IR
(24$\mu$m) and far-IR (70$\mu$m) monochromatic luminosities over four orders of magnitude
in optical emission line luminosity. Restricting our analysis to redshifts $z>0.06$, in order
to avoid most of the low luminosity objects in our sample with upper limits on their [OIII]
luminosities, a Spearman rank correlation analysis shows that both correlations are
highly significant (see Table 1). By fitting straight lines
to the correlations in log-log space we find that their power-law slopes are consistent
within the uncertainties: $L_{24\mu m} \propto L_{[OIII]}^{0.81\pm0.07}$ and
$L_{70\mu m} \propto L_{[OIII]}^{0.94\pm0.1}$ for the full $z>0.06$ sample
($n=39$); and $L_{24\mu m} \propto L_{[OIII]}^{0.74\pm0.05}$ and
$L_{70\mu m} \propto L_{[OIII]}^{0.81\pm0.08}$ if we exclude the objects with
evidence for optical starburst activity ($n=32$; see below). The uncertainties in the slopes
for the correlations have been estimated using a
bootstrap re-sampling technique\footnote{We used 500 cycles in the bootstrap. In the case of the
70$\mu$m correlation we handled the four objects with 70$\mu$m upper limits as follows: for each
cycle we generated a 70$\mu$m luminosity for each of the upper limits
by multiplying the measured 24$\mu$m luminosity
by a value for the 70$\mu$m/24$\mu$m ratio drawn at random from the distribution of
such ratios measured for the sample as a whole}. One object in the $z>0.06$ sample used for
the correlation analysis
has only an upper limit on its [OIII] luminosity. For this object we used the upper limit,
rather than a measured luminosity, in the correlation analysis.
Despite the similarities between the two correlations shown in Figures 1 and 2, the
70$\mu$m correlation shows a larger scatter (see the final column in Table 1). Part of the reason for this larger scatter
becomes clear when the evidence for optical starburst activity is considered. Careful
spectral synthesis modelling of high quality optical spectra for the 2Jy sample, taking full account of
AGN-related continuum components (see Tadhunter et al. 2002, 2005 for details), has allowed us to identify the objects that show
strong evidence for recent starburst activity in their early-type host galaxies (highlighted
in Figures 1 and 2 as filled stars). It is clear that these objects --- comprising $\sim$20\%
of the full sample --- tend to fall above the main correlation in the $L_{[OIII]}$ vs.
$L_{70\mu m}$ plot,
but lie closer to the main correlation in the $L_{[OIII]}$ vs.\ $L_{24\mu m}$ plot;
the optical starburst
objects have their 70$\mu$m luminosities enhanced by up to an order of magnitude with respect to
those without clear signs of star formation activity. We can quantify this difference in
terms of the vertical displacements of the points relative to the regression line in Figure
2. Using a Kolmogorov-Smirnoff two sample test to compare the distributions of
vertical displacements, we find that we can reject the null hypothesis that the starburst
and non-starburst sub-samples are drawn from the same parent distribution at the
P=0.005 level of significance ($n_1=39$, $n_2=8$, one-tailed test). This result is further
reinforced if we consider the supplementary sample of all the radio-loud AGN from
outside our sample known to show signs of optical star formation activity (open
stars in Figures 1 and 2). Note that the presence of significant starburst heating in a
subset of our sample is consistent with recent results obtained for radio-quiet quasars
based on mid-IR detection of PAH features \citep{schweizer06} and radio continuum data \citep{barthel06}.
As an alternative to optical continuum properties, the MFIR colors may also be
used to investigate whether star formation --- in this case heavily obscured star formation
--- is important in the target galaxies. Figure 3 shows the distribution of F(70)/F(24)
colors for the full 2Jy sample, as well as the starburst and non-starburst sub-samples. It
is striking that many of the objects with optical star formation activity have relatively
``cool'' colors ($F(70)/F(24)>3.5$) consistent with those of starburst galaxies in general.
On the other hand, most of the objects without clearly identified optical star formation
activity have warmer colors ($F(70)/F(24)<3.5$); using a two sample Kolmogorov-Smirnoff
test we find that this difference is significant at the P=0.005 level ($n_1=39$,
$n_2=8$, one-tailed test). This reinforces the view that the reason for the large scatter in the
70$\mu$m correlation is a group of objects that have enhanced 70$\mu$m luminosities due to a
contribution from starburst heating.
\section{Discussion and conclusions}
Given the similarities between Figures 1 and 2, as well as the measured slopes of the
correlations, it is likely that the dominant heating mechanism for the dust emitting at
both 24$\mu$m and 70$\mu$m is AGN illumination, with starburst heating contributing
significantly at 70$\mu$m only in the minority of objects with independent evidence for
recent star formation activity. However, there is also evidence for a loose correlation
between starburst and AGN activity, in the sense that the most luminous starbursts
($L_{70\mu m} > 10^{25}$~W Hz$^{-1}$) are only found in the objects with the most powerful AGN activity
($L_{[OIII]} > 10^{36}$~W).
It is also notable that the slopes determined for the main correlations shown in Figures 1 and 2 are in
good agreement with the predictions of simple AGN illumination models involving
photoionization of optically thick clouds
($L_{MFIR} \propto L_{[OIII]}^{0.83\pm0.1}$: Tadhunter et al. 1998), provided that the relative covering factors of narrow emission line
region ($f_{NLR}$), the mid-IR emitting dust structure ($f_{MIR}$), and the far-IR emitting dust
structure ($f_{FIR}$) do not change substantially with luminosity.
In this context it is interesting to consider whether AGN illumination is energetically feasible.
We find that, in order to
explain the normalisations of the main correlations apparent in Figures 1 and 2, we
require $f_{MIR}/f_{NLR} \sim 12$ and $f_{FIR}/f_{NLR} \sim 6$\footnote{For the purposes
of this calculation we make the following assumptions: case B recombination for
an electron temperature of $T_e = 15,000$~K; $[OIII]\lambda5007/H\beta = 12$ and
a mean ionizing photon energy of $\langle h\nu \rangle =38.7$~eV (see Robinson et al. 1987); and a ratio of ionizing luminosity to bolometric
luminosity of $L_{ION}/L_{BOL} = 0.32$ (see Elvis et al. 2004). The mid-IR and far-IR luminosities
have been integrated over the wavelength ranges 3 -- 30$\mu$m and 30 -- 100$\mu$m respectively
assuming a spectral index of $\alpha = 0.7$ ($F \propto \nu^{-\alpha}$), estimated from the
mean F(70)/F(24)
flux ratio for the sample as a whole.
Note that
no assumptions have been made about the detailed radial distribution of dust. We
simply assume that the dust is distributed in such a way that it produces the observed
MFIR SEDs by AGN illumination.}. This implies that the
MFIR emitting dust
structures cover a substantially larger fraction of the sky than the narrow emission
line region (NLR). Given that the covering factor of the NLR is typically a few percent,
the dust structures are likely to cover $\sim$20 -- 70\% of the sky as seen by the AGN. This is entirely
feasible if the dust is associated with the central obscuring tori required by the unified
schemes for powerful radio sources \citep{barthel89}, or with the kpc-scale dust lanes visible in high
resolution images of some radio galaxies \citep{dekoff00}.
On the basis of our results it is clear that powerful, radio-loud AGN are not always
accompanied by major contemporaneous starburst episodes: considering both the MFIR
colours and the optical continuum spectra we estimate that the proportion of radio galaxies
in our sample with significant recent starburst activity (optically obscured or otherwise)
falls in the range 20 -- 30\%. We hypothesise that the presence of a major starburst
component, as revealed by enhanced 70$\mu$m emission, is related to the mode of
triggering of the AGN and radio jets. For example, it is plausible that the radio galaxies
with starburst components are triggered relatively close to the peak of starburst activity
in major, gas-rich galaxy mergers, whereas those lacking significant starbursts are
triggered later in the merger sequence (e.g. Tadhunter et al. 2005), by relatively minor accretion events, or by
cooling flows \citep{bremer97}. This would be consistent with the observed morphological
and kinematical diversity of the population of powerful radio galaxies (Heckman et al. 1986;
Tadhunter, Fosbury \& Quinn 1989; Baum, Heckman \& van Breugel 1992).
It will be possible to test these ideas in future by using deep optical
imaging observations to relate the interaction status and environments of the host
galaxies to the MFIR properties revealed by Spitzer.
\acknowledgments We thank the anonymous referee for
useful comments. This work is based on observations obtained with the
{\it Spitzer Space Telescope}, which is operated by the Jet Propulsion
Laboratory, California Institute of Technology under NASA contract 1407.
DD, JH and KI acknowledge support from PPARC.
{\it Facilities:} \facility{Spitzer (MIPS)}
\newpage\noindent
|
1,314,259,994,983 | arxiv | \section*{Introduction}
Boutet de Monvel's calculus \cite{B} provides a pseudodifferential framework
which encompasses the classical differential boundary value problems.
In an extension of the concept of Lopatinski and Shapiro, it associates
to each operator two symbols:
a pseudodifferential principal symbol, which is a bundle homomorphism, and
an operator-valued boundary symbol.
Ellipticity requires the invertibility of both. In this case, the calculus allows the
construction of a parametrix.
If the underlying manifold is compact, elliptic elements define Fredholm operators,
and the parametrices are Fredholm inverses.
Boutet de Monvel showed how then the index can be computed in
topological terms.
The crucial observation is that elliptic operators can be mapped to compactly
supported K-theory classes on the cotangent bundle over the interior of the
manifold.
The topological index map, applied to this class, then furnishes an integer which
is equal to the index of the operator.
For the construction of the above map, Boutet de Monvel combined
operator homotopies and classical (vector bundle) K-theory in a
very refined way. It therefore came as a surprise that this map --
which is neither obvious nor trivial --
can also be obtained as a composition of various standard
maps in K-theory for C$^*$-algebras
-- which was not yet available when \cite{B} was written.
In fact, it turns out to be basically sufficient to have
a precise understanding of the short exact sequence induced by the
boundary symbol map, \cite{MSS}, see also \cite{MNS}.
In the spirit of the classical result of Atiyah and Singer \cite{AS}
we introduce and consider in this article {\em families} of operators in
Boutet de Monvel's calculus, an issue that has not been addressed in \cite{B}.
More specifically, we consider a compact manifold $X$ with boundary and then a
fiber bundle $Z\to Y$ with fiber $X$ over a compact Hausdorff space $Y$. We
are then studying fiberwise (elliptic) Boutet de Monvel operators, depending
continuously on $y\in Y$.
In order to be able to use the powerful tools of C$^*$-algebra K-theory
we define such an operator family $A$ over $Y$
as a continuous section of a bundle of C$^*$-algebras over $Y$,
a concept which is slightly more general than that of Atiyah and
Singer, who equip the set of operators with a Fr\'echet-space topology.
In fact, restricted to the case without boundary,
our algebra of continuous families ${\mathfrak A}$ contains that of \cite{AS}
as a dense subalgebra.
While the analytic index $\ind_a(A)$ of such an elliptic family $A$ as an element of $K(Y)$
is easily defined following Atiyah \cite{A} and J\"anich \cite{J},
cf.~Definition \ref{ani} below,
it is less obvious how to obtain the topological description.
Similar to Boutet de Monvel's approach, the essential step is the
construction of a map which associates to an elliptic family an element of
the compactly supported K-theory of the total space of the bundle of cotangent spaces over the interior of the underlying manifolds.
We regard this map as a homomorphism defined on $K_1({\mathfrak A}/{\mathfrak K})$,
where ${\mathfrak K}$ denotes the ideal of continuous families which have values in
compact operators.
In its definition, we use a fact which builds upon an observation of Boutet
de Monvel: There exists a natural subalgebra ${\mathfrak A}^\dagger$ of ${\mathfrak A}$ for which
$K_*({\mathfrak A}^\dagger/{\mathfrak K})\cong K_*({\mathfrak A}/{\mathfrak K})$ so that each elliptic family $A$ in
${\mathfrak A}$
can be represented by a class $a\in K_1({\mathfrak A}^\dagger/{\mathfrak K})$.
Moreover, ${\mathfrak A}^\dagger/{\mathfrak K}$ is commutative which allows us to make
the connection to classical (vector bundle) K-theory. Then $\ind_t(A)$ is
defined by applying the classical construction of the topological index to
$a$, compare Definition \ref{indt}.
Our main result is then that these two indices are equal. To
prove this, we reduce to the classical families index theorem of Atiyah and
Singer \cite{AS}. We assign in a canonical way to $A$
an index problem on a bundle of closed manifolds, namely the double of our
original bundle of manifolds with boundary. We then show that this associated
family has the same analytic as well as topological index as $A$.
In this step we make once more use of the isomorphism
$K_1({\mathfrak A}/{\mathfrak K})\cong K_1({\mathfrak A}^\dagger/{\mathfrak K})$.
It is perhaps worth stressing that our index theorem does not use the Boutet de Monvel index theorem for boundary value problems, which can actually be obtained from ours by taking $Y$ equal to one point.
Taking the families index theorem for granted, Albin and Melrose
derived a more refined formula for the Chern character of the index bundle
in terms of symbolic data \cite[Theorem 3.8]{AM}.
The paper is structured as follows:
Section~\ref{sec:BdM_single} starts with a review of the Boutet de Monvel
calculus for a single manifold. We introduce the
C$^*$-algebra $\mathcal{A}$ of Boutet de Monvel operators of order and class
zero and the boundary symbol map $\gamma$.
Section~\ref{sec1} gives the technical introduction of operator families in Boutet de
Monvel's calculus over a compact Hausdorff space $Y$.
We define them as the continuous sections into a bundle of operator algebras
whose typical fiber is the C$^*$-algebra $\mathcal A$.
In order to keep the exposition simple, we first treat the case where $E$ is trivial
one-dimensional and $F=0$.
We introduce $\gamma$ as the fiberwise symbol map and extend the results on
the kernel and image of $\gamma$ to the family situation.
While in the single operator case this was sufficient to compute the K-theory
of $\mathcal A/\mathcal K$, the situation is more complicated in the families case.
In fact, an important ingredient in \cite{MSS}
is the fact that whenever $X$ is connected
and $\partial X\not=\emptyset$ there exists a continuous section of $S^*X^\circ$.
This is no longer true in the families case.
Instead, we prove in Theorem \ref{Kth} the fact alluded to above:
For $F=0$ we define ${\mathfrak A}^\dagger$ as the C$^*$-algebra
generated by all sections whose
pseudodifferential part is independent of the co-variable at the boundary
and whose singular Green part vanishes. Then
${\mathfrak A}^\dagger/{\mathfrak K}$ is commutative. Moreover, we use a
Mayer-Vietoris argument to show that the inclusion map induces an
isomorphism
\begin{equation}\label{K}
K_*(\mathfrak{A^\dagger /K})\cong K_*(\mathfrak{A/K}).
\end{equation}
In Section~\ref{index} we study the index problem.
Again, we confine ourselves first to the case of trivial
one-dimensional bundles. We introduce the analytic and topological index
and, as our main result, prove that the analytic and the
topological index are equal. To achieve this, we reduce with the help of a
doubling procedure to the case of families of closed manifolds. This
reduction is based
on the fact that we can use the isomorphism in \eqref{K} to represent any
element of $K_1({\mathfrak A}/{\mathfrak K})$ as a $K_1$-class of $\mathfrak{A^\dagger /K}$.
In Section~\ref{sec:non-trivial_bundles} we finish by explaining the arguments needed for the general situation.
Two appendices give technical details about the structure group of our
families and about the K\"unneth theorem we are using.
\section{Boutet de Monvel calculus for a single manifold}
\label{sec:BdM_single}
In this section, we introduce notation and recall the case of single
operators. Details can be found in the monographs of
Rempel and Schulze \cite{RS} and Grubb \cite{G} as well as in the
short introduction \cite{S3}.
Let $X$ be a compact manifold of dimension $n$ with boundary $\partial X$
and interior $X^\circ$. We equip $X$ with a {\em collar} (i.e., a neighborhood
$U$ of the boundary and a diffeomorphism $\delta\colon U\to\partial
X\times[0,1)$)
which then induces the {\em boundary defining function $x_n=pr_{[0,1)}\circ \delta$}.
The variables of $\partial X$ will be denoted $x'$.
The collar is used to provide the double $2X$ of $X$
with a (noncanonical) smooth structure.
Recall that $2X$ is the union of two copies $X^+$
and $X^-$ of $X$ quotiented by identification of the two copies of $\partial X$.
An element in Boutet de Monvel's calculus is a matrix of operators
\begin{eqnarray}\label{eq.1}
A = \begin{pmatrix}P_++G&K\\T&S\end{pmatrix} \colon
\begin{array}{ccc}C^\infty(X,E_1)&& C^\infty(X,E_2)\\
\oplus&\longrightarrow &\oplus\\
C^\infty(\partial X,F_1)&& C^\infty(\partial X,F_2)\end{array},
\end{eqnarray}
acting between sections of vector bundles $E_1, E_2$ over $X$ and
$F_1, F_2$ over $\partial X$.
In this article we shall focus on the case of endomorphisms,
where $E_1=E_2=E$ and $F_1=F_2=F$.
For convenience, we choose a Riemannian
metric $g$ on $X$ and Hermitian metrics on $E,F$ to later obtain fixed Hilbert
space structures, although the results do not depend on these choices.
The operator $P_+$ in the upper left corner is a truncated
pseudodifferential operator, derived from a (classical) pseudodifferential
operator $P$ on $2X$.
Given $u\in C^\infty(X,E)$, $P_+u$ is defined as the
composition $r^+Pe^+u$. Here $e^+$ extends $u$ by zero to a function on $2X$,
to which $P$ is applied. The result then is restricted (via $r^+$) to $X$.
In general it is not true that $P_+u\in C^\infty(X,E)$. In order to ensure this,
$P$ is required to satisfy the \emph{transmission condition}:
If $p\sim\sum p_j$ is the asymptotic expansion of the local symbol $p$ of $P$ into terms $p_j(x,\xi)$,
which are positively homogeneous of degree $j$ in $\xi$
one requires that, for $x_n=0$ and $\xi=(0,\pm1)$ one has
$D_{x}^\beta D^\alpha_\xi p_j(x',0, 0,1) = (-1)^{j-|\alpha|}
D_{x}^\beta D^\alpha_\xi p_j(x',0, 0,-1)$.
As for the remaining entries,
$G$ is a singular Green operator, $T$ a trace operator, $K$ a
potential operator, and $S$ a pseudodifferential operator on the
boundary.
Operators in Boutet de Monvel's calculus have an {\em order} and
a {\em class} or {\em type}. There are invertible elements in the calculus
which allow us to reduce both, order and class, to zero.
The operators then form a $*$-subalgebra
of the bounded operators on the Hilbert space
$H:=L^2(X,E) \oplus L^2(\partial X, F)$.
\begin{df}\label{A}
Let ${\mathcal A}^\circ(E,F)$ denote the algebra of the
(polyhomogeneous) Boutet de Monvel operators of order and class zero on
$H=L^2(X,E)\oplus L^2(\partial X,F)$,
endowed with its natural Fr\'echet topology,
and ${\mathcal A}$ its C$^*$-closure in the algebra of all bounded operators on $H$.
We write ${\mathcal A}^\circ$ and ${\mathcal A}$ if $E=X\times{\mathbb C}$ is trivial
one-dimensional and $F=0$.
\end{df}
Let $A\in {\mathcal A}^\circ(E,F)$ be given as in \eqref{eq.1}. For each entry
$P,S,G,T,K$ we have a symbol.
This is the usual one for $P$ and $S$, while
$G$, $T$, and $K$ can be considered
as operator-valued pseudodifferential operators on $\partial X$ with classical
symbols in the sense of Schulze \cite{s91}.
These are defined as follows, see \cite{S3}:
The principal pseudodifferential symbol $\sigma(A)$ of $A$
is the restriction of the principal symbol of $P$ to the cosphere bundle over $X$.
In order to define the boundary principal symbol $\gamma(A)$
we first denote by $p^0$, $g^0$, $t^0$, $k^0$, and $s^0$
the principal symbols of $P$, $G$, $T$, $K$, and $S$,
respectively.
We let $E_{x',\xi'}^0$ be the pullback of $E|_{\{x_n=0\}}$ to
the normal bundle of $X$, lifted to $(x',\xi')\in S^*\partial X$.
For fixed $(x',\xi')\in S^*\partial X$, $\xi_n\mapsto p^0(x',0,\xi',\xi_n)$
is a function on the conormal line in $(x',\xi')$, acting on $E^0_{x',\xi'}$.
It induces a truncated pseudodifferential operator
\begin{equation*}
p^0(x',0,\xi',D_n)_+ = r^+p^0(x',0,\xi',D_n)e^+\colon
L^2({\mathbb R}_{\ge0},E^0_{x',\xi'})
\to L^2({\mathbb R}_{\ge0},E^0_{x',\xi'}).
\end{equation*}
In local coordinates near the boundary we then define the boundary principal
symbol $\gamma(A)(x',\xi')\colon L^2({\mathbb R}_{\ge0},E^0_{x',\xi'})
\oplus F_{x',\xi'}\to L^2({\mathbb R}_{\ge0},E^0_{x',\xi'})\oplus
F_{x',\xi'}$ by
\begin{equation}\label{sec:def_of_boundary_symb}
\gamma(A)(x',\xi'):=\begin{pmatrix}p^0(x',0,\xi',D_n)_+ +g^0(x',\xi',D_n)&k^0(x',\xi',D_n)\\
t^0(x',\xi',D_n)&s^0(x',\xi')\end{pmatrix},
\end{equation}
with $D_n$ indicating that we let the symbol act as an operator with respect to the variable $x_n$ only.
Note that the operator $g^0(x',\xi',D_n)$ is compact and that $k^0(x',\xi^\prime,D_n)$,
$t^0(x',\xi',D_n)$ and $s^0(x',\xi')$ even have finite rank.
The operator $p^0(x',0,\xi',D_n)_+$ on the other hand is a Toeplitz type
operator; it will not be compact unless $p^0=0$.
Denoting by $\mathcal K= \mathcal K(H)$ the ideal of compact operators on
$\mathcal L(H)$, one has the following important estimate based on work by
Gohberg \cite{Goh}, Seeley \cite{See} and Grubb-Geymonat \cite{GG},
see \cite[2.3.4.4, Theorem 1]{RS} for a proof:
\begin{eqnarray}\label{normestimate}
\inf_{K\in\mathcal K} \|A+K\|=
\max\{\|\sigma(A)\|_{\sup}, \|\gamma(A)\|_{\sup}\},
\end{eqnarray}
where the sup-norms on the right hand side are over the cosphere bundles
in $X$ and $\partial X$, respectively.
This estimate implies, in particular, that both symbols extend continuously to
C$^*$-algebra homomorphisms defined on $\mathcal A(E,F)$.
For fixed $(x',\xi')$ the range $\{\gamma(A)(x',\xi')\mid A\in\mathcal A\}$
forms an algebra of Wiener-Hopf type operators.
It also follows from this estimate that $\gamma$ vanishes on $\mathcal K$.
Since the entries of $\gamma(A)(x^\prime,\xi^\prime)$ induced by $g^0$, $k^0$, $t^0$ and $s^0$
are (pointwise) compact while that induced by $p^0$ is not (unless $p^0=0$),
we conclude that a Boutet de Monvel operator $A$ belongs to $\ker \gamma$
if and only if $\sigma(A)$ vanishes at the boundary.
Based on this observation (see \cite[Section~2]{MNS} for
details) one can show that $\sigma$ induces an isomorphism
\begin{equation}\label{eq:ker_gamma_single_mf}
\ker \gamma/\mathcal K\cong C_0(S^*X^\circ).
\end{equation}
The K-theory of the range of $ \gamma$ was described in
\cite[Section~3]{MNS}.
Let ${\tt b}\colon C(\partial X)\to \im\gamma$ denote the C$^*$-homomorphism
that maps $g$ to $\gamma(m(f))$, where $m(f)$ is the operator of multiplication
by a function $f\in C(X)$ whose restriction to $\partial X$ equals $g$.
Then {\tt b} induces a K-theory isomorphism.
\section{K-Theory of the families C$^*$-algebra}\label{sec1}
To simplify the exposition, we shall assume in this section that $E=X\times
{\mathbb C}$ is the trivial one-dimensional line bundle and $F=0$.
Let $\mbox{Diff}(X)$ denote the group of diffeomorphisms of $X$, equipped with its
usual Fr\'echet topology. Recall that $\delta\colon U\to
\partial X\times [0,1)$ is the collar fixed at the beginning of Section
\ref{sec:BdM_single}. Let $G$ denote \label{defG} the subgroup of $\mbox{Diff}(X)$
consisting of those $\phi$ such that
$\delta\circ\phi\circ\delta^{-1}\colon\partial X\times[0,1/2)\to\partial X\times[0,1)$
is of the form $(x^\prime,x_n)\mapsto (\varphi(x^\prime),x_n)$ for some diffeomorphism
$\varphi\colon\partial X\to \partial X$.
We are going to use two properties that each $\phi\in G$ satisfies:
the boundary defining function is preserved ($x_n\circ\phi=x_n$ for
$0\le x_n\le 1/2$),
and the canonical map $2\phi\colon 2X\to 2X$, defined by
$2\phi\circ i_\pm=i_\pm\circ\phi,$
where $i_\pm\colon X^{\pm}\to 2X$ are the two canonical embeddings of $X$ in $2X$,
is a diffeomorphism of $2X$.
Throughout this paper, $\pi\colon Z\to Y$ will denote a fiber bundle
over the compact Hausdorff space $Y$ with fiber $X$ and structure group $G$.
Note, however, that this choice of structure group
is just for convenience and can always be (essentially uniquely) arranged
for a general bundle with typical fiber $X$, see the Appendix
\ref{sec:structgroup} for details.
We denote $Z_y:=\pi^{-1}(y)$.
Each $Z_y$ is a smooth manifold with boundary, non-canonically
diffeomorphic to $X$.
The restriction of $\pi$ to $\partial Z=\cup_y\partial Z_y$
is a fiber bundle $\pi_\partial\colon\partial Z\to Y$ with fiber $\partial X$ and
structure group $\mbox{Diff}(\partial X)$.
Next we define a bundle of Hilbert spaces, and later a C$^*$-algebra which
will act on its space of sections. This is a bit delicate, as it depends on
some further choices; therefore we give the details. We choose a continuous
family of Riemannian metrics
$(g_y)_{y\in Y}$ with corresponding measures $\mu_y$ on $Z_y$
and define $H_y:= L^2(Z_y,\mu_y)$. Recall that such a family $(g_y)$ exists:
we can patch them together using trivializations of the bundle and a partition
of unity on $Y$, as the space of Riemannian metrics on $X$ is convex.
The union $\mathfrak{H}=\bigcup_{y\in Y}H_y$
is a fiber bundle of topological vector spaces over $Y$, canonically
associated to $\pi\colon Z\to Y$, with trivializations induced from the
trivializations of $\pi$ in the obvious way. The structure group is the group
of invertible bounded operators on $H$, \emph{equipped with the
strong topology}.
\begin{remark}\rm
That we obtain here the strong topology and not the norm topology comes from
the fact that the changes of trivialization are implemented by pullback with
the diffeomorphisms of $G$, and this is continuous in the strong, but not
the norm topology. This makes our considerations about bundles of operators
later quite cumbersome and requires to use the fact that we deal with
pseudodifferential operators.
\end{remark}
Moreover, the choice $(g_y)_{y\in Y}$
gives rise to a continuous family of inner products on $\mathfrak{H}$
inducing the given topology of the fibers $H_y$.
Let ${\mathcal A}_y$ be the Boutet de Monvel algebra of order and class zero on
$L^2(Z_y)$. We want to define the bundle of Boutet de Monvel algebras
$\aleph=\bigcup_{y\in Y}{\mathcal A}_y$ as locally trivial bundle with structure group
the automorphism group of the C$^*$-algebra ${\mathcal A}$ with the
\emph{norm topology}, associated to $Z\to Y$.
To achieve this, we need the diffeomorphism invariance of
the Boutet de Monvel algebra in a precise form.
\begin{definition}
Given $\phi\in G$, let $T_\phi$
denote the bounded operator on $L^2(X)$ defined by $f\mapsto f\circ\phi^{-1}$.
\end{definition}
\begin{proposition}\label{prop:conj_is_cont}
We have a well defined continuous action (for
the Fr\'echet topology on $G$ and the norm topology on ${\mathcal A}$)
\begin{equation*}
G\times {\mathcal A}\ni (\phi,A) \mapsto T_\phi A T^{-1}_\phi \in{\mathcal A}.
\end{equation*}
Moreover, by restriction we get an action $G\times {\mathcal A}^\circ\to {\mathcal A}^\circ$.
\end{proposition}
\begin{proof} This corresponds to \cite[Proposition 1.3]{AS}.
In fact, even if $X$ is closed,
Atiyah and Singer consider a slightly different situation in that
they close ${\mathcal A}^\circ$ with respect to the operator norm of the action
on all Sobolev spaces, while we only use the operator norm on $L^2$.
Their argument still applies verbatim, since they treat the action
on each Sobolev space separately.
Indeed, the proof of \cite[Proposition 1.3]{AS} uses only a number of formal
properties of the algebra of pseudodifferential operators which are also
satisfied by the Boutet de
Monvel algebra, and therefore applies in the same way to our general
situation. To be more specific, let us list these properties:
\begin{enumerate}
\item the Boutet de Monvel algebra ${\mathcal A}^\circ$ is diffeomorphism invariant,
i.e.~in particular $T_\phi A T^{-1}_\phi\in {\mathcal A}^\circ$ for $A\in {\mathcal A}^\circ$ and
$\phi\in G$.
\item Each $T_\phi$ is a bounded operator on $L^2(X)$ and the map $G\to
\mathcal{L}(L^2(X))$ is strongly continuous. Moreover, for a sufficiently
small open neighborhood of $1$, the image has uniformly bounded norm. The
proof of this fact as given in \cite{AS} works for compact manifolds with
boundary exactly the same way as for closed manifolds.
\item Let $\mathcal{V}_G$ denote the space of vector fields on $X$ which, in the
collar, pull back from vector fields on $\partial X$.
The exponential map, defined with the help of Riemannian metrics which
respect the collar structure, gives a local diffeomorphism (of Fr\'echet
manifolds) between $\mathcal{V}_G$ and $G$.
\item If $V\in \mathcal{V}_G$ and $A\in {\mathcal A}^\circ$ then the commutator $[A,V]$
belongs to $ {\mathcal A}^\circ$ by the rules of the calculus, cf. \cite[Theorem 2.7.6]{G}.
\end{enumerate}
All these properties are either well known or easy to establish.
\end{proof}
\begin{corollary}\label{corol:bundle_of_C_algebras}
We obtain the bundle $\aleph=\bigcup_{y\in Y}{\mathcal A}_y$ of topological algebras
with bundle of subalgebras $\aleph^\circ=\bigcup_{y\in Y}{\mathcal A}^\circ_y$,
modelled on $({\mathcal A},{\mathcal A}^\circ)$ with structure group the automorphism group of
${\mathcal A}$ with its norm topology and the automorphism group of ${\mathcal A}^\circ$ its
Fr\'echet topology. The local trivializations are induced by the local
trivializations of $\pi\colon Z\to Y$, where the diffeomorphisms
$\alpha_y\colon Z_y\to
X$ obtained from the trivialization map ${\mathcal A}_y$ to ${\mathcal A}$ by conjugation with
$T_{\alpha_y}$.
Moreover, the choice of metrics $(g_y)_{y\in Y}$ induces a continuous family
of norms
on the fibers of $\aleph$ inducing the topology. With these norms the bundle
becomes a bundle of C$^*$-algebras.
\end{corollary}
\begin{proof}
The statement about the bundle of topological algebras follows immediately
from Proposition \ref{prop:conj_is_cont}. Moreover, it is well known that
each ${\mathcal A}_y$ is closed under taking adjoints in $\mathcal{L}(L^2(Z_y))$.
We now check that with this structure,
we obtain a locally trivial bundle of
C$^*$-algebras. Fix a local trivialization with diffeomorphisms
$\alpha_y\colon Z_y\to X$. If we pull back the inner
products on $H_y$ to $H=L^2(X)$ with the induced maps,
then the corresponding Gram operator
$G_y$, expressing this pullback inner product in terms of the original one
on $L^2(X)$, is the multiplication with a smooth positive function $m_y$
which depends continuously on $y$: the
density of $\alpha_y^*\mu_y$ with respect to a chosen measure $\mu$
on $X$. Note that $G_y$ belongs
to ${\mathcal A}$ and its norm, which is just the supremum,
depends continuously on $y$.
Now compose the original trivialization of
${\mathcal A}_y$ with conjugation by $\sqrt{G_y}$ and the resulting trivialization
will respect the C$^*$-algebra structures, while inheriting the norm continuity
of transition maps. To summarize: with a canonical modification (given in
terms of the inner products) we have obtained trivializations of our bundle
$\aleph$ as a bundle of C$^*$-algebras, as claimed.
\end{proof}
\begin{definition}\label{def:Cstar_alg_of_sect}
We denote by ${\mathfrak A}$ the set of continuous sections of the bundle $\aleph$ of
C$^*$-algebras. With the pointwise operations and the supremum norm, this
becomes a C$^*$-algebra. The underlying topological algebra is canonically
associated to $\pi\colon Z\to Y$, the norm and the $*$-operation depend on
the choice of the family of metrics $(g_y)_{y\in Y}$.
\end{definition}
The principal
symbol and the boundary principal symbol extend continuously to two families
of C$^*$-algebra homomorphisms
\[
\sigma_y\colon {\mathcal A}_y\rightarrow C(S^*Z_y)
\ \ \mbox{and}\ \
\gamma_y\colon {\mathcal A}_y\rightarrow C(S^*\partial Z_y,\mathcal{L}(L^2({\mathbb R}_{\ge0}))),
\]
where $S^*$ denotes cosphere bundle and $\mathcal{L}$ bounded operators.
Here $\gamma_y$ is well defined,
since the structure group of the bundle $\pi\colon Z\to Y$
leaves the boundary defining function invariant, see \cite[Theorem 2.4.11]{G}.
Let us denote by $S^*Z$ the disjoint union of all $S^*Z_y$. This can
canonically be viewed as the total space of a fiber bundle over $Y$ with
structure group $G$.
One analogously defines $S^*\partial Z=\cup_{y}S^*\partial Z_y$ and
$S^*Z^\circ=\cup S^*Z_y^\circ$.
\begin{df}
Given $A\in{\mathfrak A}$, let $\sigma_A$ be the function on $S^*Z$ defined by
piecing together all the $\sigma_y$'s. Then $A\mapsto\sigma_A$ defines
a C$^*$-algebra homomorphism
\[
\sigma\colon {\mathfrak A}\longrightarrow C(S^*Z).
\]
One also gets, analogously,
\[
\gamma\colon {\mathfrak A}\longrightarrow C(S^*\partial Z,\mathcal{L}(L^2({\mathbb R}_{\ge0}))).
\]
\end{df}
Let ${\mathfrak K}$ denote the subalgebra of ${\mathfrak A}$ consisting of the sections
$(A_y)_{y\in Y}$ such that $A_y$ is compact for every $y\in Y$. It follows
immediately from the corresponding statement for a single manifold that
$
\ker\sigma\cap\ker\gamma={\mathfrak K}.
$
It is also straightforward to generalize the description of $\ker\gamma$
for a single manifold \eqref{eq:ker_gamma_single_mf}:
\begin{thm} \label{ker}
The principal symbol restricted to $\ker\gamma$ induces a C$^*$-algebra
isomorphism
\begin{equation}
\label{kernel}
\ker\gamma/{\mathfrak K}\simeq C_0(S^*Z^\circ).
\end{equation}
Here $C_0(S^*Z^\circ)$ consists of the elements of $C(S^*Z)$ which,
for every $y\in Y$, vanish on all points of $S^*Z_y$ with base point belonging to
$\partial Z_y$.
\end{thm}
Regarding each $f\in C(Z)$ as a family of multiplication operators on
$(H_y)_{y\in Y}$ furnishes an embedding of $C(Z)$ in ${\mathfrak A}$, which we denote
$m\colon C(Z)\to{\mathfrak A}$.
Mapping a $g\in C(\partial Z)$ to
the boundary principal symbol of $m(f)$, where $f\in C(Z)$ is such that its
restriction to $\partial Z$ is $g$, defines the C$^*$-algebra homomorphism
$b\colon C(\partial Z)\to\im\gamma$.
\begin{thm}\label{biso} The homomorphisms
$b_*\colon K_i(C(\partial Z))\to K_i(\im\gamma)$,
$i=0,1$, induced by $b$ are isomorphisms.
\end{thm}
{\em Proof}:
Given an open set $U\subseteq Y$, let us denote by
$\pi_U\colon Z_U=\pi^{-1}(U)\to U$ the
restriction of $\pi$ to $U$, by ${\mathfrak A}_U$ the algebra of sections in ${\mathfrak A}$ which vanish
outside $U$ and by $\gamma_U$ the restriction of $\gamma$ to ${\mathfrak A}_U$.
Moreover we let
$$C_0(\partial Z_U)=\{f\in C(\partial Z)\colon \text{supp}\, f\subseteq
\pi_\partial^{-1}(U)\}
$$
and write $b_U$ for the restriction of $b$ to $C_0(\partial Z_U)$.
If the bundle $\pi$
is trivial over $U$, then ${\mathfrak A}_U$ is isomorphic to $C_0(U,{\mathcal A})$ and, with respect to
this isomorphism, $b_U$ corresponds to the tensor product of the identity on $C_0(U)$
with the corresponding map for a single manifold, also denoted by $b$ in \cite{MNS,MSS}.
It is the content of \cite[Corollary 8]{MNS} that $b$ induces a K-theory isomorphism
onto the image of $\gamma$.
It then follows from the K\"unneth formula for C$^*$-algebras \cite{S}
that $b_U$ induces isomorphisms
$
b_{U*}\colon K_i(C_0(\partial Z_U))\longrightarrow K_i(\im\gamma_U)
$, $i=0,1$, see Proposition \ref{kun} in Appendix \ref{sec:kunneth}.
Now let $(\im\gamma)_U$ denote the subset of $\im\gamma$ consisting
of those functions which vanish outside
${\displaystyle \cup_{y\in U}S^*\partial Z_y}$.
It is obvious that
$\im \gamma_U\subseteq (\im \gamma)_U$. Since both
$\im \gamma_U$ and $(\im \gamma)_U$ are closed in
$C(S^*\partial Z,\mathcal{L}(L^2({\mathbb R}_{\ge0})))$, to show that they are equal it
suffices to show that the former is dense in the latter. This follows from
the fact that multiplication by a complex continuous function with support
contained in $U$ maps $(\im \gamma)_U$ to $\im \gamma_U$.
This simple observation implies that, for open sets $U$ and $V$,
we have a canonical C$^*$-algebra isomorphism
\begin{equation}\label{hannover}
\im \gamma_{U\cap V}\cong
\{(f,g)\in\im \gamma_U\oplus\im \gamma_V;f=g \}.
\end{equation}
Now suppose that we have shown $b_{U*}$ to be an isomorphism
for some open $U$ and that $V$ is open and $\pi$ trivial over
$V$, and so in particular also over $U\cap V$.
We then consider the two --- thanks to \eqref{hannover} ---
diagrams
\[
\begin{array}{ccc}
C_0(\partial Z_{U\cap V})&\to&C_0(\partial Z_U)\\
\downarrow&&\downarrow\\
C_0(\partial Z_V)&\to&C_0(\partial Z_{U\cup V})
\end{array}
\ \ \mbox{and}\ \
\begin{array}{ccc}
\im \gamma_{U\cap V}&\to&\im \gamma_U\\
\downarrow&&\downarrow\\
\im \gamma_V&\to&\im \gamma_{U\cup V}
\end{array}.
\]
Because they are cartesian, we may extract from both diagrams
cyclic exact Mayer-Vietoris sequences (see \cite[21.2.2]{Bl} or
\cite[7.2.1]{MS}), and we may use the K-theory maps induced
by $b_U$, $b_V$, $b_{U\cap V}$ and $b_{U\cup V}$ to map the first cyclic sequence to the
second. By assumption and the case of trivial bundles, the
maps induced by $b_U$, $b_V$ and $b_{U\cap V}$ are isomorphisms. It then
follows from the five-lemma that also $b_{U\cup V}$ induces a K-theory
isomorphism.
Since $Y$ has a finite cover by open sets over which $\pi$ is trivial,
induction shows that $b$ induces K-theory isomorphisms.
\hfill$\Box$
Using Theorem~\ref{ker}, we obtain the following commutative diagram of
C$^*$-algebra homomorphisms, whose horizontal lines are exact:
\[
\def\mapup#1{\Big\uparrow\rlap{$\vcenter{\hbox{$\scriptstyle#1$}}$}}
\begin{array}{ccccccc}
0\longrightarrow&C_0(S^*Z^\circ)&\longrightarrow&\mathfrak{A}/\mathfrak{K}&{\mathop{\longrightarrow}\limits^\gamma}&\im\gamma&\longrightarrow 0
\\ &\mapup{m^\circ}&&\mapup{m}&&\mapup{b}&
\\0\longrightarrow&C_0(Z^\circ)&\longrightarrow&C(Z)&{\mathop{\longrightarrow}\limits^r}&C(\partial Z)&\longrightarrow 0
\end{array}.
\]
We have denoted by $r$ the map that pieces together all restrictions
$r_y\colon C(Z_y)\to C(\partial Z_y)$, $y\in Y$, and by $Z^\circ$ the union
$\cup_yZ_y^\circ$. Since the isomorphism (\ref{kernel}) is induced by
the principal symbol, and the principal symbol of an operator of multiplication by
a function is the function itself, the map $m^\circ$ in the diagram above is
actually the map of composition with the canonical projection $S^*Z^\circ\to Z^\circ$.
We may apply the cone-mapping functor \cite[Lemma 9]{MSS} to the above diagram
and get (using the same arguments that prove (11) in \cite{MSS}) the following
commutative diagram of cyclic exact sequences
\begin{equation}
\label{2ces}
\def\mapdown#1{\downarrow\rlap{$\vcenter{\hbox{$\scriptstyle#1$}}$}}
\def\mapup#1{\uparrow\rlap{$\vcenter{\hbox{$\scriptstyle#1$}}$}}
\begin{array}{ccc}
K_0(C_0(Z^\circ)) &\longrightarrow &K_0(C(Z))\\
\mapdown{m^\circ_*}& &\mapdown{m_*}\\
K_0(C_0(S^*Z^\circ)) &\longrightarrow &K_0({\mathfrak A}/{\mathfrak K})\\
{\downarrow} & &{\downarrow}\\
K_1(Cm^\circ) &{\mathop{\longrightarrow}\limits^{\cong}}&K_1(Cm)\\
\downarrow & &\downarrow\\
K_1(C_0(Z^\circ)) &\longrightarrow &K_1(C(Z))\\
\mapdown{m^\circ_*}& &\mapdown{m_*}\\
K_1(C_0(S^*Z^\circ)) &\longrightarrow &K_1(\mathfrak{A}/\mathfrak{K})\\
\downarrow & &\downarrow\\
K_0(Cm^\circ) &{\mathop{\longrightarrow}\limits^{\cong}}&K_0(Cm)\\
\downarrow & &\downarrow\\
K_0(C_0(Z^\circ)) &\longrightarrow &K_0(C(Z))
\end{array},
\end{equation}
where $\cong$ denotes isomorphism.
Up to this point, everything goes exactly as in the case of a single manifold,
but here comes a difference:
The homomorphism $m^\circ$ does not necessarily
have a left inverse (in the case of a single manifold $X$,
such a left inverse is defined
by composition with a section of $S^*X$), and hence the cyclic exact sequences above
do not have to split into short exact ones.
To proceed we now introduce the subalgebra $\mathfrak A^\dagger$
of $\mathfrak A$ and an associated subalgebra $B$ of $C(S^*Z)$
with the properties outlined in the introduction:
For each $y\in Y$, let $B_y$ denote the subalgebra of $C(S^*Z_y)$ consisting of
the functions which do not depend on the co-variable over the boundary, that
is, an $f\in C(S^*Z_y)$ belongs to $B_y$ if and only if the restriction of $f$
to the points of $S^*Z_y$ over $\partial Z_y$ equals $g\circ p_y$, for some
$g\in C(\partial Z_y)$, where $p_y\colon S^*Z_y\to Z_y$ is the canonical projection.
We then define ${\mathcal A}_y^\dagger$ as the C$^*$-subalgebra of ${\mathcal A}_y$ generated by
$\{P_+;\ P$ is a pseudodifferential operator with the
transmission property and $\sigma_y(P_+)\in B_y\}$.
\begin{df}Let $B$ denote the subalgebra of $C(S^*Z)$ consisting of the
functions whose restriction to each $S^*Z_y$ belongs to $B_y$.
We let then ${\mathfrak A}^\dagger$ be the C$^*$-subalgebra of ${\mathfrak A}$ consisting of the
sections $(A_y)_{y\in Y}$ such that $A_y\in{\mathcal A}_y^\dagger$ for every $y\in Y$.
\end{df}
\begin{pro}\label{hoc}
The C$^*$-algebra
${\mathfrak A}^\dagger/{\mathfrak K}$ is commutative, and the map
\[
{\mathfrak A}^\dagger/{\mathfrak K}\ni[A]{\mathop{\longmapsto}\limits^{\bar\sigma}}\sigma(A)\in B
\]
is a C$^*$-algebra isomorphism.
\end{pro}
\begin{proof}
Let $P=(P_y)$ be a family of \emph{pseudodifferential} operators with
symbol independent of the co-variable over the boundary,
i.e.~a generator
of ${\mathfrak A}^\dagger$. According to
\eqref{sec:def_of_boundary_symb}, $\gamma(P)$ can
be considered as a
function on $\partial Z$, acting for $z\in\partial Z$ on $L^2({\mathbb R}_{\ge
0})$ by multiplication with $\gamma(P)(z)$. Moreover, for $z\in\partial
Z$ we have $\gamma(P)(z)=\sigma(P)(z)$ independent of the
co-variable by assumption.
It follows that the composed algebra homomorphism
\begin{equation*}
\sigma\colon {\mathfrak A}^\dagger\xrightarrow{\sigma\oplus\gamma}
C(S^*Z)\oplus
C(S^*\partial Z, \mathcal{L}(L^2({\mathbb R}_{\ge0}))) \xrightarrow{pr} C(S^*Z)
\end{equation*}
has the same kernel as $\sigma\oplus\gamma$, namely ${\mathfrak K}$ and so the map
we consider is injective and in particular ${\mathfrak A}^\dagger/{\mathfrak K}$ is
commutative. By the very definition of ${\mathfrak A}^\dagger$,
$\sigma\colon{\mathfrak A}^\dagger\to B$ has dense image, as a morphism of
C$^*$-algebras it is therefore also surjective.
\end{proof}
This allows us to describe the K-theory of ${\mathfrak A}/{\mathfrak K}$:
\begin{thm}\label{Kth}
The composition
\[
K_i({\mathfrak A}/{\mathfrak K}){\mathop{\longrightarrow}\limits^{\iota_*^{-1}}}K_i({\mathfrak A}^\dagger/{\mathfrak K})
{\mathop{\longrightarrow}\limits^{\bar\sigma_*}}K_i(B)
\]
is an isomorphism, $i=0,1$.
\end{thm}
The proof makes use of the following proposition, which is easily established
by a diagram chase, compare \cite[Exercise 38 of Section 2.2]{Hatcher}:
\begin{pro}\label{ditsche} Let there be given a commutative diagram of
abelian groups with exact rows,
\[
\def\mapdown#1{\downarrow\rlap{$\vcenter{\hbox{$\scriptstyle#1$}}$}}
\def\mapup#1{\uparrow\rlap{$\vcenter{\hbox{$\scriptstyle#1$}}$}}
\begin{array}{ccccccccccc}
\cdots&\rightarrow
&A_i^\prime&{\mathop{\longrightarrow}\limits^{f_i^\prime}}
&B_i^\prime&{\mathop{\longrightarrow}\limits^{g_i^\prime}}
&C_i^\prime&{\mathop{\longrightarrow}\limits^{h_i^\prime}}
&A_{i+1}^\prime
&\rightarrow&\cdots
\\
&
&\mapup{a_i}&&\mapup{b_i}&&\mapup{c_i}&&\mapup{a_{i+1}}&
&
\\
\cdots&\rightarrow
&A_i&{\mathop{\longrightarrow}\limits^{f_i}}
&B_i&{\mathop{\longrightarrow}\limits^{g_i}}
&C_i&{\mathop{\longrightarrow}\limits^{h_i}}
&A_{i+1}
&\rightarrow&\cdots
\end{array},
\]
where each $c_i$ is an isomorphism. Then the sequence
\[
\cdots\longrightarrow A_i{\mathop{\longrightarrow}\limits^{(a_i,-f_i)}}
A_i^\prime\oplus B_i{\mathop{\longrightarrow}\limits^{\langle f_i^\prime,b_i\rangle}}
B_i^\prime{\mathop{\longrightarrow}\limits^{h_ic_i^{-1}g_i^\prime}}
A_{i+1}\longrightarrow\cdots
\]
is exact, where $\langle f_i^\prime,b_i\rangle$ is the map defined by
$\langle f_i^\prime,b_i\rangle(\alpha,\beta)=f_i^\prime(\alpha)+b_i(\beta)$.
\end{pro}
We are now ready to prove Theorem \ref{Kth}.
Applying Proposition \ref{ditsche} to the diagram (\ref{2ces}), we get the exact sequence
\begin{equation}
\label{cyclic1}
\begin{array}{ccccc}
K_0(C_0(Z^\circ))&\rightarrow &K_0(C(Z))\oplus K_0(C_0(S^*Z^\circ))&\rightarrow&
K_0({\mathfrak A}/{\mathfrak K})
\\\uparrow& & & &\downarrow
\\
K_1({\mathfrak A}/{\mathfrak K})&\leftarrow& K_1(C(Z))\oplus K_1(C_0(S^*Z^\circ))&\leftarrow&
K_1(C_0(Z^\circ))
\end{array}.
\end{equation}
We next consider the following diagram of commutative C$^*$-algebras
\begin{equation}\label{cd3}
\def\mapdown#1{\downarrow\rlap{$\vcenter{\hbox{$\scriptstyle#1$}}$}}
\def\mapup#1{\uparrow\rlap{$\vcenter{\hbox{$\scriptstyle#1$}}$}}
\begin{array}{ccc}
C_0(Z^\circ)&{\mathop{\longrightarrow}\limits^{m^\circ}}&C_0(S^*Z^\circ)\\
\downarrow&&\mapdown{p_2}\\
C(Z)&{\mathop{\longrightarrow}\limits^{p_1}}&B
\end{array}.
\end{equation}
As $C_0(Z^\circ)$ is canonically isomorphic to
\[
\{(f,g)\in C(Z)\oplus C_0(S^*Z^\circ);\ p_1(f)=p_2(g)\},
\]
the Mayer-Vietoris exact sequence associated to (\ref{cd3}) is the
exact sequence
\begin{equation}
\label{cyclic2}
\begin{array}{ccccc}
K_0(C_0(Z^\circ))&\rightarrow &K_0(C(Z))\oplus K_0(C_0(S^*Z^\circ))&\rightarrow&
K_0(B)
\\\uparrow& & & &\downarrow
\\
K_1(B)&\leftarrow& K_1(C(Z))\oplus K_1(C_0(S^*Z^\circ))&\leftarrow&
K_1(C_0(Z^\circ))
\end{array}.
\end{equation}
The map $\iota\colon B\cong {\mathfrak A}^\dagger/{\mathfrak K}\hookrightarrow {\mathfrak A}/{\mathfrak K} $ and the identity on
the other K-theory groups furnish morphisms from the
cyclic sequence (\ref{cyclic2}) to the cyclic sequence (\ref{cyclic1}).
The five lemma then shows that the induced maps in K-theory are
isomorphisms. Together with Proposition \ref{hoc} we obtain the assertion.
\hfill$\Box$
\section{The Boutet de Monvel family index theorem}\label{index}
The index of a continuous function with values in Fredholm operators was defined by
J\"anich \cite{J} and Atiyah \cite{A}.
Using the following Proposition \ref{perturb},
their definition can be extended to sections of our $\aleph$.
\begin{pro}\label{perturb} Let ${\mathfrak H}$ and ${\mathfrak A}$ be as above, $k\in {\mathbb N}$ and let
$(A_y)_{y\in Y}\in M_k({\mathfrak A})$ be such that, for each $y$,
$A_y$ is a Fredholm operator, where we interpret $M_k({\mathfrak A})$ as the sections of
the bundle with fiber $M_k({\mathcal A}_y)$. Then there are continuous sections
$s_1,\cdots,s_q$ of $\mathfrak{H}^k$ such that the maps
\[
\begin{array}{rccl}
\tilde A_y\colon &H_y^k\oplus{\mathbb C}^q&\longrightarrow&H_y^k\oplus{\mathbb C}^q\\
&(v,\lambda)&\longmapsto&(A_yv+\sum_{j=1}^{q}\lambda_js_j(y),0)
\end{array}
\]
have image equal to $H_y^k\oplus 0$ for all $y\in Y$ and
$(\ker\tilde A_y)_{y\in Y}$ is a (finite dimensional) vector bundle
over $Y$.
\end{pro}
{\em Proof}:
Similar to \cite[Proposition (2.2)]{AS} and to \cite[Proposition A5]{A}.
\hfill$\Box$
\begin{df}\label{ani}
Given $A=(A_y)_{y\in Y}\in{\mathfrak A}$ as in Proposition~\ref{perturb}, we
denote by $\ker\tilde A$ the bundle $(\ker\tilde A_y)_{y\in Y}$ and
define
\[
\ind_a(A)=[\ker\tilde A]-[Y\times{\mathbb C}^q]\in K(Y).
\]
This is independent of the choices of $q$ and of $s_1,\cdots,s_q$ and
we call it {\em the analytical index of} $A$.
\end{df}
If $A=(A_y)_{y\in Y}\in M_k({\mathfrak A})$
is a section such that each $A_y$ is a Fredholm operator on $H_y^k$ then
the projection to $M_k({\mathfrak A}/{\mathfrak K})$
is invertible and hence defines an element of $K_1({\mathfrak A}/{\mathfrak K})$. Since $\ind_a(A)$
is invariant under stabilization, homotopies and perturbations
by
compact operator valued sections, we get a homomorphism
\begin{equation}
\label{topind}
\ind_a\colon K_1({\mathfrak A}/{\mathfrak K})\longrightarrow K(Y).
\end{equation}
$\ $
Next we define the \emph{topological} index, also
as a homomorphism
\[
\ind_t\colon K_1({\mathfrak A}/{\mathfrak K})\longrightarrow K(Y).
\]
Let $T^*Z$ denote the union
of all $T^*Z_y$, and $B^*Z$ the union of all $B^*Z_y$, equipped with their
canonical topologies, where $B^*Z_y$ denotes the bundle of closed unit
balls of $T^*Z_y$. One may regard $B^*Z$ as a compactification of $T^*Z$ and
identify the ``points at infinity'' with $S^*Z$.
Let $\sim$ denote the equivalence relation that identifies, for each
$y\in Y$, all points of each ball of $B^*Z_y$ which lies over a point of
$\partial Z_y$. The C$^*$-algebra $B$ of Theorem~\ref{Kth} is isomorphic
to the algebra of continuous functions on the quotient space
$S^*Z/\!\!\sim$. Let
$\beta\colon K_1(C(S^*Z/\!\!\sim))\to K_0(C_0(T^*Z^\circ))$ denote
the index map associated to the short exact sequence
\[
0\longrightarrow C_0(T^*Z^\circ)\longrightarrow C(B^*Z/\!\!\sim)
\longrightarrow C(S^*Z/\!\!\sim)\longrightarrow 0,
\]
where $T^*Z^\circ$ is the union over $y\in Y$ of all points of $T^*Z_y$
which lie over interior points of $Z_y$ and the map from
$C(B^*Z/\!\!\sim )$ to $C(S^*Z/\!\!\sim )$ is induced by restriction.
Let $2Z$ denote the union $\cup_y2Z_y$, where each $2Z_y$ is the
double of $Z_y$, and $\pi_d\colon 2Z\to Y$ the canonical projection.
This can be given the structure of a $\mbox{Diff}(2X)$-bundle, with trivializations
obtained by ``doubling'' (as explained at the beginning of Section \ref{sec1})
the trivializations
of the bundle $\pi\colon Z\to Y$. Each fiber $2Z_y$ is then equipped with the
smooth structure induced by the trivializations of
$\pi_d\colon 2Z\to Y$ and
we can form the bundles $T^*2Z$ and $S^*2Z$ as
\label{coesferas} the unions, respectively, of all
cotangent bundles $T^*(2Z_y)$ and of all cosphere bundles $S^*(2Z_y)$, $y\in Y$.
We denote by $\mbox{{\sc as}-ind}_t\colon K_0(C_0(T^*2Z))\to K(Y)$ the composition
of Atiyah and Singer's \cite{AS}\ topological families-index for
the bundle of closed manifolds $2Z$ with the canonical
isomorphism $K(T^*2Z)\simeq K_0(C_0(T^*2Z))$.
Theorem~\ref{Kth} allows us to define the topological index:
\begin{df}\label{indt}
The topological index $\ind_t$ is the following composition of maps
\begin{equation*}
\label{anaind}
\def\mapdown#1{\downarrow\rlap{$\vcenter{\hbox{$\scriptstyle#1$}}$}}
\def\mapup#1{\uparrow\rlap{$\vcenter{\hbox{$\scriptstyle#1$}}$}}
\begin{array}{rc}
\ind_t\colon K_1({\mathfrak A}/{\mathfrak K})
{\mathop{\longrightarrow}\limits^{\bar\sigma_*\circ\iota_{*}^{-1}}}
K_1(C(S^*Z/\!\!\sim )){\mathop{\longrightarrow}\limits^{\beta}}
K_0(C_0(T^*Z^\circ)){\mathop{\longrightarrow}\limits^{e_*}}
&K_0(C_0(T^*2Z))\\ &
\mapdown{\mbox{{\sc as}-ind}_t}\\ &K(Y),
\end{array}
\end{equation*}
where $e\colon C_0(T^*Z^\circ)\to C_0(T^*2Z)$ denotes the map
which extends by zero.
\end{df}
If $A=(A_y)_{y\in Y}\in{\mathfrak A}$ is a family of Fredholm operators
we denote by $\ind_t(A)$ the topological index evaluated
at the element of $K_1({\mathfrak A}/{\mathfrak K})$ that $A$ defines.
\begin{thm}\label{indthm}
Let $A=(A_y)_{y\in Y}\in{\mathfrak A}$ be a continuous
family of Fredholm operators in the closure of the Boutet de Monvel algebra for
each $y$. Then{\em
\begin{equation}
\label{teoind}
\ind_a(A)\ =\ \ind_t(A).
\end{equation} }
\end{thm}
{\em Proof}:
Our strategy is to derive the equality of the indices from the classical
Atiyah-Singer index theorem for families \cite[Theorem (3.1)]{AS}.
To this end we define an operator family $\hat A$ acting on a vector bundle over
the double of $Z$ by a gluing technique involving the principal symbol family of $A$.
We proceed in several steps.
Step 1 consists of a few preliminary remarks on the choice of the representative
of the K-theory class of $A$.
In Step 2 we describe the construction of the bundle.
We then define the operator family $\hat A$ over $2Z$ in Step 3.
Its topological index coincides with that of $A$ as we shall see in Step 4.
The equality of the analytic indices of $A$ and $\hat A$ is
the content of Step 5.
{\em Step} 1.
We need to prove that $\ind_t$ and $\ind_a$
coincide on $K_1({\mathfrak A}/{\mathfrak K})$.
Using that $K_1({\mathfrak A}/{\mathfrak K})=K_1({\mathfrak A}^\dagger/{\mathfrak K})$ by Theorem \ref{Kth}, an
arbitrary element of $K_1({\mathfrak A}/{\mathfrak K})$ is a class
$[[A]]_1$ (the inner brackets denoting a class in the quotient by the
compacts), for some operator family
$A=(A_y)_{y\in Y}\in M_k({\mathfrak A}^\dagger)$, $k\in \mathbb N$,
such that, for each $y$, $A_y\colon H_y^k\to H_y^k$ is a Fredholm operator
with symbol in $B$.
It will be convenient to pick a representative with special properties.
We denote by $C^\infty(S^*X/\!\!\sim)$ the subset of $C^\infty(S^*X)$
of functions which factor through $S^*X/\!\!\sim$, i.e.\ are independent
of the co-variable at the boundary.
The algebraic tensor product
$C_0(U)\otimes C^\infty(S^*X/\!\!\sim)$
is dense in $C(U\times S^*X/\!\!\sim)$ for every open subset $U$ of $Y$.
Furthermore, the inclusion of the space of all elements in $C^\infty(S^*X/\!\!\sim)$
which are independent of the co-variable even in a neighborhood of $\partial Z$
into $C^\infty(S^*X/\!\!\sim)$ is a homotopy equivalence.
We can therefore assume that the symbol family
$(\sigma_y(A_y))_{y\in Y}$ is given as a finite sum of elements
supported in open subsets $U$ of $Y$ over which $Z$ is trivial,
and each of these is a pure tensor in
$C_0(U)\otimes C^\infty(S^*X)$
which is independent of the co-variable near the boundary.
Hence it suffices to prove equality for such an $A$.
{\em Step} 2.
For each $y\in Y$, let $Z_y^+$ and $Z_y^-$ denote the two copies of $Z_y$ which
are glued together at $\partial Z_y$ to form $2Z_y$. The map
$i_y\colon \partial Z_y^+\to\partial Z_y^-$ identifies the two copies of
$\partial Z_y$. We define $E_y$ as the quotient of the disjoint union
$Z_y^+\times{\mathbb C}^k\cup Z_y^-\times{\mathbb C}^k$ by the equivalence relation that
identifies the pairs $(x,v)$ and $(x^\prime,w)$ if and only if they are
equal or $x^\prime=i_y(x)$, $x\in\partial Z_y^+$, and
$w=\sigma_y(A_y)(x) v$ (remembering that at points of $S^*Z_y$ over
$\partial Z_y$, $\sigma_y(A_y)$ is independent of the co-vector variable).
This set $E_y$ naturally becomes a smooth vector
bundle over $Z_y$. Let $E$ denote the union
of all $E_y$, which in the same way becomes a vector bundle over $Y$.
When defining families of smooth manifolds with smooth vector bundles,
Atiyah and Singer make the technical assumption that the fiberwise vector bundles
are isomorphic to a fixed vector bundle on the typical fiber.
If $Y$ is not connected, this is not necessarily satisfied.
However, the isomorphism type of $E_y$ depends only on the homotopy
type of the map $\sigma_y$, in particular only on the component of the space
of all continuous maps from $\partial Z_y$ to $M_k({\mathbb C})$ in which it lies. By
the compactness of $Y$, the latter decomposes into finitely many open and
closed subsets over each of which the isomorphism type of $E_y$ is
constant. As the K-theory of $Y$ as well as ${\mathfrak A}/{\mathfrak K}$ split as direct sums
under such disjoint union decompositions of $Y$, and as
$\ind_a$, $\ind_t$ respect
this, we can restrict to one such subset of $Y$. Then
we are canonically in the situation of \cite[Definition 1.2]{AS},
i.e.~$E$ is a smooth vector bundle over the family of smooth manifolds $2Z$.
{\em Step} 3.
Let $\pi_s\colon S^*2Z\to 2Z$ denote the canonical projection and
$S^*Z^+$ and $S^*Z^-$, respectively, the union of all $S^*Z_y^+$ and $S^*Z_y^-$, $y\in Y$.
The bundle $\pi_s^*E$ can be seen as the disjoint union
of $S^*Z^+\times{\mathbb C}^k$ and $S^*Z^-\times{\mathbb C}^k$ quotiented by the equivalence relation that identifies
a boundary point $(s,v)$ in $S^*Z^+\times{\mathbb C}^k$ with
$(s,\sigma_A(s)\cdot v)$ in $S^*Z^-\times{\mathbb C}^k$.
Similarly, the bundle $S^*2Z\times{\mathbb C}^k$ can be seen as the disjoint union
of $S^*Z^+\times{\mathbb C}^k$ and $S^*Z^-\times{\mathbb C}^k$ quotiented by the equivalence relation that identifies
a boundary point $(s,v)$ in $S^*Z^+\times{\mathbb C}^k$ with $(s,v)$ in
$S^*Z^-\times{\mathbb C}^k$.
We then define $\hat{a}\in\mbox{Hom}(\pi_s^*E,\,S^*2Z\times{\mathbb C}^k)$ by
\begin{equation}\label{ahat}
\hat{a}(s,v)=\left\{\begin{array}{rl}
\sigma_A(s)\cdot v,\ &\mbox{if}\ (s,v)\in S^*Z^+\times{\mathbb C}^k,\\
v,\ &\mbox{if}\ (s,v)\in S^*Z^-\times{\mathbb C}^k.
\end{array}\right.
\end{equation}
We want to show that $\hat{a}$ is the
symbol of a continuous family of pseudodifferential operators.
As any element of $\mbox{Hom}(\pi_s^*E,\,S^*2Z\times{\mathbb C}^k)$,
our $\hat{a}$ can be regarded as a family $(\hat{a}_y)_{y\in Y}$, $\hat{a}_y\in\mbox{Hom}(\pi_s^*E_y,\,S^*2Z_y\times{\mathbb C}^k)$.
It is easily checked that our
definition of $\hat{a}$ indeed mends continuously at boundary points.
But more is true.
Since $\sigma_y(A_y)$ is smooth and independent of the co-variable
near the boundary, each $\hat a_y$ is smooth.
Moreover, since we assumed in Step 1 that
$a$ is a finite sum of local elementary tensors,
we see that $\hat{a}$ is the symbol of an Atiyah-Singer family of pseudodifferential operators on $2Z$%
\footnote{Recall that they use a
slightly stricter definition of operator families:
While we here require continuity of the family with respect to the
$L^2(X)$-operator norm, they take into account the norms on the whole range of
Sobolev spaces.}.
{\em Step} 4.
Let $\iota \colon K_0(C_0(T^*2Z))\to K(B^*2Z,S^*2Z)\simeq
K(T^*2Z)$ denote the canonical isomorphism
(we refer to \cite{Bl} and mainly
\cite{Karoubi} for topological K-theory
definitions and notation).
By Definition \ref{indt}, it is enough to show that
$\iota(e_*(\beta([\sigma_A]_1)))$ is equal to the element of $K(B^*2Z,S^*2Z)$ defined by the triple $(\pi_b^*E,\,B^*2Z\times{\mathbb C}^k, \,\hat{a})$,
where $\pi_b\colon B^*2Z\to 2Z$ denotes the canonical projection.
The main step here is to understand $\beta([\sigma_A]_1)$.
Now, $\sigma_A$ can
and will be considered as a function on $S^*Z/\!\!\sim$ with values in
$Gl_k({\mathbb C})$, representing an element in $K_1(C(S^*Z/\!\!\sim))$ and at the
same time the corresponding element of the topological K-theory
$K^1(S^*Z/\!\!\sim)$, \cite[3.2]{Karoubi}. Recall from \cite[3.21]{Karoubi}
that for the pair of compact topological spaces
$S^*Z/\!\!\sim\ \subset B^*Z/\!\!\sim$,
the boundary map in topological K-theory assigns to $\sigma_A$
the relative K-class
$((B^*Z/\!\!\sim)\times {\mathbb C}^k,(B^*Z/\!\!\sim)\times{\mathbb C}^k,\sigma_A)$,
corresponding under the excision isomorphism $K((B^*Z/\!\!\sim),$ $(S^*Z/\!\!\sim))\cong
K(B^*Z,S^*Z)$ to $(B^*Z\times {\mathbb C}^k,B^*Z\times {\mathbb C}^k,\sigma_A)$, compare
\cite[2.35]{Karoubi}. Moreover, this corresponds to $\beta$ under the
isomorphism
with C$^*$-algebra K-theory.
We next have to compute the map
$e^{top}\colon K(B^*Z,S^*Z)\to K(B^*2Z,S^*2Z)$ in topological K-theory, representing
$e_*\colon K_0(C_0(T^*Z^\circ))\to K_0(C_0(T^*2Z))$.
Recall, however, that
$e^{top}(V,W,\tau)$ is given by any extension $\tilde V$ of $V$, $\tilde W$ of $W$
to $B^*2Z$ and an extension of $\tau$ to an isomorphism $\tilde \tau$ between $\tilde V$ and
$\tilde W$ on all of $(B^*2Z\setminus B^*Z)\cup S^*Z$, $\tilde \tau$ finally
restricted to $S^*2Z$. Finally, observe that
$(\pi_b^*E, B^*2Z\times {\mathbb C}^k, \hat{a})$ provides exactly such an extension (as
$\hat{a}$ extends as $id$ over all of $B^*2Z\setminus B^*Z$) and therefore
represents $\iota e_*(\beta([\sigma_A]_1))$, as we had to prove.
{\em Step} 5.
In order to show that the analytic indices coincide, we will introduce
yet another operator family.
Since $\sigma(A)$ is independent of the co-variable near the boundary,
there is an open set $U\subseteq 2Z$ containing $Z^-=\cup_yZ^-_y$
and a bundle isomorphism
\[
\Phi\colon E|_{U}\longrightarrow U\times{\mathbb C}^k
\]
such that the restriction of $\hat{a}$ to $\pi_s^{-1}(U)$ is equal to the pullback of $\Phi$ by $\pi_s$.
Let $(\chi_y^+)_{y\in Y}$ and $(\chi_y^-)_{y\in Y}$ be continuous families of smooth functions on $2Z$ with
$0\leq\chi_y^\pm\leq 1$, $(\chi_y^+)^2+(\chi_y^-)^2=1$.
Moreover, let the support of each $\chi^+_y$ be contained in the interior of $Z_y^+$ and $\chi_y^+\equiv 1$
outside a neighborhood of $\partial Z_y^+$ in $U$.
Then
\[
\hat{B}_y=\chi^+_y\hat{A}_y\chi^+_y+\chi^-_y\Phi_y\chi^-_y,
\]
defines a family of pseudodifferential operators in the sense of Atiyah and Singer
which has the same principal symbol -- and hence the same analytic index --
as $\hat{A}$.
For each $y\in Y$, we canonically identify the space $L^2(E_y)$ of $L^2$-sections of $E_y$ with the direct sum
$L^2(Z_y^+;{\mathbb C}^k)\oplus L^2(Z_y^-;{\mathbb C}^k)$ and denote by $e_y^\pm$ and $r_y^\pm$ the maps of extension by zero and restriction,
\[
e_y^\pm\colon L^2(Z_y^\pm;{\mathbb C}^k)\to L^2(E_y)\ \ \ \mbox{and}\ \ \ r_y^\pm\colon L^2(2Z_y;{\mathbb C}^k)\to L^2(Z_y^\pm;{\mathbb C}^k).
\]
Then $B_y=r_y^+\hat{B}_ye_y^+$ defines a continuous family
$B=(B_y)_{y\in Y}$ in $M_k({\mathfrak A})$.
As $\sigma(A)=\sigma(B)$ (and hence $\gamma(A)=\gamma(B)$), it suffices to prove that the analytic indices of $B$ and $\hat{B}$ are equal.
Proposition (2.2) of \cite{AS}, applied to the family $\hat{B}$ provides us with sections
$s_y^j\in C^\infty(2Z_y;{\mathbb C}^k)$, $y\in Y$, $1\leq j\leq q$, such that
\[
\begin{array}{rcl}
\hat{Q}_y\colon C^\infty(2Z_y;E_ y)\oplus{\mathbb C}^q&\longrightarrow&C^\infty(2Z_y;{\mathbb C}^k)\\
(u;\lambda_1,\cdots,\lambda_q)&\longmapsto&\hat{B}_y(u)+\sum_{j=1}^{q}\lambda_js^j_y
\end{array}
\]
is onto, $\ker \hat{Q}=(\ker \hat{Q}_y)_{y\in Y}$ is a vector bundle and the analytic index of $\hat{B}$ is equal to
$[\ker \hat{Q}]-[Y\times{\mathbb C}^q]$.
Now let $t_y^j=r_y^+s_y^j\in C^\infty(Z_y;{\mathbb C}^k)$.
The continuity with respect to $y$ that we
get from \cite[Proposition (2.2)]{AS} is enough to ensure that $(t_y^j)_{y\in Y}$ is a continuous section of our bundle of Hilbert spaces ${\displaystyle \bigcup_{y\in Y}L^2(Z_y;{\mathbb C}^k)}$.
We then define
\[
\begin{array}{rcl}
Q_y\colon L^2(Z_y;{\mathbb C}^k)\oplus{\mathbb C}^q&\longrightarrow&L^2(Z_y;{\mathbb C}^k)\\
(u;\lambda_1,\cdots,\lambda_q)&\longmapsto&B_y(u)+\sum_{j=1}^{q}\lambda_jt^j_y
\end{array}
\]
Since $B_y$ is elliptic, $\ker Q_y\subset C^\infty(Z_y;{\mathbb C}^k)$. Using that $\Phi_y$ is local, it is straightforward to check that
\[
\hat{B}_y= e_y^+r_y^+\hat{B}_ye_y^+r_y^+ +e_y^-r_y^-\hat{B}_ye_y^-r_y^-=e_y^+B_yr_y^++e_y^-r_y^-\Phi_ye_y^-r_y^-
\]
and, hence, $\ker Q_y$ and $\ker\hat{Q}_y$ are isomorphic for each $y$ (because $\Phi$ is an isomorphism). Moreover, $Q_y$ is also surjective: Given $v\in L^2(Z_y;{\mathbb C}^k)$, if $u\in L^2(2Z_y;E_y)$ is a preimage of $e^+_yv$ under $\hat{Q}_y$, then
$r^+_yu$ is a preimage of $v$ under $Q_y$. Hence the analytic index of $B$ is given by $[\ker Q]-[Y\times{\mathbb C}^q]$. The bundles
$\ker Q=(\ker Q_y)_{y\in Y}$ and $\ker\hat{Q}$ are isomorphic and then
\[
\ind_a(B)=[\ker Q]-[Y\times {\mathbb C}^q]=[\ker\hat Q]-[Y\times{\mathbb C}^q]=\ind_a(\hat{B}),
\]
as we wanted.
\hfill$\Box$
\section{Nontrivial bundles}\label{sec:non-trivial_bundles}
In this section we discuss families of Boutet de Monvel operators acting between vector bundles. The case considered
in the first two sections corresponds to the case of trivial bundles over the manifolds and the zero bundle over
the boundary.
In addition to the data assumed up to this point (a bundle of manifolds
$\pi\colon Z\to Y$ with fiber $X$), we take smooth vector bundles $E$ and $F$
over $X$ and $\partial X$, respectively.
Let $\mbox{Diff}(\partial X,F)$ denote the group of diffeomorphisms of $F$ which map fibers to fibers linearly,
and let $G_E$ denote the group of diffeomorphisms of $E$ which map fibers to fibers linearly and
whose restrictions to the base belong to the group $G$ defined on page~\pageref{defG}.
We equip $\mbox{Diff}(\partial X,F)$ with its canonical
topology \cite[page 123]{AS} and do a similar construction for $G_E$. Note
that there are homomorphisms ``forget the action in the fiber''
$h_\partial\colon \mbox{Diff}(\partial X,F)\to \mbox{Diff}(\partial X)$ and $h\colon
G_E\to G$. Define the fiber product group
\begin{equation*}
G_r:=\{(\phi,\psi)\in
\mbox{Diff}(\partial X,F)\times G_E\mid h_\partial(\phi)=h(\psi)\}.
\end{equation*}
Let $(p\colon
\tilde E\to Z;\; q\colon \tilde F\to\partial Z)$ be maps such that $(\pi\circ
p\colon \tilde E\to Y; \; \pi_\partial\circ q\colon \tilde F\to Y)$ are
bundles with, respectively,
fibers $E$ and $F$ and structure group $G_r$. It follows
that, for each pair of local
trivializations $(\alpha,\beta)$ of $(\pi\circ p\colon \tilde E\to Y;\; \pi_\partial\circ q\colon \tilde F\to
Y)$
there are local trivializations $\alpha_0$ of $\pi\colon Z\to Y$ and $\beta_0$
of $\partial Z\to Y$ such that the diagram
\begin{equation}
\label{hyp}
\def\mapup#1{\Big\uparrow\rlap{$\vcenter{\hbox{$\scriptstyle#1$}}$}}
\def\mapdn#1{\Big\downarrow\rlap{$\vcenter{\hbox{$\scriptstyle#1$}}$}}
\begin{array}{ccc}
(\pi\circ p)^{-1}(U)&{\mathop{\longrightarrow}\limits^{\alpha}}&U\times E\\
\mapdn{p}&&\Big\downarrow\\
\pi^{-1}(U)&{\mathop{\longrightarrow}\limits^{\alpha_0}}&U\times X
\end{array}
\end{equation}
commutes, where the right vertical arrow is the identity on $U$ times the bundle projection on $E$.
This defines a vector bundle structure for $p\colon \tilde E\to Z$. Moreover, for each $y\in Y$, the
restriction of $p$ to $\tilde E_y=(\pi\circ p)^{-1}(y)$
defines a smooth vector bundle $p_y\colon \tilde E_y\to Z_y$, isomorphic to
$E\to X$. We obtain the corresponding result for the map
$q$ and get a vector bundle $q\colon \tilde F\to \partial Z$ and,
for each $y\in Y$, a smooth vector bundle $q_y\colon \tilde F_y\to\partial Z_y$ isomorphic to $F\to \partial X$.
Choose now, in addition to the family of Riemannian metrics $(g_y)_{y\in Y}$
families of Hermitean metrics on $E_y$ and $F_y$ which depend continuously on
$y\in Y$. Using them, we get families of Hilbert spaces $H_y:=
L^2(Z_y;E_y)\oplus L^2(\partial Z_y; F_y)$ which patch together to a bundle of
Hilbert spaces. Let ${\mathcal A}(E,F)_y$ denote the C$^*$-subalgebra of the algebra of all bounded operators on
$H_y$ generated by the polyhomogeneous Boutet de Monvel operators of
order and class zero.
Exactly as \cite[Proposition 1.3]{AS} our Proposition \ref{prop:conj_is_cont}
generalizes to the case of non-trivial bundles and their diffeomorphisms and
is the basis for the generalization of Corollary
\ref{corol:bundle_of_C_algebras} to the case of non-trivial bundles: the
${\mathcal A}(E,F)_y$ form in a canonical way a continuous bundle of C$^*$-algebras,
which we continue to call $\aleph$ by abuse of notation.
Let $\mathfrak{A}$ denote the set of continuous sections of the bundle
$\aleph$, forming again a C$^*$-algebra with pointwise operations and supremum
norm. The K-theory results of Section~\ref{sec1}\ can be extended to this
more general setting using arguments similar to those used in \cite{MSS}. In
particular, the analytic and topological index
given in Section~\ref{index} can also be defined as maps $K_1({\mathfrak A})\to
K(Y)$. Theorem~\ref{indthm} then extends to this more general setting.
\begin{remark}
Variants of Theorem \ref{indthm}, the family index theorem for the Boutet
de Monvel algebra
for real K-theory or for equivariant K-theory should hold as
well, and one should be able to derive them along the lines used in the
present article.
\end{remark}
\begin{appendix}
\section{Reduction of the structure group}\label{sec:structgroup}
Let, as in the main body of the text, $X$ be a compact smooth manifold with
boundary $\partial X$, and fix a collar diffeomorphism $\delta\colon U\to \partial X\times
[0,1)$ with collar coordinate $x_n$. Recall that $G$ was
defined as the subgroup of the diffeomorphism group $\mbox{Diff}(X)$ of those
diffeomorphisms which respect the product structure and collar coordinate for
$x_n\in [0,1/2)$. For convenience, in the
text we were working with bundles of
manifolds modelled on $X$ and with structure group $G$, i.e.~with a
canonically defined collar of the boundary in each fiber of the bundle.
In this appendix, we prove that, for any bundle (over a paracompact space) with structure group
$\mbox{Diff}(X)$ we have a unique (up to isomorphism) reduction to the structure
group $G$. In other words, the functor from bundles (over a given paracompact
base) with structure group $G$
to bundles with structure group $\mbox{Diff}(X)$ which ``forgets the collar'' is an equivalence of categories. [This is similar to the (unique
up to isomorphism) choice of a
Riemannian metric on a given finite dimensional vector bundle: reduction of
the structure group from $Gl(n)$ to $O(n)$.]
It is well known that we get this unique reduction of structure group if the
inclusion $G\to
\mbox{Diff}(X)$ is a homotopy equivalence, compare \cite{Do} for a rather refined
version of this fact. We therefore show
\begin{theorem}\label{theo:red_to_G}
The inclusion $G\to \mbox{Diff}(X)$ (and therefore the corresponding map $BG\to
B\mbox{Diff}(X)$)
are homotopy equivalences.
\end{theorem}
\begin{proof}
Observe first that $G$ and $\mbox{Diff}(X)$ as well as $BG$ and $B\mbox{Diff}(X)$ are
paracompact
Fr\'echet manifolds by \cite[Sections 41, 42, 44.21]{KrieglMichor} (the
reference is for $\mbox{Diff}(X)$, but the proofs easily generalize to
$G$). Therefore it
suffices by \cite[Theorem 15]{Palais} to
show that $G\to \mbox{Diff}(X)$ is a weak homotopy equivalence and it follows
automatically that it is a homotopy equivalence.
To show that the map is a weak homotopy equivalence, we have for a
continuous map $f\colon K\to\mbox{Diff}(X)$, where $K$ is a compact CW-complex, to
construct a homotopy $f_s$ from $f_0=f$ to an $f_1$ which takes values in
$G$. Moreover, the homotopy should be constant on every CW-subcomplex $K_0$
of $K$ where $f$ already maps to $G$. Note that $K_0$ is a deformation
retract of a neighborhood $U$, i.e.~there is a homotopy $h
\colon K\times [0,1]\to K$ from the identity to $h_1$ such that $h_1(U)=K_0$ and
such that $h_t$ is the identity on $K_0$. By precomposing with $h_1$ we can
therefore assume that $f$ maps the neighborhood $U$ of $K_0$ to $G$.
Let us now construct the family $f_t$.
Choose $\eta\in (0,1]$ such that $\tilde f(k)=\delta\circ
f(k)\circ\delta^{-1}$ maps $\partial X\times[0,\eta)$ to $\partial X\times
[0,1)$ for all $k\in K$ and write $\tilde
f(k)(x^\prime,t)=(\varphi(x^\prime,t;k),\tau(x^\prime,t;k))$.
In two steps we shall now first deform $\tau$ to a function $\hat\tau$ which
equals $t$ for small $t$ and then $\varphi$ to a function which depends only
on $x'$ for small $t$.
Observe that, as $f(k)$ is a diffeomorphism of a manifold with boundary,
$\frac{\partial \tau}{\partial t}>0$ and therefore,
by the compactness of $K$, if we choose $\eta$ small enough,
$C>\frac{\partial \tau}{\partial t}>c>0$ for some $C>c>0$ on all of
$K\times \partial X\times [0,\eta)$.
Pick a smooth function $a\colon [0,\eta)\to [0,1]$ such that $a(t) \equiv
0$ for $t$ close to zero, $a(t)\equiv1$ for $t$ close to $\eta$ and such
that
$$\hat \tau(x',t;k)= (1-a(t))t+a(t)\tau(x',t;k),
\quad (x',t)\in\partial X\times[0,\eta), $$
satisfies $\partial \hat \tau(x',t;k)/\partial t\ge c/2$ for every $x'\in\partial X$ and every $k\in K$. To construct
such an $a$, we use the uniform growth of $\tau$: Choose, for some given
$\varepsilon>0$, the function $a$ so that $(1-a)t$ is monotonically increasing
on the interval $[0,4\varepsilon]$ with $(1-a)t=t$ on $[0, \varepsilon]$ and
$(1-a)t=2\varepsilon$ on $[3\varepsilon,4\varepsilon]$. Then $a$ is
necessarily increasing with $a\equiv 0$ near $0$ and $a(4\varepsilon)=1/2$.
Moreover, $\hat\tau$ is strictly increasing as $\tau$ is. Finally choose
$a$ on $[4\varepsilon, \eta]$ such that $(1-a)t$ monotonically decreases to $0$
and equals zero on $[\eta-\varepsilon, \eta]$. Moreover, we arrange for the
derivative $\partial_t((1-a)t)$ to be always $\ge
-2\frac{2\varepsilon}{\eta-5\varepsilon}$. Again, $a$ is necessarily
increasing with $a\equiv 1$ near $\eta$. The derivative $\partial_t(a\tau)$
can therefore be estimated from below by $c/2$. For $\varepsilon$
sufficiently small, we will therefore have
$2\frac{2\varepsilon}{\eta-5\varepsilon}<c $ and thus
$\partial_t\hat\tau(x',t;k)>0$ for all $x',t,k$.
Note that
then $\hat\tau(x',t;k) = t $ for $t$ close to zero, and $\hat
\tau(x',t;k) =
\tau(x',t;k)$ for $t$ close to $\eta$, uniformly in $k$.
We
then let
$$\tau_s= s\hat\tau+(1-s) \tau,\quad 0\le s\le 1.$$
Then $\frac{\partial \tau_s}{\partial t}\ge c/2$ on $K\times \partial X\times
[0,\eta)$.
For the second step fix a smooth function $\rho\colon [0,1)\to [0,1)$
with $\rho(t) =0$ for $t<\varepsilon$ and $\rho(t) = t$ for
$t>1-\varepsilon$. Next choose a smooth family of smooth functions
$\rho_s$, $0\le s\le1$ such that $\rho_0$ is the identity and
$\rho_1=\rho$. By compactness, we have a uniform bound $|d\rho_s(t)/dt|\le R$.
For a given $\eta>0$, define $\rho^\eta_s(t)\colon [0,\eta)\to [0,\eta);
t\mapsto \eta\rho_s(\eta^{-1} t)$. Then still $|d\rho_s^\eta/dt|\le R$, even
independently of $\eta$.
Let $\varphi_s(x',t):=\varphi(x',\rho^\eta_s(t))$ and $\tilde f_s(k)(x^\prime,t)=
(\varphi_s(x^\prime,t),\tau_s(t))$. Then $ \tilde f_s$ equals the given
$\tilde f$ for $t$ close to $\eta$. Therefore $f_s= \delta^{-1}\circ \tilde
f_s\circ \delta$ extends (independently of $s$) to a self-map of
$X$. Moreover, $|{\frac{\partial\tau_s}{\partial x'}}|\le
|{\frac{\partial \tau}{\partial x'}}|$ for all $s$. And for $t=0$ we have
$\frac{\partial \tau}{\partial x'}=0$.
On the other hand, $\frac{\partial \varphi_s}{\partial
x'}|_{(x',t)}=\frac{\partial \varphi}{\partial x'}|_{(x',\rho^\eta_s(t))}$ is, for
$\eta$ small enough,
invertible on $[0,\eta]$ with uniform bound on the norm of the inverse (and
with better bounds if we choose $\eta$ smaller), and $|\frac{\partial
\varphi_s}{\partial t}(x',t)|= |\frac{\partial
\varphi}{\partial t}|_{(x',\rho^\eta_s(t))}\cdot |d\rho^\eta_s/dt(t)|$ which is uniformly
bounded, independent of $\eta$.
By choosing $\eta$ small enough,
therefore $\partial\tau_s$ will be linearly independent from $\partial\varphi(x',\rho_s(t))$
and so $f_s(k)$ is a submersion for all $s,k$.
We check that we actually constructed diffeomorphisms. We made our
construction such that all the maps $f_s(k)$ are submersions which map the
boundary to itself, therefore the image is an open subset of $X$. As $X$ is
compact, the image is also closed, and the map being a local diffeomorphism,
is a covering map. Because it is homotopic to the diffeomorphism $f(k)$, it is
a trivial covering map and therefore a diffeomorphism.
It is obvious that $f_0=f$ and $f_1(k)$ lies in the variant of $G$ where $1/2$
is replaced by $\eta-\varepsilon$.
Next, we compose
with a family of reparametrizations of the collar $[0,1)$ which stretches
$[0,\eta-\varepsilon)$
to $[0,1/2)$ such that in the end we really map to $G$. Note that our
construction is carried out in such a way that for $k\in U$, where $f(k)$ was
already in $G$, $f_s(k)\in G$ for all $s$, although, because of the last
reparametrization step, not necessarily $f_s(k)=f(k)$.
Therefore, finally, we choose a function $\beta\colon K\to [0,1]$ which is $1$ outside
$U$ and $0$ on $K_0$ and replace the homotopy $f_s(k)$ with $f_{\beta(k)s}(k)$.
This yields the desired homotopy from $f_0=f$ to an $f_1$ taking values in
$G$. Moreover, the mapping is constant on $K_0$.
\end{proof}
\section{The K\"unneth formula}\label{sec:kunneth}
By the ``K\"unneth formula'', we mean the following theorem of Schochet
\cite{S}:
\begin{thm} \label{KS} Let A and B be C$^*$-algebras with $A$ in the smallest
subcategory of the category of separable nuclear C$^*$-algebras which
contains the separable Type I algebras and is closed under the operations of
taking ideals, quotients, extensions, inductive limits, stable isomorphism,
and crossed product by ${\mathbb Z}$ and by ${\mathbb R}$. Then there is a natural
${\mathbb Z}/2$-graded exact sequence
\begin{equation}\label{KAS}
0\to K_*(A)\otimes K_*(B)\to K_*(A\otimes B)\to\mbox{{\em Tor}}(K_*(A),K_*(B))\to 0.
\end{equation}
\end{thm}
We use this Theorem to prove a statement made in the proof of Theorem
\ref{biso}:
\begin{pro}\label{kun} $b_{U*}\colon K_i(C_0(\partial Z_U))
\to K_i(\im \gamma_U)$ is an isomorphism, $i=0,1$.
\end{pro}
{\em Proof}: Let $A=C_0(U)$
and $B=C(\partial X)$. Then $\im \gamma_U$
is equal to $A\otimes C$, where $C$ is the image of the boundary principal
symbol for the single manifold $X$. As explained in the Introduction of
\cite{MNS}, $C$ can be regarded as a C$^*$-subalgebra of $C(S^*\partial
X)\otimes\mathcal{T}$, where $\mathcal{T}$ denotes the Toeplitz algebra.
Since $\mathcal{T}$ belongs to the category defined in the statement of
Theorem \ref{KS} (see Examples 5.6.4 and 6.5.1 in \cite{M}), we may apply
Schochet's theorem for $A\otimes B$ and for $A\otimes C$.
Now let ${\tt b}\colon C(\partial X)\to C$ be the map analogous to the map $b$
defined right before the statement of Theorem~\ref{biso}. In
\cite[Section 3]{MNS}, it is proven that ${\tt b}$ induces a
K-theory isomorphism ({\tt b} was denoted $b$ in \cite{MNS,MSS}). Using that
the exact sequence of Theorem~\ref{KS} is natural, we can map (\ref{KAS}) to
the corresponding sequence obtained by replacing $B$ with $C$. Since the maps
induced by ${\tt b}$ are isomorphisms, it follows from the five-lemma that the
maps induced by $b_U=\mbox{id}_A\otimes{\tt b}$ are also isomorphisms. \hfill$\Box$
\end{appendix}
\section*{Acknowledgements}
We greatly benefited from numerous discussions with our friends
Johannes Aastrup and Daniel Tausk.
We thank them for their generosity and for the great time we had talking Math to them. We are also grateful to Jochen Ditsche for pointing out Proposition
\ref{ditsche} to us.
Severino Melo was partially supported by a grant from the Brazilian agency
CNPq (Processo 304783/2009-9). Thomas Schick was partially supported by the
Courant Center ``Higher order structures of mathematics'' within the
Excellence initiative's Institutional strategy of Georg-August-Universit\"at G\"ottingen.
|
1,314,259,994,984 | arxiv | \section{Introduction}
\label{sec:Intro}
Recently, the discovery of an unusual supernova (SN), iPTF14hls, was
reported by \cite{Arcavi17}. iPTF14hls, at a redshift of $z=0.0344$, was
first discovered in $R$ band on September 22, 2014 UT \citep{Arcavi17}.
Before its discovery, the position of iPTF14hls was not monitored for
approximately 100 days. At the beginning, astronomers did not pay much attention
to iPTF14hls during its decline in brightness. Intense multiband
observations were deployed only when iPTF14hls began to rebrighten about
100 days after its discovery.
Although identified as a type II-P SN according to its spectroscopic
features \citep{Li15}, iPTF14hls is unique among currently discovered
SNe. The light curve of iPTF14hls lasts for more than 1200 days %
\citep{Sollerman18} and has at least five distinct peaks, while an ordinary
type II-P SN has a 100-day plateau in brightness. The spectral evolution of
iPTF14hls is 10 times slower than typical SNe II-P \citep{Arcavi17}. The
photospheric velocities measured by Fe \textsc{ii }$\lambda $5169 stay at a
constant value of $4000\unit{km}\unit{s}^{-1}$.
\cite{Arcavi17} discussed several possible theoretical models, e.g.,
interaction between SN ejecta and circumstellar material
\citep{Chevalier82,
Chevalier94, Chugai94, Chatzopoulos12, Moriya13, WangLiu2016, Wang18},
spin-down of a magnetar \citep{Kasen10, Woosley10}, fallback accretion onto
a black hole \citep{Michel88, Dexter13}, and suggested that the most likely
model may be fallback accretion. However, \cite{Dessart18} proposed that the
magnetar model can fit the light curve, while \cite{Soker18} explained
iPTF14hls as a common-envelope jets SN. \cite{Chugai18} and \citet{Woosley18}
discussed the models that might explain the light curve and spectral
features.
Here we suggest that the multiple peaks in the light curve of iPTF14hls
could be powered by intermittent fallback accretion of the SN ejecta. In a
successful SN, the material remaining bound could fallback and eventually
accrete onto the central object. Accretions onto compact objects (black
holes or neutron stars) are usually accompanied by powerful outflows
\citep{Mirabel98,
Fender04}, which can carry away about 10\% of the gravitational binding
energy of the accreted material. Such powerful outflows can aid the
explosion of the SN, and on the other hand, a fraction of this energy would
be thermalized to power a bright light curve \citep{Dexter13}.
This paper is structured as follows. In Section \ref{sec:model} we describe
the fallback accretion process, while in Section \ref{sec:fit} the model and
fitting results are presented. Finally, we discuss and conclude our results
in Section \ref{sec:conc}.
\section{Fallback accretion}
\label{sec:model}
After the explosion of a core-collapse SN, a rebounce outward shock is
launched at the base of the central compact core, which further collapses
into a neutron star or black hole. This shock imparts a typical kinetic
energy of $\simeq 10^{51}\unit{erg}$ to the still-infalling material and
reverses it to move outward. The outward-moving material (ejecta) adjusts
itself quickly into a homologous expansion phase, that is, the expansion
velocity $v$ of a material element is proportional to its distance $r$ to
the central compact object. Although most of the ejecta becomes unbound to
the central compact object, a fraction of the ejecta with mass $M_{0}$ is
bound and finally falls back \citep{Colgate71} and accretes onto the central
compact object. Based on some arguments presented in Section \ref{sec:conc},
hereafter we assume that the remnant of iPTF14hls is a neutron star.
The material accreted at early times comes from the slowly moving inner
ejecta. Assuming a power-law density profile of the inner shell of the
progenitor star $\rho \left( r\right) =\rho _{0}\left( r/r_{0}\right)
^{\alpha -3}$, where $\rho _{0}$ is the density of the shell at radius $%
r_{0} $, the fallback accretion rate is
\citep[for $0<\alpha<3$;][]{Quataert12,
Dexter13}
\begin{equation}
\dot{M}=\frac{8\pi }{3-\alpha }\frac{\rho _{0}r_{0}^{3}}{t_{0}}\left( \frac{t%
}{t_{0}}\right) ^{\frac{3\left( \alpha -1\right) }{3-\alpha }}.
\end{equation}%
This accretion rate is usually rising because typically $1<\alpha <3$ for
inner shells. Here $t_{0}$ is defined as\footnote{%
Note that this definition of $t_{0}$ is different from that given in \cite%
{Dexter13} by an extra factor $\sqrt{\pi \alpha /8}$.}%
\begin{equation}
t_{0}=\left( \frac{\pi \alpha }{32G\rho _{0}}\right) ^{1/2}.
\end{equation}%
This accretion phase will transition to a long-term accretion phase when the
expansion velocity $v$ of the bound material is comparable to the escape
velocity $v_{\mathrm{esc}}$. In this case the material can reach a maximum
radius $r_{\max }=r_{0}\left( 1-v^{2}/v_{\mathrm{esc}}^{2}\right) ^{-1}$ %
\citep{Dexter13} and then falls back with a free-fall timescale $t_{\mathrm{%
ff}}$ \citep{Michel88}%
\begin{equation}
\frac{v_{\mathrm{esc}}^{2}-v^{2}}{v_{\mathrm{esc}}^{2}}=\left( \frac{t_{%
\func{col}}}{t_{\mathrm{ff}}}\right) ^{2/3},
\end{equation}%
where $t_{\func{col}}$ is the free-fall collapse time to form $M_{0}$ from
material at rest. Assuming a \emph{constant} density profile, the accretion
rate decays according to $t^{-5/3}$ \citep{Michel88}.
At very late phase, instead of a constant profile, the density may be a
steep power law, $\rho \left( r\right) =\rho _{0}\left( r/r_{0}\right)
^{\alpha -3}$ with $\alpha <0$, the enclosed mass is effectively constant,
and the accretion rate is \citep{Dexter13}%
\begin{equation}
\dot{M}=\frac{8\pi }{3}\frac{\rho _{0}r_{0}^{3}}{t_{1}}\left( \frac{t}{t_{1}}%
\right) ^{\left( 2\alpha -3\right) /3},
\end{equation}%
where%
\begin{equation}
t_{1}\equiv \pi \left( \frac{r_{0}^{3}}{8GM}\right) ^{1/2}.
\end{equation}%
Because the early rising phase in the light curve of iPTF14hls is missing,
we will model the light curve only by the $t^{-5/3}$ law and at very late
phase $t^{\left( 2\alpha -3\right) /3}$ law with some $\alpha <0$.
Assuming a spherical accretion, \cite{Chevalier89} and \cite{Houck91}
studied the structure of the accretion flow that may operate in the famous
SN 1987A. To power an SN like iPTF14hls by accretion, the accretion rate
(see Section \ref{sec:fit}) should be high (in the range $10^{-4}\lesssim
\dot{M}_{\mathrm{ac}}\lesssim 10^{4}M_{\odot }\unit{yr}^{-1}$) and the
gravitational accretion energy is carried away by neutrinos produced near
the neutron star \citep{Chevalier89, Houck91}. However, this does not mean
that the photons in the accretion flow cannot heat the ejecta.
To determine whether the radiation advected with the accretion flow is able
to diffuse out, the trapping radius
\citep{Katz77, Begelman78,
Flammang82, Blondin86}%
\begin{equation}
r_{\mathrm{tr}}=\frac{\dot{M}_{\mathrm{ac}}\kappa }{4\pi c}=5.5\times
10^{13}\left( \frac{\dot{M}_{\mathrm{ac}}}{M_{\odot }\unit{yr}^{-1}}\right)
\left( \frac{\kappa }{0.33\unit{cm}^{2}\unit{g}^{-1}}\right) \unit{cm}
\end{equation}%
is defined at which the inwardly advected radiation flux balances the
outward diffusion flux. Photons outside this radius can diffuse out and heat
the ejecta, while the photons inside this radius are trapped. Because
iPTF14hls is hydrogen-rich, here we take the electron Thomson scattering
opacity $\kappa =0.33\unit{cm}^{2}\unit{g}^{-1}$ %
\citep[e.g.,][]{Moriya11,Chatzopoulos12}, which is suitable for fully
ionized material with solar metallicity.
The inner regions of the accretion flow achieve supersonic free fall %
\citep{Chevalier89}, which, upon reaching the neutron star surface,
generates a strong shock moving outward. The energy is mainly stored inside
but close to the shock radius. The shock radius $r_{s}$\ is determined by
neutrino cooling efficiency. Photons inside $r_{s}$\ act as potential
heating source of the SN. Whether the photons inside $r_{s}$\ can diffuse
out depends on if the condition $r_{s}>r_{\mathrm{tr}}$\ is satisfied. Here
we simply assume that this condition is satisfied and leave the
justification in Section \ref{sec:conc}.
At the accretion rate mentioned above, the accretion is super-Eddington.
During accretion the infalling material is compressed and becomes hot and
geometrically thick because of the inability of the advected photons to
escape from the accretion flow. As a result, the accretion is accompanied by
powerful outflow \citep{Narayan94, Blandford99, Igumenshchev00, McKinney12},
as verified by the observation of ultra-relativistic outflow from a neutron
star accreting gas from a companion \citep{Fender04}. Usually the accretion
rate is assumed to be a power-law in radius $\dot{M}\left( r\right) =\dot{M}%
_{\mathrm{fb}}\left( r/r_{\mathrm{fb}}\right) ^{s}$
\citep[e.g.,][]{Kohri05,
Dexter13}, where $\dot{M}_{\mathrm{fb}}$ is the mass accretion rate at the
fallback radius $r_{\mathrm{fb}}$, and $0<s<1$. It should be stressed that $%
\dot{M}_{\mathrm{fb}}$ is not the mass accretion rate onto the compact
object because a large fraction of the accretion flow is channeled as an
outflow. The net accretion rate $\dot{M}_{\mathrm{ac}}$ onto the neutron
star is usually only $\sim 1\%$ of $\dot{M}_{\mathrm{fb}}$, namely $\dot{M}_{%
\mathrm{ac}}=\xi \dot{M}_{\mathrm{fb}}$\ with $\xi \simeq 0.01$. About 10\%
of the accreted matter is converted into radiation energy. Consequently, the
accretion energy rate is $\dot{E}_{w}=\epsilon \dot{M}_{\mathrm{fb}}c^{2}$
with $\epsilon \simeq 10^{-3}$ \citep{Dexter13}.
Because of the existence of powerful outflows, the accretion cannot be
strictly spherical. The aspherical accretion may be induced by the spiral
modes of the standing accretion shock instability
\citep[SASI;][]{Burrows95,
Janka96, Blondin03, Marek09, Fernandez10} or the convection in the
pre-collapse envelope \citep{Gilkis14}. Recently, the jet-feedback mechanism %
\citep{Gilkis16} based on SASI has been suggested to carry away the accretion
energy. In this scenario the energy may be carried away by jets launched by
material accreted near the equatorial plane, as indicated by observations \citep{Fender04}.
The fallback mass is a function of the compactness of the progenitor stars
and explosion energy \citep{Chevalier89, Zhang08}. For loose progenitors
with typical explosion energies $\sim 10^{51}\unit{erg}$, like red
supergiants (RSGs), the fallback mass is usually small, $\lesssim
0.1M_{\odot }$. However, for more compact progenitors, e.g., blue
supergiants, the H/He interface triggers the formation of a strong reverse
shock, which decelerates the ejecta and enhances the fallback mass
significantly. For weak explosions, most of the mass may fall back %
\citep{Moriya10}. The metallicity of the progenitor stars influences the
mass loss history before explosion and therefore is another factor that
impacts the fallback mass. As a result, fallback accretion influences the
final mass of the central compact objects. For population III (zero
metallicity) stars above $25M_{\odot }$ and explosion energies less than $%
1.5\times 10^{51}\unit{erg}$, the central compact objects are more likely
black holes \citep{Zhang08} because of the large amount of fallback. For
population I (solar metallicity) stars, black hole production is much less
frequent because of large-scale mass loss before explosion.
\section{The model and fitting results}
\label{sec:fit}
The accretion outflows not only heat the SN ejecta, but also accelerate the
ejecta. We use the method outlined in \cite{WangWang16} to calculate the
light curve and the evolution of the photospheric velocities. In this model
the photospheric radius is at the position outside of which the optical
depth is equal to $2/3$ \citep{WangWang16}. The acceleration of the ejecta
by the energy injection has been taken into account by this model, which
assumes a homologous expansion of the SN ejecta, with a homogeneous density
distribution. The energy injection from the energy sources, which may be a
spinning-down magnetar, $^{56}$Ni cascade decay, or fallback accretion, will
be trapped by the ejecta. The trapped energy undergoes adiabatic expansion,
which accelerates the ejecta according to the following equation %
\citep{WangWang16}%
\begin{equation}
\frac{dE_{K}}{dt}=L_{\mathrm{inp}}-L_{e},
\end{equation}%
where $L_{\mathrm{inp}}$\ is the power trapped by the ejecta, $L_{e}$\ is
the SN luminosity, and $E_{K}$\ is the kinetic energy of the SN. The
expansion velocity $v_{\mathrm{sc}}$\ (which is approximately equal to the
observed photospheric velocity for massive ejecta at early epoch) is
calculated according to $E_{K}=3M_{\mathrm{ej}}v_{\mathrm{sc}}^{2}/10$\ %
\citep{Arnett82}, where $M_{\mathrm{ej}}$\ is the ejecta mass. A part of the
trapped energy diffuses out of the ejecta, resulting in the multiband
optical emission of the SN.
To account for the multiple peaks in the light curve of iPTF14hls, we
propose that the accretion is episodic. Such episodes are not rare in
astrophysics. For example, episodic accretion may be caused by instabilities
of disks around protostars \citep{Sakurai16,
Kuffmeier18}, or by knotty jets in young protostellar disks %
\citep{Vorobyov18}.
For fallback accretion, the energy input is%
\begin{equation}
L_{\mathrm{inp}}=\dot{E}_{w}=\epsilon \dot{M}_{\mathrm{fb}}\left( t\right)
c^{2}, \label{eq:fallback-power}
\end{equation}%
where $\dot{M}_{\mathrm{fb}}\left( t\right) $ takes the expression%
\begin{equation}
\dot{M}_{\mathrm{fb}}\left( t\right) =\dot{M}_{i}\left( t/t_{i}\right)
^{-5/3}
\end{equation}%
during the constant density accretion phase and%
\begin{equation}
\dot{M}_{\mathrm{fb}}\left( t\right) =\dot{M}_{i}\left( t/t_{i}\right)
^{\left( 2\alpha -3\right) /3}
\end{equation}%
for the final power-law density accretion. Here $\dot{M}_{i}$ is the mass
fallback rate at time $t_{i}$ when the $i$th fallback episode begins. To
calculate the light curve of an SN powered by fallback accretion, the energy
input given by Equation $\left( \ref{eq:fallback-power}\right) $\ takes the
place of the magnetar spinning-down power in the case of a magnetar-powered
SN. In both the magnetar-powered case and the accretion-powered case, the
energy is assumed to be deposited at the center of the SN ejecta. The
photospheric emission is a result of photon diffusion.
In this work we use the bolometric luminosity data of iPTF14hls provided by
\cite{Sollerman18} who extended the observation to more than 1200 days since
discovery. We neglect the possible contribution of $^{56}$Ni and $^{56}$Co
to the light curve of iPTF14hls. The SN explosion would have surely
synthesized some amount of $^{56}$Ni. However, because of the finite
lifetimes of $^{56}$Ni ($8.8\unit{days}$) and $^{56}$Co ($111.3\unit{days}$%
), such contribution is only limited to the first $\sim 100\unit{days}$
since explosion, which were largely missed by the observation.
The fitting results (solid lines), including the light curve and
photospheric velocity evolution, are shown in Figure \ref{fig:lc-v}, with
the 19 fitting parameters listed in Table \ref{tbl:para}. It can be found
that the fallback rates listed in Table \ref{tbl:para} are similar to that
given by \cite{Moriya18}, who interpret OGLE-2014-SN-073 as a fallback
accretion powered type II supernova. To give a decent fit to the light
curve, eight episodes are needed. In Figure \ref{fig:lc-v} we mark $t_{i}$
as vertical blue ticks. It is found that the first seven accretion episodes
can be fit by the $t^{-5/3}$\ law, whereas the last episode ($t_{8}$\ and $%
\dot{M}_{8}$) can only be fit by a steep decay with a density power-law
index $\alpha \simeq -22$. With this $\alpha $, the late-time light curve
decay index is $\sim -15.6$, slightly steeper than that measured by \cite%
{Sollerman18}, who gave a decay index $-13.5$.
\begin{table*}[tbph]
\caption{Best-fitting parameters.}
\label{tbl:para}
\begin{center}
\begin{tabular}{ccccccccccccccccccc}
\hline\hline
$M_{\mathrm{ej}}$ & $v_{\mathrm{sc}0}$ & $\dot{M}_{1}$ & $t_{1}$ & $\dot{M}%
_{2}$ & $t_{2}$ & $\dot{M}_{3}$ & $t_{3}$ & $\dot{M}_{4}$ & $t_{4}$ & $\dot{M%
}_{5}$ & $t_{5}$ & $\dot{M}_{6}$ & $t_{6}$ & $\dot{M}_{7}$ & $t_{7}$ & $\dot{%
M}_{8}$ & $t_{8}$ & $\alpha $ \\ \hline
21 & 4200 & 4.9 & 20 & 0.75 & 186 & 0.43 & 321 & 0.5 & 380 & 0.4 & 494 & 0.24
& 550 & 0.065 & 832 & 0.043 & 1078 & $-22$ \\ \hline
\end{tabular}%
\end{center}
\par
\textbf{Notes.} $M_{\mathrm{ej}}$ and $v_{\mathrm{sc}0}$ are in units of $%
M_{\odot }$ and $\unit{km}\unit{s}^{-1}$, respectively. The accretion rates $%
\dot{M}_{i}$ at fallback radius are in units of $10^{-8}M_{\odot }\unit{s}%
^{-1}$, while $t_{i}$ are in units of $\unit{days}$ since SN explosion. In
this fit we fixed $\kappa =0.33\unit{cm}^{2}\unit{g}^{-1}$. Because of the
lack of observational data between the third and fourth peaks, $t_{4}$
cannot be accurately constrained, nor can $t_{1}$ because of the lack of
observational data around the first peak. The first seven accretion episodes
can be fit by the $t^{-5/3}$ law, whereas the last episode ($t_{8}$ and $%
\dot{M}_{8}$) can only be fit by a steep decay with a density power-law
index $\alpha $.
\end{table*}
The explosion date is $\sim 120\unit{days}$ before the first observational
data point. In Table \ref{tbl:para} $v_{\mathrm{sc}0}$\ is the initial
expansion velocity of the surface of the ejecta. Assuming homologous
expansion of the ejecta, the initial explosion energy of this SN is $3M_{%
\mathrm{ej}}v_{\mathrm{sc}0}^{2}/10=2.2\times 10^{51}\unit{erg}$. This
energy can be attributed to neutrino-driven mechanism, which may drive an
explosion up to energy $\sim 2.5\times 10^{51}\unit{erg}$\
\citep{Janka16,
Bollig17}.
\begin{figure}[tbph]
\centering\includegraphics[width=0.5\textwidth,angle=0]{iPTF14hls-lc-v.pdf}
\caption{Light curve (top panel) and photospheric velocity (bottom panel) of
iPTF14hls reproduced by the fallback accretion model (solid lines). The
dashed lines are the modeling results including the energy injection from an
outburst. The red dashed line in the upper panel assumes a $t^{-5/3}$
accretion rate, which fails to fit the data after $\sim 1100$ days since
explosion. This is why a late-time steep decline has been introduced here,
as depicted by the solid blue line.}
\label{fig:lc-v}
\end{figure}
Figure \ref{fig:lc-v} shows that the fallback accretion model gives a
reasonably good fit to both the light curve and velocity evolution of
iPTF14hls. However, the third peak in the light curve cannot be fitted. Such
a steep peak requires a very rapid energy release rate. This may suggest some
activity in the central compact object. At such late times ($\sim 300\unit{%
days}$) since explosion, the energy may be released by a magnetic outburst %
\citep{Gavriil02, Rea09, Rea12} if the central object is a neutron star.
Indeed, stellar evolution model predicts that a single star with initial
masses between $\sim 8$ and $25M_{\odot }$ will explode as an SN II-P,
leaving behind a neutron star remnant \citep{Heger03}.
To quantify the outburst power, we assume a power-law injection%
\begin{equation}
L_{\mathrm{rise}}=L_{\mathrm{pk}}\left( \frac{t-t_{\mathrm{start}}}{t_{%
\mathrm{rise}}}\right) ^{n}, \label{eq:L-rise}
\end{equation}%
followed by a rapid shutoff of the outburst%
\begin{equation}
L_{\mathrm{fall}}=L_{\mathrm{pk}}\left( \frac{t_{\mathrm{shutoff}}-t}{t_{%
\mathrm{fall}}}\right) .
\end{equation}%
Here $t_{\mathrm{start}}$ and $t_{\mathrm{shutoff}}$ are the times at which
the outburst begins and ends, respectively; $t_{\mathrm{rise}}$ and $t_{%
\mathrm{fall}}$ are the durations for the rise and fall of the outburst,
respectively. Obviously, $t_{\mathrm{shutoff}}-t_{\mathrm{start}}=t_{\mathrm{%
rise}}+t_{\mathrm{fall}}$. We set $n=3$ in Equation $\left( \ref{eq:L-rise}%
\right) $ during the fitting. This power exponent does not result from the
fitting constraints but was appropriately selected. To give a good fit to
the light curve, we found $2\lesssim n\lesssim 5$. The 5 fitting parameters
for the outburst are listed in Table \ref{tbl:burst-para}, with the
resulting light curve depicted in Figure \ref{fig:lc-v} as dashed lines.
Here we choose another set of values for $t_{4}$\ and $\dot{M}_{4}$\ (see
Section \ref{sec:conc} for some discussion). It can be seen that the model
including a magnetic outburst can fit the light curve very closely.
\begin{table}[tbph]
\caption{Fitting parameters for the outburst.}
\label{tbl:burst-para}
\begin{center}
\begin{tabular}{ccccc}
\hline\hline
$t_{\mathrm{start}}$ & $t_{\mathrm{rise}}$ & $t_{\mathrm{fall}}$ & $L_{%
\mathrm{pk}}$ & $n$ \\ \hline
$\left( \unit{days}\right) $ & $\left( \unit{days}\right) $ & $\left( \unit{%
days}\right) $ & $\left( \unit{erg}\unit{s}^{-1}\right) $ & \\ \hline
$318$ & $29.2$ & $0.8$ & $1.7\times 10^{43}$ & $3$ \\ \hline
\end{tabular}%
\end{center}
\end{table}
From Table \ref{tbl:burst-para} we see that the outburst lasted for $\sim 30%
\unit{days}$, and released $1.1\times 10^{49}\unit{erg}$ in total. This is
in accordance with observations, which show that some X-ray pulsars may
experience sporadic giant X-ray outbursts lasting weeks to years followed by
a long-term quiescence \citep{Gavriil02, Kaspi03, Rea12, Cusumano16}.
\begin{figure}[tbph]
\includegraphics[width=0.5\textwidth,angle=0]{radius.pdf} \centering
\caption{Fit (solid green line) to the photospheric radius of iPTF14hls
estimated (1) using blackbody fits to the broad-band $BVgi$ photometry
(blue) and (2) using the expansion velocities of Fe \textsc{ii }$\protect%
\lambda 5169$ times the elapsed rest-frame time ($vt$) since explosion
(pink). The data are taken from \protect\cite{Arcavi17}, but rescaled to
times since explosion, rather than since discovery.}
\label{fig:ph-radius}
\end{figure}
One intriguing feature of iPTF14hls is that the radius derived from the Fe
\textsc{ii }$\lambda 5169$ expansion velocity times the elapsed
rest-frame time is not equal to the radius determined by blackbody fits (see
Figure \ref{fig:ph-radius}). Spectral measurements of Fe \textsc{ii }$%
\lambda 5169$ indicate that iron expands at a roughly constant velocity $%
\sim 4000\unit{km}\unit{s}^{-1}$, from which the so-called line-forming
radius can be derived (pink points in Figure \ref{fig:ph-radius}). This
radius is, however, much larger than the blackbody-determined radius (blue
circles in Figure \ref{fig:ph-radius}). As can be seen from Figure \ref%
{fig:ph-radius}, the photospheric radius predicted by this model closely
follows the line-forming radius. We discuss this feature in Section \ref%
{sec:conc}.
\section{Discussion and conclusions}
\label{sec:conc}
To date, many SNe were found to be double-peaked
\citep{Arnett89,
Richmond94, Mazzali08, Nicholl15, Nicholl16}, in which case the first
short-lived peak has been attributed to shock cooling
\citep{Piro15,
Vreeswijk17, WangSQCano17}, although sometimes the cooling peak could merge
with the second main peak \citep{Wang18}. The first peak in the light curve
of iPTF14hls is unlikely to be the result of shock cooling because that would
require a very massive and extended envelope.
Because of the lack of observational data between the third and fourth peaks
of the light curve, and also the lack of observational data around the
first peak, $t_{1}$ and $t_{4}$ in Table \ref{tbl:para} cannot be accurately
constrained. As a demonstration of this uncertainty, in Figure \ref{fig:lc-v}
we choose different $t_{4}$ for the solid and dashed curves. However, as can
be seen from Figure \ref{fig:lc-v}, an earlier $t_{4}$, as depicted by the
dashed curve, is preferred because of the light-curve decline rate between $%
t_{4}$ and $t_{5}$. For the dashed line (the curve including the energy
injection of a magnetic outburst), we choose $t_{4}=391\unit{days}$ and $%
\dot{M}_{4}=0.49\times 10^{-8}M_{\odot }\unit{s}^{-1}$.
The most likely progenitor of iPTF14hls is a red supergiant since
observations have demonstrated that the progenitors of several type II-P SNe
are RSGs (e.g., \citealt{Smartt09,Davies17,VanDyk17,Huang18}). This is
consistent with the fact that the total fallback mass is about $\sim
0.2M_{\odot }$, as expected for a RSG progenitor
\citep{Chevalier89,
Dexter13}. The ejecta mass, $M_{\mathrm{ej}}\simeq 21M_{\odot }$, is also
consistent with a RSG \citep{Davies18}, though at the high end of the
distribution of the SNe II-P ejecta masses.
The remnant of a RSG explosion is believed to be a neutron star. This is
consistent with the study of remnant mass distribution of massive star
explosions \citep{Zhang08}. iPTF14hls occurred on the outskirts of a
low-mass star-forming galaxy, indicating low metallicity \citep{Arcavi17}.
For population III star explosions with progenitor mass larger than $%
25M_{\odot }$\ and explosion energies less than $1.5\times 10^{51}\unit{erg}$%
, black holes are a more frequent outcome. The progenitor mass of iPTF14hls, $%
22M_{\odot }$\ (the sum of $M_{\mathrm{ej}}$\ and remnant mass, which is
assumed to be $\sim 1M_{\odot }$), combined with its explosion energy $%
2.2\times 10^{51}\unit{erg}$, indicates that the remnant of iPTF14hls is
more likely a neutron star. On the other hand, despite the possible low
metallicity of iPTF14hls, \cite{Arcavi17} estimated a metallicity of $%
0.5Z_{\odot }$, which is more compatible with a population I star explosion.
In this case the production of a black hole remnant is highly suppressed %
\citep{Zhang08}.
The production of a neutron star remnant is also partially supported by the
need to fit the third peak of the light curve by a magnetic outburst.
Inspection of Table \ref{tbl:para} shows that the mass fallback rates $\dot{M%
}_{i}$ decrease monotonically, as expected. However, $\dot{M}_{1}$ is much
larger than $\dot{M}_{2}$. This is in sharp contrast to the mass fallback
rates that follow. $\dot{M}_{1}$ could be reduced if there is some
contribution from the energy injection of the neutron star. We have
neglected the contribution of $^{56}$Ni and $^{56}$Co because their
contribution is short-lived, but the contribution of a neutron star (or
magnetar) could be long-lived
\citep{Kasen10, Woosley10, Inserra13,
Nicholl14, Metzger15, WangWang15, WangYu17, Dai16, LiuWang17}.
To examine the properties (dipole magnetic field $B_{p}$, and initial spin
period $P_{0}$) of the neutron star, we set (somewhat arbitrarily) $\dot{M}%
_{1}=1.0\times 10^{-8}M_{\odot }\unit{s}^{-1}$ and found $P_{0}\simeq 8\unit{%
ms}$, $B_{p}\simeq 5\times 10^{14}\unit{G}$. With these parameters, the
first peak can be closely fitted while the remaining peaks are affected
negligibly by the contribution of the magnetar. We will not show the fitting
results by this model because the resulting light curve closely follows the
curves presented in Figure \ref{fig:lc-v}. It is found that $P_{0}$ cannot
be too large, say $P_{0}\gtrsim 10\unit{ms}$, because in that case the
magnetar would contribute too much at late times so that the late-time light
curve deviates from the $t^{-5/3}$ law. We note that the above constraints
on $P_{0}$ and $B_{p}$ should not be taken seriously because they are
degenerate with $\dot{M}_{1}$.
As we said, the third peak cannot be explained by fallback accretion, and
magnetic activity is therefore proposed as its energy source. For the
fallback rate at the third peak, $\dot{M}_{\mathrm{fb}}\simeq 0.4\times
10^{-8}M_{\odot }\unit{s}^{-1}$, the corresponding accretion rate $\dot{M}_{%
\mathrm{ac}}=\xi \dot{M}_{\mathrm{fb}}\simeq 1\times 10^{-3}M_{\odot }\unit{%
yr}^{-1}$\ indicates a trapping radius $r_{\mathrm{tr}}\simeq 7\times 10^{10}%
\unit{cm}$. For the energy in the magnetic activity to diffuse out, the
dissipation radius $r_{\mathrm{act}}$\ of the magnetic energy should be
larger than $r_{\mathrm{tr}}$, namely $r_{\mathrm{act}}>r_{\mathrm{tr}}$. To
estimate $r_{\mathrm{act}}$, let us first assume a spherical accretion. The
balance of stellar wind pressure $L/4\pi r^{2}c$\ with the ram pressure $%
\rho v^{2}=\left( \dot{M}_{\mathrm{ac}}/4\pi \right) \left( 2GM/r^{5}\right)
^{1/2}$\ of the infalling material gives a radius%
\begin{equation}
r_{b}=2GM\left( \frac{\dot{M}_{\mathrm{ac}}c}{L}\right) ^{2}\simeq 1.3\times
10^{12}\unit{cm}, \label{eq:r_b}
\end{equation}%
where the typical value of pulsar luminosity $L=1\times 10^{41}\unit{erg}%
\unit{s}^{-1}$\ at the time of the third peak, the neutron star mass $%
M=1.4M_{\odot }$, accretion rate $\dot{M}_{\mathrm{ac}}=\xi \dot{M}_{\mathrm{%
fb}}=0.01\times 10^{-8}M_{\odot }\unit{s}^{-1}$\ have been substituted. This
balance is unstable. During the accretion phase, matter falls within $r_{b}$.
During the magnetic outburst phase, the central neutron star inflates a
bubble, known as pulsar wind nebula (PWN), whose radius $r_{\mathrm{act}}$\
is much larger than $r_{b}$, which is also larger than $r_{\mathrm{tr}}$. As
a result, the magnetic energy stored within the PWN can diffuse out of the
accretion flow.
During the normal accretion phase the balance between the magnetic pressure
and the accretion ram pressure cannot be maintained and the material falls
well within $r_{b}$. The magnetic outburst is usually triggered by some
instability of the PWN. The energy released by the spinning-down pulsar is
not immediately lost as radiation. It is estimated that about half of the
energy lost by Crab ($\sim 1.8\times 10^{49}\unit{erg}$) is still resident
within the synchrotron nebula \citep{Hester08}. This energy is very close to
the energy assumed here to power the third peak of iPTF14hls. The magnetic
activity may interplay with and even quench the accretion. If the accretion
is quenched by the magnetic activity, then the third peak is purely powered
by the magnetic activity. This argument also applies to the first peak where
a magnetar spin-down was proposed to contribute most of the SN
luminosity.
For a rapidly spinning magnetar, outside of the light cylinder, $%
R_{c}=cP/(2\pi )=3.8\times 10^{7}\left( P/8\unit{ms}\right) \unit{cm}$, the
magnetic field lines of the magnetar cannot corotate with the magnetar %
\citep{Shapiro83}, and therefore the field lines in the PWN wind tightly to
form a spindle nebula \citep[see Figure 3 of ][]{Hester08}, whose toroidal
field is amplified significantly and much stronger than its poloidal field.
The lower bound of $r_{\mathrm{act}}$\ given by Equation $\left( \ref{eq:r_b}%
\right) $\ yields an upper limit of the strength of the magnetic field
within the PWN, $B_{\mathrm{PWN}}<\left( 8\pi E_{\mathrm{burst}}/V\right)
^{1/2}=5\times 10^{6}\unit{G}$, where $V=4\pi r_{b}^{3}/3$\ is the lower
limit of the volume of the PWN, and $E_{\mathrm{burst}}\sim 1.1\times 10^{49}%
\unit{erg}$\ is the magnetic outburst energy.
The total magnetic outburst energy, $E_{\mathrm{burst}}\sim 1.1\times 10^{49}%
\unit{erg}$, should be accumulated during the first $\sim 320\unit{days}$\
before the third peak. This requires an average energy injection rate $%
4\times 10^{41}\unit{erg}\unit{s}^{-1}$\ during this period.\footnote{%
The true energy injection rate should be somewhat higher because a part of
the injected energy will leak out of the PWN.} We found that with the
magnetar parameters ($P_{0}\simeq 8\unit{ms}$, $B_{p}\simeq 5\times 10^{14}%
\unit{G}$) listed above to explain the first peak of iPTF14hls, it is just
right to give such an average energy injection rate. At day $1400$\ since
explosion, the magnetar's energy injection rate declines to $1\times 10^{40}%
\unit{erg}\unit{s}^{-1}$, which is lower than but comparable to the observed
luminosity of iPTF14hls. After $1400\unit{days}$\ since explosion, the
magnetar's energy injection rate dominates over the accretion energy
injection rate. Therefore the suggestion of magnetic outburst scenario for
the third peak can be falsified if future observation does not reveal a
flattening of the luminosity of iPTF14hls after $1400\unit{days}$.
As mentioned in Section \ref{sec:fit}, the initial explosion energy ($%
2.2\times 10^{51}\unit{erg}$) of this SN can be attributed to neutrino
heating. However, for an SN that is powered by fallback accretion, the
explosion energy may also be partially provided by accretion, especially the
recently proposed jet-feedback mechanism \citep{Gilkis16, Soker16,
Soker17}.
We mentioned in Section \ref{sec:Intro} that for spherical accretion, the
photons behind the accretion shock can diffuse out and heat the ejecta if
the condition $r_{s}>r_{\mathrm{tr}}$\ is fulfilled. Assuming a power-law
neutrino cooling function, the shock position takes the approximate form %
\citep{Houck91}%
\begin{equation}
r_{s}=1.6\times 10^{8}\left( \frac{\dot{M}_{\mathrm{ac}}}{M_{\odot }\unit{yr}%
^{-1}}\right) ^{-2/5}\unit{cm}.
\end{equation}%
With the peak accretion rate $\dot{M}_{\mathrm{ac}}\simeq 10^{-3}M_{\odot }%
\unit{yr}^{-1}$, the above equation gives $r_{s}=2.5\times 10^{9}\unit{cm}$,
which is at its face value smaller than the trapping radius $r_{\mathrm{tr}%
}\simeq 7\times 10^{10}\unit{cm}$. However, the above estimate of $r_{s}$\
should be treated as a lower limit because of the uncertainties in neutrino
cooling function and relativistic corrections \citep{Houck91}. It is
actually found that the condition $r_{s}>r_{\mathrm{tr}}$\ is satisfied when
$\dot{M}_{\mathrm{ac}}\lesssim 10^{-3}M_{\odot }\unit{yr}^{-1}$\ %
\citep{Houck91}, which is the case for the accretion episodes listed in
Table \ref{tbl:para}, except for the first accretion episode because for the
fallback rate $\dot{M}_{\mathrm{fb}}\simeq 0.7\times 10^{-8}M_{\odot }\unit{s%
}^{-1}$\ (see Table \ref{tbl:para}), the accretion rate is $\dot{M}_{\mathrm{%
ac}}=\xi \dot{M}_{\mathrm{fb}}\simeq 2\times 10^{-3}M_{\odot }\unit{yr}^{-1}$%
. For the scenario proposed in this paper to be valid, a magnetar energy
input is necessary for the first accretion episode. This also strengthens
the hypothesis of the formation of a magnetar in this SN explosion.
With the condition $r_{s}>r_{\mathrm{tr}}\simeq 7\times 10^{10}\unit{cm}$,
the shock radius $r_{s}$\ is about four orders of magnitude larger than the
Schwarzschild radius and it seems unlikely to convert 10\% of the
gravitational binding energy of the accreted material into radiation energy.
However, in the actual accretion process the accretion flow reaches
the neutron star surface, where a shock is formed and energy is advected along
with the outward-moving shock and carried far away from the neutron star surface.
The shock eventually stops at $r_{s}$\ because of efficient neutrino
cooling. During this process, a significant fraction (approximately 10\%) of
the gravitational binding energy is converted into shock energy. Note that
the fallback energy conversion factor $\epsilon \simeq 10^{-3}$\ consists of
two factors: the ratio of accretion rate to the fallback rate ($\xi =\dot{M}%
_{\mathrm{ac}}/\dot{M}_{\mathrm{fb}}\simeq 0.01$) and the conversion
efficiency (10\%) of the gravitational binding energy.
To account for the late steep decline of the light curve, we suggest that
accreted material has a power-law density profile with $\alpha =-22$\ at
late time. If we adopt the light curve decay index $-13.5$, as measured by
\cite{Sollerman18}, we found $\alpha =-18$. This density profile is very
steep and may be formed by the interaction between the bound and unbound
material. Future numerical simulations are encouraged to test this
hypothesis.
We propose that the third, brightest peak is mainly powered by a magnetic
outburst. Such an outburst is usually accompanied by X-ray emission, which is
however not detected \citep{Arcavi17}. The nondetection of X-ray emission
can be understood by considering the optical depth of the ejecta in the
X-ray band%
\begin{eqnarray}
\tau _{X} &=&\frac{3\kappa _{X}M_{\mathrm{ej}}}{4\pi v_{\mathrm{sc}}^{2}t^{2}%
} \notag \\
&=&20\left( \frac{M_{\mathrm{ej}}}{21M_{\odot }}\frac{\kappa _{X}}{0.33\unit{%
cm}^{2}\unit{g}^{-1}}\right) \left( \frac{v_{\mathrm{sc}}}{4300\unit{km}%
\unit{s}^{-1}}\right) ^{-2}\left( \frac{t}{350\unit{days}}\right) ^{-2},
\label{eq:x-depth}
\end{eqnarray}%
where the values of X-ray opacity $\kappa _{X}$, SN expansion velocity $v_{%
\mathrm{sc}}$, and the time since explosion $t$ have been substituted. Here $%
v_{\mathrm{sc}}$ is slightly larger than the initial expansion velocity $v_{%
\mathrm{sc}0}$ because of the energy injection. We see that at the time the
third peak was observed, the ejecta are still opaque to X-rays. In the above
estimate, $\kappa _{X}$ is taken to be the same as $\kappa $, that is, the
electron Thomson scattering opacity. This should be a lower limit to the
true X-ray opacity because other heavier elements could make a significant
contribution to $\kappa _{X}$.
Despite the nondetection of X-ray emission, the detection of $\gamma $-ray
emission, temporally and positionally consistent with iPTF14hls, in the
energy band between 0.2 and $500\unit{GeV}$\ was reported by \cite{Yuan18}.
The $\gamma $-ray source appears $\sim 300\unit{days}$\ after the first
optical detection of iPTF14hls and is still detectable up to $\sim 850\unit{%
days}$. Translated to the time since SN explosion in our model, the $\gamma $%
-ray source is present from $\sim 420\unit{days}$\ to $\sim 970\unit{days}$.
According to Equation $\left( \ref{eq:x-depth}\right) $, assuming a lower
limit to $\gamma $-ray opacity $\kappa _{\gamma }=0.33\unit{cm}^{2}\unit{g}%
^{-1}$,\footnote{%
At late stage, the $\gamma $-ray photons come from $^{56}$Co decay with
typical energy $\sim 1\unit{MeV}$. At this energy, the atomic scattering
opacity \citep{Kotera13} of a type II SN is approximately equal to the
electron Thomson scattering opacity.} the SN ejecta are still opaque to $%
\gamma $-ray emission at time $t\sim 970\unit{days}$. Therefore, in our
model the $\gamma $-ray emission cannot come from the deep interior of the
SN ejecta.
This $\gamma $-ray emission may alternatively result from the interaction
between the ejecta and circumstellar medium (CSM) or produced by a blazar
because there is a blazar candidate within the error circle of the $\gamma $%
-ray source \citep{Yuan18}. Late-time observation of iPTF14hls revealed
narrow H$\alpha $\ emission \citep{Andrews18}, which may be evidence for
circumstellar interaction where unshocked circumstellar material is ionized
by the shock emission and recombines. However, such evidence for interaction
only appears at 3 years after the first optical detection of iPTF14hls. The
interaction origin of the $\gamma $-ray emission is also in tension with the
aforementioned nondetection of X-ray and radio emission. \cite{Sollerman18}
argue that the narrow H$\alpha $\ emission may come from an H {\scriptsize II}
region that is located just at the SN position. Because the $\gamma $-ray
association with iPTF14hls is only tentative, we consider it more likely
that the $\gamma $-ray emission is produced by the blazar.
Observations indicate that the photospheric radius of iPTF14hls is quite
different from the line-forming region. \cite{Arcavi17} estimate the latter
at position of $vt$, where $v$ is the SN expansion velocity. Although the
photospheric radius of an SN recedes as the SN expands and inner material is
observed, the large ejecta mass, as inferred from light curve modeling,
implies that the photospheric recession should be negligible during the
first two years since its discovery. The discrepancy of these two radii
might be linked to the existence of persistent Balmer series P Cygni lines
observed in the spectra of iPTF14hls \citep{Arcavi17}. The presence of
P-Cygni profiles betrays the existence of a stellar wind, as observed in
Wolf-Rayet stars \citep{Willis82} and luminous blue variables %
\citep{Israelian99}. We suggest that this wind is far above the photosphere
and is responsible for the spectral lines.
The rarity of iPTF14hls among SNe II-P may be understood because of its
extreme ejecta mass. This large ejecta mass may also account for the reason
why so much mass falls back so as to give a multi-peaked light curve.
In summary, iPTF14hls can be explained by the episodic fallback accretion
model.\footnote{%
Alternatively, the CSM interaction is also a plausible model. %
\citet{LiuWang18} propose a multiple ejecta-CSM interaction model and
employed it to model multi-peak SNe iPTF15esb and iPTF13dcc.} The fitting
parameters suggest a RSG as the progenitor. Although the central object
cannot be identified, the rapid third peak and other considerations might
indicate the formation of a neutron star that experienced a magnetic
outburst lasting for $\sim 30\unit{days}$ with a total burst energy $%
1.1\times 10^{49}\unit{erg}$.
\begin{acknowledgements}
We thank Iair Arcavi and Jesper Sollerman for providing us the observational data.
We also thank the anonymous referee for helpful comments.
This work is supported by the National Program on Key Research and Development
Project of China (Grant Nos. 2016YFA0400801 and 2017YFA0402600), National
Basic Research Program of China (\textquotedblleft 973" Program, Grant
No. 2014CB845800) and the National Natural Science Foundation of China (Grant
Nos. 11573014, 11533033, 11673006). X. Wang is supported by the National Natural Science
Foundation of China (NSFC grants 11325313 and 11633002), and the National
Program on Key Research and Development Project (grant no. 2016YFA0400803).
S.Q.W. and L.D.L. are also supported by China Scholarship Program to conduct research
at U.C. Berkeley and UNLV, respectively.
\end{acknowledgements}
|
1,314,259,994,985 | arxiv | \section{Introduction}
\label{sec:Introduction}
Let $M$ be a compact Riemannian manifold. The \textbf{length spectrum} $L(M)$ of $M$ is the set of all lengths of closed geodesics on $M$ counted with multiplicities. Two manifolds $M_1$ and $M_2$ are said to be \textbf{iso-length spectral} if $L(M_1)=L(M_2)$.
In \cite{Sunada}, Sunada provided a method to construct iso-length spectral manifolds that are frequently not isometric (see also \cite[Ch.11-13]{BuserBook}). This requires a notion from group theory.
Let $G$ be a finite group. Two subgroups $H$ and $K$ of $G$ are said to be \textbf{almost conjugate} if, for any $g\in G$,
\[
\left|H\cap(g)\right|=\left|K\cap(g)\right|,
\]
where $(g)$ denotes the conjugacy class of $g$ in $G$.
\begin{mythm}[\textbf{Sunada}]
\label{T:Sunada construction}
Let $M_0$ be a closed Riemannian manifold, $G$ a finite group, and $H$ and $K$ almost conjugate subgroups of $G$. If there is a surjective homomorphism from $\pi_1(M_0)$ onto $G$, then the finite covering spaces $M_H$ and $M_K$ of $M_0$ corresponding to the subgroups $H$ and $K$, respectively, are iso-length spectral.
\end{mythm}
When $H$ and $K$ are not conjugate in $G$, the manifolds $M_H$ and $M_K$ can often be shown to be nonisometric. For example, when $M_0$ is a surface, a generic hyperbolic metric on $M_0$ will produce nonisometric $M_H$ and $M_K$; see \cite[Ch.12.7]{BuserBook}.
For surfaces, the simple closed geodesics often carry more topological information. Accordingly, the \textbf{simple length spectrum} $L^s(M)$ of $M$ is defined to be the set of all lengths of simple closed geodesics on $M$ counted with multiplicities; see \cite{McShane}. Two manifolds $M_1$ and $M_2$ are said to be \textbf{simple iso-length spectral} if $L^s(M_1)=L^s(M_2)$.
\begin{question}
\label{Q:Question1}
Are there nonisometric simple iso-length spectral hyperbolic surfaces?
\end{question}
In \cite{McShane}, McShane and Parlier give examples of pairs of 4-holed spheres with geodesic boundary which have the same \textit{interior simple length spectrum} (one ignores the boundary lengths). They do in fact have different boundary lengths, and so they have different simple length spectra.
One can ask if Sunada's construction provides a positive resolution to Question~\ref{Q:Question1}.
\begin{question}
\label{Q:question2}
Does Sunada's construction, for a given homomorphism \\$\rho:\pi_1(M_0) \to G$, generically give simple iso-length spectral surfaces?
\end{question}
To answer Question~\ref{Q:question2}, we choose one of the examples of almost conjugate subgroups Sunada provided in his paper \cite{Sunada}.
\begin{ex}
$G=(\mathbb Z/8\mathbb Z)^{\times} \ltimes \mathbb Z/8\mathbb Z$ with usual action of $(\mathbb Z/8\mathbb Z)^{\times}$ on $\mathbb Z/8\mathbb Z$.
$H=\left\{(1,0),(3,0),(5,0),(7,0)\right\}$ and
$K=\left\{(1,0),(3,4),(5,4),(7,0)\right\}$ are almost conjugate but not conjugate.
\end{ex}
Our main theorem is the following.
\begin{thm}
\label{T:main theorem}
Let $M_0$ be a closed oriented surface of genus 2, $G$, $H$, and $K$ the groups provided in the example above.
There is a surjective homomorphism $\rho:\pi_{1}(M_0) \to G$ such that, for almost every $[m]\in \mathcal{T}(M_0)$, the corresponding iso-length spectral surfaces $M_H$ and $M_K$ are not simple iso-length spectral.
\end{thm}
In fact, we prove a little bit more. We define the \textbf{length set} and the \textbf{simple length set} of a manifold $M$ to be the set of all lengths of closed geodesics on $M$ without multiplicities and the set of all lengths of simple closed geodesics on $M$ without multiplicities, respectively. Then from the proof of Theorem~\ref{T:main theorem} we have the following corollary.
\begin{cor}
\label{C:cor}
The surfaces $M_H$ and $M_K$ in Theorem~\ref{T:main theorem} have the same length set but they do not have the same simple length set.
\end{cor}
This corollary shows that the construction of length equivalent manifolds in~\cite{Leininger2} does not necessarily give simple length equivalent manifolds.
\bigskip
{\bf Outline of the paper.} Section~\ref{sec:background} contains the relevant background.
In Section~\ref{sec:proof of the main theorem}, we give the proof of the main theorem. The sketch of the proof is as follows. We begin by defining a surjective homomorphism $\rho:\pi_{1}(M_0) \to G$ and a closed curve $\alpha$ in $M_0$. By Sunada's construction, the covering spaces $\pi_H:M_H \to M_0$ and $\pi_K:M_K \to M_0$ corresponding to the subgroups $H$ and $K$ are iso-length spectral. We then show that, for almost every $[m]\in\mathcal{T}(M_0)$, the induced metrics on $M_H$ and $M_K$ have the following property. In each of these two covering spaces $M_H$ and $M_K$, there are exactly four closed geodesics having the same length as $\alpha$, namely the two degree-one components of $\pi_H^{-1}(\alpha)$ (and $\pi_K^{-1}(\alpha)$) and their images under the lifts of the hyperelliptic involution $\tau:M_0 \to M_0$. We also show that these four closed geodesics on $M_H$ are nonsimple while the other four closed geodesics on $M_K$ are simple. Therefore $M_H$ and $M_K$ are not simple iso-length spectral.
We remark on one subtlety of the proof. According to \cite{Randol}, there are curves $\gamma, \gamma'$ on $M_0$ such that for every hyperbolic metric $m$ on $M_0$, $\length_m(\gamma)=\length_m(\gamma')$. Although these are nonsimple on $M_0$, they become simple in a finite sheeted cover, so must be accounted for in our proof.
\section{Background}
\label{sec:background}
Let $M$ be a closed oriented surface of genus $g \geq 2$. We denote the Teichm\"uller space of $M$ by
\[
\mathcal{T}(M)=\left\{[m]\mid m\:\:\mbox{is a hyperbolic metric on}\;M\right\},
\]
where $[m]$ represents the equivalence class via the equivalence relation $m \sim m'$ if there exists an isometry $f:(M,m) \to (M,m')$ such that $f \simeq id_M $, see e.g. \cite{BuserBook}.
Given $[m]\in\mathcal{T}(M)$, the holonomy homomorphism
\[
\rho_m:\pi_1(M) \to \PSL(\mathbb R)
\]
is well defined up to conjugation in $\PSL(\mathbb R)$. This determines an embedding
\begin{equation}
\label{eq:embedding}
\mathcal{T}(M) \to \Hom(\pi_1(M),\PSL(\mathbb R))/\mbox{conjugation}
\end{equation}
by $[m]\mapsto[\rho_m]$.
Let $\gamma$ be an essential closed curve on $M$. The length function of $\gamma$
\[
\length_{(\cdot)}(\gamma): \mathcal{T}(M) \to \mathbb R_+
\]
is defined as the length of the $m$-geodesic homotopic to $\gamma$. Using the holonomy homomorphism, one can compute
\begin{equation}
\label{eq:length_function}
\length_{[m]}(\gamma)=2\cosh^{-1}
\left(
\frac
{\left|
\tr(
\rho_m(\gamma)
)
\right|}
{2}
\right).
\end{equation}
The embedding~(\ref{eq:embedding}) makes $\mathcal{T}(M)$ into a real analytic manifold. By~(\ref{eq:length_function}), the length functions are analytic (see e.g. \cite{kerckhoff} or \cite{Abikoff}). Since $\mathcal{T}(M)$ is connected, we then have the following theorem; see \cite{McShane}.
\begin{thm}
\label{T:length_function}
Let $c\in\mathbb R$, $\alpha$ and $\beta$ be closed curves on $M$. The function
\[
f=c\cdot\length_{(\cdot)}(\beta)-\length_{(\cdot)}(\alpha):\mathcal{T}(M) \to \mathbb R
\]
is real analytic, in particular, $f\neq0$ almost everywhere or $f=0$ everywhere.
\end{thm}
Let $\gamma$ and $\gamma'$ be closed curves on $M$. The geometric intersection number of $\gamma$ and $\gamma'$ is defined by
\[
i(\gamma,\gamma')=\min_{\overline{\gamma},\overline{\gamma}\:'}|\left(\overline{\gamma}\times\overline{\gamma}\:'\right)^{-1}\left(\Delta\right)|,
\]
where $\overline{\gamma}$ and $\overline{\gamma}\:'$ are in the homotopy classes $[\gamma]$ and $[\gamma']$, respectively, $\overline{\gamma}\times\overline{\gamma}\:':S^1 \times S^1 \to M \times M$, and $\Delta \subset M \times M$ is the diagonal.
The next theorem provides a tool for dealing with the phenomenon arising from \cite{Randol}.
\begin{thm}
\label{T:geometric_intersection}
Let $\gamma$, $\gamma'$ be closed curves on $M$ and $k\in\mathbb R$.
If \;$\length_m(\gamma)=k\cdot\length_m(\gamma')$, for all $[m]\in \mathcal{T}(M)$, then $i(\gamma,\alpha)=k\cdot i(\gamma',\alpha)$, for all simple closed curves $\alpha$ on $M$.
\end{thm}
\begin{proof}
For $k = 1$, a proof can be found in \cite{Leininger}, for example. The same idea works here, and we sketch it.
Given a simple closed curve $\alpha$, there exists a sequence $\left\{\left[m_n\right]\right\}\subset \mathcal{T}(M)$ such that
\[
\frac{1}{n}\cdot\length_{\left[m_n\right]}(\eta) \to i(\eta,\alpha),
\]
for all closed curves $\eta$ on $M$.
Now suppose $\length_{[m]}(\gamma)=k\cdot\length_{[m]}(\gamma')$ for all $[m] \in \mathcal{T}(M)$. Then
\[
\frac{1}{n}\cdot\length_{\left[m_n\right]}(\gamma) \to i(\gamma,\alpha)
\]
and
\[
\frac{k}{n}\cdot\length_{\left[m_n\right]}(\gamma') \to k\cdot i(\gamma',\alpha).
\]
So $k\cdot i(\gamma',\alpha)=i(\gamma,\alpha)$.
\end{proof}
The following theorem is shown in \cite{Leininger}.
\begin{thm}
\label{T:homology}
Given $\gamma$ and $\gamma'$ closed curves on $M$, if
\[
\length_{[m]}(\gamma)=\length_{[m]}(\gamma'),
\]
for all $[m]\in \mathcal{T}(M)$, then $[\gamma]=\pm[\gamma']$ in $H_1(M)$.
\end{thm}
\section{Proof of the main theorem}
\label{sec:proof of the main theorem}
Let $M_0$ be a closed oriented surface of genus 2. We write the fundamental group of $M_0$ as $\pi_{1}(M_0)=\langle a,b,c,d|[a,b][c,d]=1 \rangle$, see Figure~\ref{F:2g with generators}.
\begin{figure}[ht]
\begin{center}
\includegraphics[height=2cm]{2g_generators.pdf}
\caption{$M_0$ with the generators of $\pi_{1}(M_0)$.}
\label{F:2g with generators}
\end{center}
\end{figure}
Let $G$, $H$ and $K$ be groups given in the example in Section~\ref{sec:Introduction}. We define a surjective homomorphism $\rho:\pi_{1}(M_0) \to G$ by
\[
\rho(a)=(3,0), \quad
\rho(b)=(5,0), \quad
\rho(c)=(1,0), \quad\mbox{and}\quad
\rho(d)=(1,1).
\]
Let $\pi:M \to M_0$, $\pi_H:M_H \to M_0$ and $\pi_K:M_K \to M_0$ be the covering spaces of $M_0$ corresponding to $\ker(\rho)$, $\rho^{-1}(H)$ and $\rho^{-1}(K)$, respectively.
To help visualize the covering space $M$, first we construct the covering space $\pi:M_N \to M_0$ corresponding to the subgroup $N=\mathbb Z/8\mathbb Z$ of $G$, as shown in Figure~\ref{F:the covering space $M_N$}. Then we construct $M$ from the surjective homomorphism $\sigma:\pi_{1}(M_N) \to N$, the restriction of $\rho$ to $\pi_{1}(M_N) < \pi_{1}(M_0)$, see Figure~\ref{F:the covering space $M$}. Observe that the generator of $\mathbb Z/8\mathbb Z \cong N < G$ translates each piece in Figure~\ref{F:the covering space $M$} to the right, and sends the last piece to the first piece.
\begin{figure}[ht]
\begin{center}
\includegraphics[height=4cm]{covering_space_M_N.pdf}
\caption{The covering space $M_N$.}
\label{F:the covering space $M_N$}
\end{center}
\end{figure}
\begin{figure}[ht]
\begin{center}
\includegraphics[height=6cm]{covering_space_M.pdf}
\caption{The covering space $M$.}
\label{F:the covering space $M$}
\end{center}
\end{figure}
\begin{lem}
\label{L:beta}
Let $\alpha = abd[d,c^{-1}]d^{-1}$ be a closed curve on $M_0$. Then $\pi_{H}^{-1}(\alpha)=\beta_{1}^{H}\cup\dots\cup\beta_{5}^{H}$, $\pi_{K}^{-1}(\alpha)=\beta_{1}^{K}\cup\dots\cup\beta_{5}^{K}$ where $\pi_{H}|_{\beta_{i}^{H}}$, $\pi_{K}|_{\beta_{i}^{K}}$ are degree one, for $i=1,2$, and degree two, for $i=3, 4, 5$. Furthermore $\beta_{1}^{H}$, $\beta_{2}^{H}$ are nonsimple and $\beta_{1}^{K}$, $\beta_{2}^{K}$ are simple.
\end{lem}
\begin{figure}[ht]
\begin{center}
\includegraphics[height=2.5cm]{2g_curve.pdf}
\caption{The closed curve $\alpha$ on $M_0$.}
\label{F:Curve alpha}
\end{center}
\end{figure}
\begin{figure}[ht]
\begin{center}
\includegraphics[height=6cm]{covering_space_M_curve.pdf}
\caption{The covering space $M$ and a component $\gamma_{1}$ of $\pi^{-1}(\alpha)$.}
\label{F:the covering space $M$ and alpha}
\end{center}
\end{figure}
\begin{proof}
First we look at a component $\gamma_1$ of $\pi^{-1}\left(\alpha\right)$ in $M$, see Figure~\ref{F:the covering space $M$ and alpha}. Observe that the preimage of $\alpha$ is sixteen simple closed curves on $M$, denoted $X=\left\{\gamma_{1},\dots,\gamma_{16}\right\}$. $G$ acts on $X$ and this action is equivalent to the action of $G$ on the cosets of $L=\Stab_{G}(\gamma_{1})=\left\{(1,0), (7,0)\right\}$. More precisely, the bijection
\[G/\!/L \to X\]
given by
\[gL \mapsto g\cdot\gamma_{1}\]
is equivariant with respect to the actions of $G$.
We assume $\left\{\gamma_{1},\dots,\gamma_{16}\right\}$ are numbered so that
\[
\begin{array}{rrrr}
\gamma_{1} \rightarrow L, & \gamma_{2}\rightarrow (1,1)L, & \gamma_{3}\rightarrow(1,2)L, & \gamma_{4}\rightarrow(1,3)L,\\
\gamma_{5} \rightarrow (1,4)L, & \gamma_{6}\rightarrow (1,5)L, & \gamma_{7}\rightarrow(1,6)L, & \gamma_{8}\rightarrow(1,7)L,\\
\gamma_{9} \rightarrow (3,0)L, & \gamma_{10}\rightarrow (3,3)L, & \gamma_{11}\rightarrow(3,6)L, & \gamma_{12}\rightarrow(3,1)L,\\
\gamma_{13}\rightarrow (3,4)L, & \gamma_{14}\rightarrow (3,7)L, & \gamma_{15}\rightarrow(3,2)L, & \gamma_{16}\rightarrow(3,5)L.
\end{array}
\]
We use the above representations to compute $H$ and $K$ orbits under the actions of $H$ and $K$ on $X$. Then the $H$ orbits partition $\left\{\gamma_{1},\dots,\gamma_{16}\right\}$ as
\[
\left\{\gamma_{1},\gamma_{9}\right\},
\left\{\gamma_{5},\gamma_{13}\right\},
\left\{\gamma_{2},\gamma_{8},\gamma_{10},\gamma_{16}\right\},
\left\{\gamma_{3},\gamma_{7},\gamma_{11},\gamma_{15}\right\},
\left\{\gamma_{4},\gamma_{6},\gamma_{12},\gamma_{14}\right\}
\]
and the $K$ orbits partition $\left\{\gamma_{1},\dots,\gamma_{16}\right\}$ as
\[
\left\{\gamma_{1},\gamma_{13}\right\},
\left\{\gamma_{5},\gamma_{9}\right\},
\left\{\gamma_{2},\gamma_{8},\gamma_{12},\gamma_{14}\right\},
\left\{\gamma_{3},\gamma_{7},\gamma_{11},\gamma_{15}\right\},
\left\{\gamma_{4},\gamma_{6},\gamma_{10},\gamma_{16}\right\}.
\]
All closed curves in each $H$ orbit lie above exactly one closed curve on $M_{H}$ and all closed curves in each $K$ orbit lie above exactly one closed curve on $M_{K}$. So we can write $\pi_{H}^{-1}(\alpha)=\beta_{1}^{H}\cup\dots\cup\beta_{5}^{H}$ and $\pi_{K}^{-1}(\alpha)=\beta_{1}^{K}\cup\dots\cup\beta_{5}^{K}$. We may associate $\beta_{1}^{H}$, $\beta_{2}^{H}$, $\beta_{1}^{K}$ and $\beta_{2}^{K}$ with the orbits $\left\{\gamma_{1}, \gamma_{9}\right\}$, $\left\{\gamma_{5}, \gamma_{13}\right\}$, $\left\{\gamma_{1}, \gamma_{13}\right\}$ and $\left\{\gamma_{5}, \gamma_{9}\right\}$, respectively.
Next we observe that $\pi_{H}|_{\beta_{i}^{H}}$, $\pi_{K}|_{\beta_{i}^{K}}$ are degree one, for $i=1,2$, and degree two, for $i=3, 4, 5$.
For the simplicity of $\beta_{1}^{H}$, $\beta_{2}^{H}$, $\beta_{1}^{K}$ and $\beta_{2}^{K}$, we look at their associated orbits. We observe that $\gamma_{1}$ intersects $\gamma_{9}=(3,0)\cdot\gamma_{1}$ nontrivially by inspecting Figure~\ref{F:the covering space $M$} for the actions of $G$ and Figure~\ref{F:the covering space $M$ and alpha} for the picture of $\gamma_{1}$. Similarly we can compute
\[
\begin{array}{rr}
\gamma_{1}\cap \gamma_{9\ } \neq \emptyset, &\gamma_{5}\cap \gamma_{13} \neq \emptyset, \\
\gamma_{1}\cap \gamma_{13} = \emptyset, &\gamma_{5}\cap \gamma_{9\ } = \emptyset.
\end{array}
\]
Since the $H$ orbit $\left\{\gamma_{1}, \gamma_{9}\right\}$ corresponding to $\beta_{1}^{H}$ contains intersecting curves, $\beta_{1}^{H}$ is nonsimple. Similarly, $\beta_{2}^{H}$ is also nonsimple. Since the $K$ orbit $\left\{\gamma_{1}, \gamma_{13}\right\}$ corresponding to $\beta_{1}^{K}$ contains pairwise disjoint curves, $\beta_{1}^{K}$ is simple. Similarly, $\beta_{2}^{K}$ is also simple.
\end{proof}
To prove Theorem~\ref{T:main theorem}, we will show that generically a hyperbolic metric on $M_0$ lifted to a hyperbolic metric on $M_H$ has the property that there are exactly four closed curves on $M_H$ having the same length as $\beta_{1}^{H}$ (and $\beta_{2}^{H}$) and these four closed curves are nonsimple. In the previous lemma, we found two such closed curves, namely $\beta_{1}^{H}$ and $\beta_{2}^{H}$. Lemma~\ref{L:tau} provides the other two closed curves and we will use Lemma~\ref{L:trace check} to show that there are exactly four such closed curves. Since $M_K$ has a simple closed curve, $\beta_{1}^{K}$, of the same length in its lifted metric, $M_H$ and $M_K$ cannot be simple iso-length spectral.
Let $\tau: M_0 \to M_0$ be the hyperelliptic involution. $\tau$ is isotopic to an isometry for any hyperbolic metric on $M_{0}$. So for any curve $\lambda$ on $M_0$, $\length_{M_0}\left(\lambda\right)=\length_{M_0}\left(\tau\left(\lambda\right)\right)$. For a specific basepoint, the induced map $\tau_\ast:\pi_{1}(M_0) \to \pi_{1}(M_0)$ can be computed to be
\[
\begin{array}{ll}
\tau_\ast\left(a\right)= a^{-1}, &\tau_\ast\left(b\right)= b^{-1},\\
\tau_\ast\left(c\right)= ac^{-1}dc^{-1}d^{-1}ca^{-1}, &\tau_\ast\left(d\right)= b^{-1}ad^{-1}ba^{-1}.
\end{array}
\]
We have the following lemma.
\begin{lem}
\label{L:tau}
The hyperelliptic involution $\tau: M_0 \to M_0$ lifts to $\tau_H: M_H \to M_H$ and $\tau_K: M_K \to M_K$. In particular, $\tau_H\left(\beta_{i}^H\right)\subset M_H$ is nonsimple and $\tau_K\left(\beta_{i}^K\right)\subset M_K$ is simple, for $i=1,2$.
\end{lem}
\begin{proof}
Let $\psi: G \to G$ be the automorphism of $G$ defined by $\psi(j,k)=(j,-k)$, for any element $(j,k) \in G$. Then we can compute $\psi \circ \rho = \rho \circ \tau_{\ast}$ and $H = \psi^{-1}(H)$. So $\rho^{-1}(H)=\rho^{-1}(\psi^{-1}(H))= \tau_{\ast}^{-1}(\rho^{-1}(H))$. Thus
\[
\tau_{\ast}\left(\left(\pi_{H}\right)_{\ast}\left(\pi_{1}\left(M_{H}\right)\right)\right)
=\tau_{\ast}\left(\rho^{-1}\left(H\right)\right)
=\rho^{-1}\left(H\right)
=\left(\pi_{H}\right)_{\ast}\left(\pi_{1}\left(M_{H}\right)\right).
\]
Hence the lifting criterion implies that we may lift $\tau$ to $\tau_H$. The existence of a lift $\tau_K$ to $M_K$ is proven in the same way.
\end{proof}
\begin{lem}
\label{L:trace check}
For almost every $[m] \in \mathcal{T}(M_0)$, if $\gamma$ is a closed curve, $k \in \mathbb Q$ and
\begin{center}
$k \cdot \length_{[m]}\left(\gamma\right)=\length_{[m]}\left(\alpha\right)$
\end{center}
then $k=1$ and $\gamma=\alpha$ or $\tau\left(\alpha\right)$.
\end{lem}
\begin{proof}
For any $\gamma$ and any $k$, either $k\cdot\length_{[m]}(\gamma)=\length_{[m]}(\alpha)$ is true for every $[m]$ or $k\cdot\length_{[m]}(\gamma)\neq \length_{[m]}(\alpha)$ for almost every $[m]$, by Theorem~\ref{T:length_function}. So it suffices to show that if $k\cdot\length_{[m]}(\gamma)=\length_{[m]}(\alpha)$, for every $[m]$, then $k=1$ and $\gamma=\alpha$ or $\tau\left(\alpha\right)$.
\begin{figure}[ht]
\begin{center}
\includegraphics[height=2.5cm]{2g_x.pdf}
\caption{The simple closed curves $y_1$ and $y_2$ on the surface $M_0$.}
\label{F:x}
\end{center}
\end{figure}
Let $y_1$ be a simple closed curve as shown in Figure~\ref{F:x}. The geometric intersection number of $\alpha$ and $y_1$ is $i(\alpha,y_1)=1$. Since $k\cdot\length_{[m]}(\gamma)=\length_{[m]}(\alpha)$, by Theorem~\ref{T:geometric_intersection}, $k \cdot i(\gamma,y_1)=i(\alpha,y_1)=1$. Since the geometric intersection numbers are nonnegative integers, $k=1$.
To prove that $\gamma=\alpha$ or $\tau(\alpha)$, we find some necessary conditions for $\gamma$ to have the same length as $\alpha$, for every $[m]\in\mathcal{T}(M_0)$.
Let $y_2$ be the simple closed curve shown in Figure~\ref{F:x}. Since $i(\gamma,y_2)=i(\alpha,y_2)=0$ by Theorem~\ref{T:geometric_intersection}, $\gamma$ and $\alpha$ are contained in $M_0-y_2$.
We cut $M_0$ along the simple closed curve $y_2$ to get a torus with two holes and change the basis $\left\{a,b,d\right\}$ to the basis $\left\{a,b,x=da^{-1}\right\}$, see Figure~\ref{F:2holes}. Then $\alpha=abxaba^{-1}b^{-1}x^{-1}$ and $\tau_\ast(\alpha)=a^{-1}b^{-1}b^{-1}x^{-1}ba^{-1}b^{-1}axb$.
\begin{figure}[ht]
\begin{center}
\includegraphics[height=3cm]{2holes.pdf}
\caption{The torus with two holes, $M_0-y_2$.}
\label{F:2holes}
\end{center}
\end{figure}
Consider the spine shown in Figure~\ref{F:spine}; we homotope $\alpha$ and $\gamma$ into the spine, as edge loops without backtracking. Then, by considering metrics on $M_0$ where the lengths of some of the edges are bounded and others tend to infinity, we see that in order for $\gamma$ to have the same length as $\alpha$ in $M_0$,
\begin{center}
$\sharp \left\{a_1 \:\mbox{edges of}\:\gamma\right\} = \sharp \left\{a_1 \:\mbox{edges of}\: \alpha \right\} = 3$,\\
$\sharp \left\{x_1 \:\mbox{edges of}\:\gamma\right\} = \sharp \left\{x_1 \:\mbox{edges of}\: \alpha \right\} = 3$,\\
$\sharp \left\{b_1 \:\mbox{edges of}\: \gamma\right\} + \sharp \left\{b_2 \:\mbox{edges of}\: \gamma\right\} = \sharp \left\{b_1 \:\mbox{edges of}\: \alpha\right\} + \sharp \left\{b_2 \:\mbox{edges of}\:\alpha\right\} =8$.
\end{center}
\begin{figure}[ht]
\begin{center}
\includegraphics[height=3cm]{spine.pdf}
\caption{The torus with two holes, $M_0-y_2$, with spine.}
\label{F:spine}
\end{center}
\end{figure}
Since $\length_{[m]}(\gamma)=\length_{[m]}(\alpha)$ and $\left[\alpha\right]=\left[ab\right]\in H_1(M_0)$, $\left[\gamma\right]=\pm\left[ab\right]\in H_1(M_0)$, by Theorem~\ref{T:homology}. Thus from the observation of the edge counts above (replacing $\gamma$ with $\gamma^{-1}$ if necessary), we have the following conditions:
\begin{enumerate}
\item $\gamma$ consists of exactly two $a$'s, one $a^{-1}$, one $x$, and one $x^{-1}$,
\item $\sharp \left\{b^{-1}\mbox{'s in}\:\gamma\right\} = \sharp \left\{b \mbox{'s in}\:\gamma\right\} -1$, and
\item $\sharp \left\{b_1 \:\mbox{edges of}\:\gamma\right\} + \sharp \left\{b_2 \:\mbox{edges of}\: \gamma\right\} = 8$.
\end{enumerate}
Next we find all closed curves on $M_0$ satisfying these three conditions. By the conditions above we know the exact number of $a$'s, $a^{-1}$'s, $x$'s, and $x^{-1}$'s that appear in $\gamma$. So we only need to determine the possible number of $b$'s and $b^{-1}$'s. To do this, we note that while the number of $a_1$-edges and the number of $x_1$-edges can be computed directly by counting the number of $\left\{a, a^{-1}\right\}$'s and $\left\{x, x^{-1}\right\}$'s, respectively, some combinations of $x$'s and $b$'s produce cancellations in the combined $b_1$- and $b_2$-edge count. For example, $x$ alone contributes $2$ to the combined $b_1$- and $b_2$-edge count, and $b$ alone also contributes $2$, but $xb$ contributes only $2$ in total.
Taking this type of cancellation into consideration, we can produce a list $A$ of $4320$ words in $\left\{a^{\pm1},b^{\pm1},x^{\pm1}\right\}$ that contains all curves satisfying the three conditions.
One can explicitly construct $[m] \in \mathcal{T}(M_0)$, a hyperbolic metric on $M_0$ such that
\[ \rho_m(a)= \left( \begin{array}{cc}
5/4 & 3/4 \\
3/4 & 5/4
\end{array} \right),\]
\[ \rho_m(b)= \left( \begin{array}{cc}
4 & 0 \\
0 & 1/4
\end{array} \right),\]
\[ \rho_m(x)= \left( \begin{array}{cc}
5/3 & -16/3 \\
-1/3 & 5/3
\end{array} \right).\]
Then the trace of $\rho_m(\alpha)$ is
\[
\tr(\rho_m(\alpha))= 109505/2048.
\]
By using Mathematica, we have that the elements in $A$ having the same trace squared as $\alpha$ are $\alpha$ and $\tau(\alpha)^{-1}$.
So, by equation~(\ref{eq:length_function}), the only curves in $A$ that have the same length in $M_0$ as $\alpha$ are $\alpha$ and $\tau(\alpha)$.
Thus if $\length_{[m]}\left(\gamma\right)=\length_{[m]}\left(\alpha\right)$, for every $[m] \in \mathcal{T}(M_0)$, then $\gamma=\alpha$ or $\tau\left(\alpha\right)$.
\end{proof}
\begin{proof}[Proof of Theorem ~\ref{T:main theorem}]
Let $\rho:\pi_{1}(M_0) \to G$ be the surjective homomorphism defined in this section.
Let $\alpha = abd[d,c^{-1}]d^{-1}$ be a closed geodesic on $M_0$.
By Lemma~\ref{L:beta} and Lemma~\ref{L:tau}, for almost every $[m] \in \mathcal{T}(M_0)$, there are four nonsimple closed geodesics
$
\left\{
\beta_{1}^{H},
\beta_{2}^{H},
\tau_H\left(\beta_{1}^H\right),
\tau_H\left(\beta_{2}^H\right)
\right\}
$
on $M_H$ having length $l=\length_{[m]}(\beta_{1}^{H})=\length_{[m]}(\alpha)$ and there are four simple closed geodesics
$
\left\{
\beta_{1}^{K},
\beta_{2}^{K},
\tau_K\left(\beta_{1}^K\right),
\tau_K\left(\beta_{2}^K\right)
\right\}
$
on $M_K$ having length $l$.
If $\gamma^H$ is a closed geodesic on $M_H$ having length
\[
l=\length_{[m]}(\beta_{1}^{H})=\length_{[m]}(\alpha),
\]
then $\pi_H(\gamma^H)$ is a closed geodesic on $M_0$ having length
\[
k\cdot l=k\cdot\length_{[m]}(\beta_{1}^{H})=k\cdot\length_{[m]}(\alpha),
\]
for some $k = 1, 1/2, 1/4, \mbox{or}\;1/8$, since the degree of $\pi_H$ and $\pi_K$ is $8$.
By Lemma~\ref{L:trace check}, $k=1$ and $\pi_H(\gamma^H)=\alpha$ or $\tau(\alpha)$. Thus $\gamma^H$ is one of the four nonsimple closed curves above. Hence there are exactly four closed curves on $M_H$ having length $l$ and those four closed curves are nonsimple. Similarly, there are exactly four closed curves on $M_K$ having length $l$ and those four closed curves are simple.
Therefore $M_H$ and $M_K$ are not simple iso-length spectral.
\end{proof}
\begin{proof}[Proof of Corollary~\ref{C:cor}]
As the proof of Theorem~\ref{T:main theorem} shows, for almost every $[m] \in \mathcal{T}(M_0)$, there is a simple closed geodesic on $M_K$ with the same length as $\alpha$ on $M_0$, but no such simple geodesic on $M_H$. Therefore, $M_H$ and $M_K$ are not simple length equivalent.
\end{proof}
\section{Final discussion}
\label{sec:Final discussion}
Theorem~\ref{T:main theorem} should hold for any surjective homomorphism $\rho: \pi_1(M_0) \to G$ and for any closed surface $M_0$. Indeed, it can be shown that for $G$ as in Theorem~\ref{T:main theorem} and any $\rho$, there is a genus $2$ or $3$ subsurface $\Sigma \subset M_0$ so that the restriction $\rho|_{\pi_1(\Sigma)}$ is surjective. Then, one can list all such surjective homomorphisms and try to construct a curve $\alpha$ in $\Sigma$ playing the role of $\alpha$ in the proof of Theorem~\ref{T:main theorem}. This does not seem to provide much new information, and even for the cases analyzed by the author, the resulting presentation is significantly more complicated.
It would be interesting to find an approach that works for all homomorphisms simultaneously.
Another class of examples that would be interesting to analyze with respect to Question~\ref{Q:question2} are those given in \cite{Brooks} and \cite{Buser}, as the proof that the surfaces are iso-length spectral is more directly geometric.
\section*{Acknowledgements}
I would like to thank my advisor Christopher J. Leininger for guidance and useful conversations.
\bibliographystyle{plain}
\section{Introduction}
Statistical hypothesis testing appears in areas as diverse as information theory, image processing, signal processing, social sciences or biology.
Depending on the field, this problem can be referred to as classification, discrimination, signal detection or model selection.
The goal of $M$-ary hypothesis testing is to decide among $M$ possible hypotheses based on the observation of a certain random variable.
In a Bayesian formulation, a prior distribution over the hypotheses is assumed, and the problem is translated into a minimization of the average error probability or its generalization, the Bayes risk.
When the number of hypotheses is $M = 2$, the problem is referred to as binary hypothesis testing. While a Bayesian approach in this case is still possible, the binary setting allows a simple formulation in terms of the two types of pairwise errors with no prior distribution over the hypotheses. The work of Neyman and Pearson~\cite{NeyPea33} established the optimum binary test in this setting. Thanks to its simplicity and robustness, this has been the most popular approach in the literature.
In the context of reliable communication, binary hypothesis testing has been instrumental in the derivation of converse bounds to the error probability. In \cite[Sec. III]{Shan67} Shannon, Gallager and Berlekamp derived lower bounds to the error probability in the transmission of $M$~messages, including the sphere-packing bound, by analyzing an instance of binary hypothesis testing~\cite{Shan67,Shan67II}. In~\cite{Forney1968}, Forney used a binary hypothesis test to determine the optimum decision regions in decoding with erasures. In~\cite{Blahut74}, Blahut emphasized the fundamental role of binary hypothesis testing in information theory and provided an alternative derivation of the sphere-packing exponent. Inspired by this result, Omura presented in~\cite{Omura1975} a general method for lower-bounding the error probability of channel coding and source coding.
More recently, Polyanskiy, Poor and Verd{\'u}~\cite{Pol09} applied the Neyman-Pearson lemma to a particular binary hypothesis test to derive the meta-converse bound, a fundamental finite-length lower bound to the channel-coding error probability from which several converse bounds can be recovered. The meta-converse bound was extended to joint source-channel coding in~\cite{allerton12,Kost13}.
The information-spectrum method expresses the error probability as the tail probability of a certain random variable, often referred to as information density, entropy density or information random variable \cite{Han03}. This idea was initially used by Shannon in \cite{Shan57} to obtain bounds to the channel coding error probability. Verd\'u and Han capitalized on this analysis to provide error bounds and capacity expressions that hold for general channels, including arbitrary memory, input and output alphabets~\cite{HanVer93,VerHan94,PoorVer95} (see also~\cite{Han03}).
In this work, we further develop the connection between hypothesis testing, information-spectrum and converse bounds in information theory by providing a number of alternative expressions for the error probability of Bayesian $M$-ary hypothesis testing. We show that this probability can be equivalently described by the error probability of a binary hypothesis test with certain parameters. In particular, this result implies that the meta-converse bound by Polyanskiy, Poor and Verd{\'u} gives the minimum error probability when it is optimized over its free parameters.
We also provide an explicit alternative expression using information-spectrum measures and illustrate the connection with existing information-spectrum bounds. This result implies that a suitably optimized generalization of the Verd\'u-Han bound also gives the minimum error probability. We discuss in some detail examples and extensions.
The rest of this paper is organized as follows. In Section~\ref{sec:BaryHT} of this paper we formalize the binary hypothesis testing problem and introduce notation.
In \refS{MaryHT} we present $M$-ary hypothesis testing and propose a number of alternative expressions to the average error probability.
The hypothesis-testing framework is related to several previous converse results in \refS{applications}. Proofs of several results are included in the appendices.
\section{Binary Hypothesis Testing}\label{sec:BaryHT}
Let $Y$ be a random variable taking values over a discrete alphabet $\Yc$.
We define two hypotheses $\Hc_0$ and $\Hc_1$, such that $Y$ is distributed according to a given distribution $P$ under $\Hc_0$, and according to a distribution $Q$ under $\Hc_1$.
A binary hypothesis test is a mapping $\Yc \to \{0,1\}$, where $0$ and $1$ correspond respectively to $\Hc_0$ and $\Hc_1$.
Denoting by $\hat H \in \{0,1\}$ the random variable associated with the test output, we may describe the (possibly randomized) test by a conditional distribution $T \triangleq P_{\hat H|Y}$.
The performance of a binary hypothesis test is characterized by two conditional error probabilities, namely $\epsilon_{0}(P,T)$ or type-0 probability, and $\epsilon_{1}(P,T)$ or type-1 probability, respectively given by
\begin{align}
\epsilon_{0}(P,T)
&\triangleq \Pr\bigl[\hat H = 1 \,\big|\, \Hc_0 \bigr]
= \sum_{y} P(y) T(1|y),
\label{eqn:bht-type0error}\\
\epsilon_{1}(Q,T)
&\triangleq \Pr\bigl[\hat H = 0 \,\big|\, \Hc_1 \bigr]
= \sum_{y} Q(y) T(0|y).
\label{eqn:bht-type1error}
\end{align}
In the Bayesian setting, for $\Hc_i$ with prior probability $\Pr[\Hc_i]$, $i=0,1$, the smallest average error probability is
\begin{align}
\bar\epsilon
\triangleq \min_{T}\Bigl\{ \Pr[\Hc_0]\,\epsilon_{0}(P,T)
+ \Pr[\Hc_1]\,\epsilon_{1}(Q,T) \Bigr\}.
\label{eqn:bht-average-error}
\end{align}
In the non-Bayesian setting, the priors $\Pr[\Hc_i]$, $i=0,1$, are unknown and the quantity $\bar\epsilon$ is not defined. Instead, one can characterize the optimal trade-off between $\epsilon_{0}(\cdot)$ and $\epsilon_{1}(\cdot)$. We define the smallest type-$0$ error $\epsilon_{0}(\cdot)$ among all tests $T$ with a type-$1$ error $\epsilon_{1}(\cdot)$ at most $\beta$ as
\begin{align}
\alpha_{\beta}\bigl(P, Q\bigr)
\triangleq \min_{{T: \epsilon_{1}(Q, T) \leq \beta}} \Big\{ \epsilon_{0}(P, T) \Big\}.
\label{eqn:bht-alpha}
\end{align}
The tests minimizing~\refE{bht-average-error} and~\refE{bht-alpha} have
the same form. The minimum is attained by the Neyman-Pearson test~\cite{NeyPea33},
\begin{align}\label{eqn:bht-NPtest}
T_{\text{NP}} (0|y) =
\begin{cases}
1, & \text{ if } \frac{P(y)}{Q(y)} > \gamma,\\
p, & \text{ if } \frac{P(y)}{Q(y)} = \gamma,\\
0, & \text{ otherwise},
\end{cases}
\end{align}
where $\gamma\geq 0$ and $p \in [0,1]$ are parameters.
When $\gamma = \frac{\Pr[\Hc_1]}{\Pr[\Hc_0]}$, the test $T_{\text{NP}}$
minimizes~\refE{bht-average-error} with the value of $p$
being irrelevant since it does not affect the objective.
When $\gamma$ and $p$ are chosen such that the type-$1$ error $\epsilon_{1}(Q,T_{\text{NP}})$ is equal to $\beta$, $T_{\text{NP}}$ attains the minimum in~\refE{bht-alpha}.
The test minimizing~\refE{bht-average-error} and~\refE{bht-alpha} is not unique in general, as the form of the test can vary for observations $y$ satisfying $P(y)=Q(y)$. Any test achieving~\refE{bht-alpha} is said to be optimal in the Neyman-Pearson sense.
\section{$M$-ary Hypothesis Testing}\label{sec:MaryHT}
Consider two random variables $V$ and $Y$ with joint distribution $\pvy$, where $V$ takes values on a discrete alphabet $\Vc$ of cardinality $|\Vc| = M$, and $Y$ takes values in a discrete alphabet $\Yc$. We shall assume that the cardinality $|\Vc|$ is finite; see Remark \ref{remark:mht-alpha} in \refS{proof-mht-alpha} for an extension to infinite alphabets $\Vc$. While throughout the article we use discrete notation for clarity of exposition, the results directly generalize to continuous alphabets $\Yc$; see Remark \ref{remark:proof-continuous-mht-alpha} in \refS{proof-mht-alpha}.
The estimation of $V$ given $Y$ is an $M$-ary hypothesis-testing problem. Since the joint distribution $\pvy$ defines a prior distribution $\pv$ over the alternatives, the problem is naturally cast within the Bayesian framework.
An $M$-ary hypothesis test is defined by a (possibly random) transformation $P_{\hat V|Y} : \Yc \to \Vc$, where $\hat{V}$ denotes the random variable associated to the test output.\footnote{While both binary and $M$-ary hypothesis tests are defined by conditional distributions, to avoid confusion, we denote binary tests by $T$ and $M$-ary tests by $P_{\hat V|Y}$.} We denote the average error probability of a test $P_{\hat V|Y}$ by $\bar\epsilon(P_{\hat V|Y})$. This probability is given by
\begin{align}
\bar\epsilon(P_{\hat V|Y})
&\triangleq \Pr\left[\hat V \neq V\right]
\label{eqn:mht-epsdef-1}\\
&= 1 - \sum_{v,y} \pvy(v,y)P_{\hat V|Y}(v|y).
\label{eqn:mht-epsdef-2}
\end{align}
Minimizing over all possible conditional distributions
$P_{\hat V|Y}$ gives the smallest average error probability, namely
\begin{align}
\bar\epsilon
&\triangleq \min_{P_{\hat V|Y}} \bar\epsilon(P_{\hat V|Y}).
\label{eqn:mht-epsopt-1}
\end{align}
An optimum test chooses the hypothesis $v$ with largest posterior probability $P_{V|Y}(v|y)$
given the observation $y$, that is the Maximum a Posteriori (MAP) test. The MAP test that breaks ties randomly with equal probability is given by
\begin{equation}\label{eqn:mht-PMAP}
P^{\text{MAP}}_{\hat{V}|Y}(v|y) = \begin{cases}
\frac{1}{|\Sc(y)|}, & \text{ if } v \in \Sc(y),\\
0, & \text{ otherwise,}
\end{cases}
\end{equation}
where the set $\Sc(y)$ is defined as
\begin{align} \label{eqn:mht-Sdef}
\Sc(y) &\triangleq
\left\{ v \in \Vc \,\;\big|\; P_{V|Y}(v|y)
= \max_{v'\in\Vc} P_{V|Y}(v'|y) \right\}.
\end{align}
Substituting~\eqref{eqn:mht-PMAP} in~\eqref{eqn:mht-epsdef-2} gives
\begin{align}
\bar\epsilon
&= 1 - \sum_{v,y} \pvy(v,y) P^{\text{MAP}}_{\hat{V}|Y}(v|y) \label{eqn:mht-epsopt-2}\\
&= 1- \sum_y \max_{v'} \pvy(v',y).
\label{eqn:mht-epsopt-3}
\end{align}
The next theorem introduces two alternative equivalent expressions for the minimum error probability $\bar\epsilon$.
\begin{theorem}
\label{thm:mht-alpha}
The minimum error probability of an $M$-ary hypothesis test (with possibly non-equally likely hypotheses) can be expressed as
\begin{align}
\bar\epsilon
&= \max_{\qy} \alpha_{\frac{1}{M}} \bigl(\pvy, \qv \times \qy\bigr)
\label{eqn:meta}\\
&=\maxp_{\qy} \sup_{\gamma\geq 0} \left\{ \Pr\left[ \frac{\pvy(V,Y)}{ \qy(Y) } \leq \gamma \right] - \gamma \right\},
\label{eqn:tight-vh}
\end{align}
where $\qv(v) \triangleq \frac{1}{M}$ for all $v\in\Vc$, and the probability in \refE{tight-vh} is computed with respect to $\pvy$.
Moreover, a maximizing distribution $\qy$ in both expressions is
\begin{equation} \label{eqn:qyMAP-def}
\qy^{\star}(y) \triangleq \frac{1}{\mu} \max_{v'} \pvy(v',y),
\end{equation}
where $\mu \triangleq \sum_y \max_{v'} \pvy(v',y)$ is a normalizing constant.\end{theorem}
\begin{IEEEproof}
See \refS{proof-mht-alpha}.
\end{IEEEproof}
Eq. \refE{meta} in Theorem~\ref{thm:mht-alpha} shows that the error probability of Bayesian $M$-ary hypothesis testing can be expressed as the best type-$0$ error probability of an induced binary hypothesis test discriminating between the original distribution $\pvy$ and an alternative product distribution $\qv \times \qy^{\star}$ with type-$1$-error equal to $\frac{1}{M}$.
Eq. \refE{tight-vh} in Theorem~\ref{thm:mht-alpha} provides an alternative characterization based on information-spectrum measures, namely the generalized information density $\log\frac{\pvy(v,y)}{\qy(y)}$. By choosing $\qy = \qy^{\star}$ and $\gamma = \mu$, the term $\Pr\left[ \frac{\pvy(V,Y)}{\qy(Y) } \leq \gamma \right] - \gamma$ can be interpreted as the error probability of an $M$-ary hypothesis test that, for each $v$, compares the posterior likelihood ${P_{V|Y}(v|y)}$ with a threshold equal to $\max_{v'} {P_{V|Y}(v'|y)}$ and decides accordingly, i.~e., this test emulates the MAP test yielding the exact error probability.
The two alternative expressions provided in Theorem~\ref{thm:mht-alpha} are not easier to compute than $\bar\epsilon$ in \refE{mht-epsopt-3}. To see this, note that the normalization factor $\mu$ in $\qy^{\star}$ is such that $\mu = 1 - \bar\epsilon$.
For any fixed test $P_{\hat V|Y}$, not necessarily MAP, using \refE{mht-epsopt-1} it follows that $\bar\epsilon(P_{\hat V|Y}) \geq \bar\epsilon$.
Therefore, Theorem~\ref{thm:mht-alpha} provides a lower bound to the error probability of any $M$-ary hypothesis test. This bound is expressed in \refE{meta} as a binary hypothesis test discriminating between $\pvy$ and an auxiliary distribution $\qvy = \qv \times \qy$. Optimizing over general distributions $\qvy$ (not necessarily product) may yield tighter bounds for a fixed test $P_{\hat V|Y}$, as shown next.
\begin{theorem} \label{thm:mht-suboptimal}
The error probability of an $M$-ary hypothesis test $P_{\hat V|Y}$ satisfies
\begin{align}
\bar\epsilon(P_{\hat V|Y}) &= \max_{\qvy} \alpha_{\epsilon_{1}(\qvy,P_{\hat V|Y})} \bigl(\pvy, \qvy \bigr)\label{eqn:meta-suboptimal}\\
&=\maxp_{\qvy} \sup_{\gamma\geq 0} \Biggl\{ \Pr\Biggl[ \frac{\pvy(V,Y)}{ \qvy(V,Y) } \leq \gamma \Biggr]
- \gamma \epsilon_{1}(\qvy,P_{\hat V|Y}) \Biggr\},\label{eqn:tight-vh-suboptimal}
\end{align}
where
\begin{align} \label{eqn:eps1-def-suboptimal}
\epsilon_{1}(\qvy,P_{\hat V|Y}) \triangleq \sum_{v,y} \qvy(v,y) P_{\hat V|Y}(v|y).
\end{align}
\end{theorem}
\begin{IEEEproof}
Let us consider the binary test $T(0|v,y) = P_{\hat V|Y}(v|y)$. The type-$0$ and type-$1$ error probabilities of this test are $\epsilon_{0}(\pvy, T) = \bar\epsilon(P_{\hat V|Y})$ and $\epsilon_{1}(\qvy, T) = \epsilon_{1}(\qvy,P_{\hat V|Y})$ defined in \refE{eps1-def-suboptimal}, respectively. Therefore, from the definition of $\alpha_{(\cdot)}(\cdot)$ in \refE{bht-alpha} we obtain that, for any $\qvy$,
\begin{align}
\bar\epsilon(P_{\hat V|Y})
\geq \alpha_{\epsilon_{1}(\qvy,P_{\hat V|Y})} \bigl(\pvy, \qvy \bigr).
\label{eqn:meta-suboptimal-bound}
\end{align}
For $\qvy=\pvy$, using that $\alpha_{\beta}(\pvy,\pvy) = 1 - \beta$, the right-hand side of \refE{meta-suboptimal-bound} becomes $1 - \epsilon_{1}(\pvy,P_{\hat V|Y})$. As $1 - \epsilon_{1}(\pvy,P_{\hat V|Y}) = 1 - \epsilon_{1}(\pvy,T) = \epsilon_{0}(\pvy,T) = \bar\epsilon(P_{\hat V|Y})$, then \refE{meta-suboptimal} follows from optimizing \refE{meta-suboptimal-bound} over $\qvy$.
To obtain~\refE{tight-vh-suboptimal} we apply the lower bound in Lemma~\ref{lem:alpha-relax-2} in \refS{proof-mht-alpha} to \refE{meta-suboptimal} and note that, for $\gamma=1$, $\qvy=\pvy$, the bound holds with equality.
\end{IEEEproof}
The proof of Theorem~\ref{thm:mht-suboptimal} shows that the auxiliary distribution $\qvy=\pvy$ maximizes \refE{meta-suboptimal} and \refE{tight-vh-suboptimal} for any $M$-ary hypothesis test $P_{\hat V|Y}$. Nevertheless, the auxiliary distribution optimizing \refE{meta-suboptimal} and \refE{tight-vh-suboptimal} is not unique in general, as seen in Theorem~\ref{thm:mht-alpha} for the MAP test and in the next result for arbitrary maximum-metric tests.
Consider the maximum-metric test $P^{(q)}_{\hat{V}|Y}$ that chooses the hypothesis $v$ with largest metric $q(v,y)$, where $q(v,y)$ is an arbitrary function of $v$ and $y$. This test can be equivalently described as
\begin{equation}\label{eqn:mht-PMAP-MM}
P^{(q)}_{\hat{V}|Y}(v|y) = \begin{cases}
\frac{1}{\left|\Sc_q(y)\right|}, & \text{ if } v \in \Sc_q(y),\\
0, & \text{ otherwise,}
\end{cases}
\end{equation}
where the set $\Sc_q(y)$ is defined as
\begin{align} \label{eqn:mht-Sdef-MM}
\Sc_q(y) &\triangleq
\left\{ v \in \Vc \;\Big|\; q(v,y) = \max_{v'\in \Vc} q(v',y) \right\}.
\end{align}
\begin{corollary} \label{cor:mht-mm}
For the maximum metric test $P_{\hat V|Y} = P_{\hat V|Y}^{(q)}$,
a distribution $\qvy$ maximizing \refE{meta-suboptimal} and \refE{tight-vh-suboptimal}
is
\begin{align}\label{eqn:qvy-def-mm}
\qvy^{(q)}(v,y) \triangleq \frac{\pvy(v,y)}{\mu'} \frac{\max_{v'} q(v',y)}{q(v,y)},
\end{align}
where $\mu'$ is a normalizing constant.
\end{corollary}
\begin{IEEEproof}
See Appendix \ref{apx:mht-mm}.
\end{IEEEproof}
The expressions in Theorem~\ref{thm:mht-suboptimal} still depend on the specific test through $\epsilon_{1}(\cdot)$, cf. \refE{eps1-def-suboptimal}. For the optimal MAP test, i.~e., a maximum metric test with metric $q(v,y) = P_{V|Y}(v|y)$, we obtain $\qvy^{(q)} = \qv \times \qy^{\star}$ with uniform $\qv$ and $\qy^{\star}$ defined in~\refE{qyMAP-def}. For uniform $\qv$ it holds that
\begin{align}
\epsilon_{1}(\qv \times \qy, P_{\hat V|Y}) = \frac{1}{M},
\end{align}
for any $\qy$, $P_{\hat V|Y}$. As a result, for the optimal MAP test, the expressions in Theorem~\ref{thm:mht-suboptimal} and the distribution defined in Corollary~\ref{cor:mht-mm} recover those in Theorem~\ref{thm:mht-alpha}.
\subsection{Example}\label{sec:example}
To show the computation of the various expressions in Theorem~\ref{thm:mht-alpha} let us consider the ternary hypothesis test examined in \cite[Figs. 1 and 2]{PoorVer95} and revisited in \cite[Sec. III.A]{ChenAla2012}. Let $\Vc = \Yc = \{0,1,2\}$, $\pv(v)=\frac{1}{3}$, $v=0,1,2$, and
\begin{align} \label{eqn:ex1-pyx}
\pyv(y|v) =\begin{cases} 0.40,& (v,y) = (0,0), (1,1) \text{ and } (2,2),\\
0.33,& (v,y) = (0,2), (1,2) \text{ and } (2,0), \\
0.27,& \text{otherwise}.\end{cases}
\end{align}
Direct calculation shows that the MAP estimate is $\hat{v}(y) = y$, and from \refE{mht-epsopt-3} we obtain $\bar{\epsilon}=0.6$.
In order to evaluate the expressions in Theorem~\ref{thm:mht-alpha} we first compute $\qy^{\star}$ in \refE{qyMAP-def}, which yields $\qy^{\star}(y) = \frac{1}{3}$, $y=0,1,2$. According to \refE{meta} a binary hypothesis test between $\pvy$ and $\qvy^{\star}$, where $\qvy^{\star}(v,y)=\frac{1}{9}$, for all $v,y$, with type-$1$ error $\epsilon_1=\frac{1}{3}$, yields the minimum error probability
\begin{align}
\bar\epsilon &= \alpha_{\frac{1}{3}} \bigl(\pvy, \qvy^{\star}\bigr).
\label{eqn:ex1-alpha}
\end{align}
Solving the Neyman-Pearson test in \refE{bht-NPtest} for the type-$1$ error $\epsilon_1=\frac{1}{3}$, we obtain $\gamma = 1.2$ and $p = 1$ and therefore
\begin{align}
\label{eqn:ex1-TNP}
T_{\text{NP}} (0|v,y) =
\begin{cases}
1, & \text{ if } \pvy(v,y) \geq \frac{2}{15},\\
0, & \text{ otherwise}.
\end{cases}
\end{align}
Hence, \refE{ex1-alpha} yields
\begin{align}
\bar\epsilon &= \epsilon_{0}(\pvy,T_{\text{NP}}) \\
&= 1 - \sum_{v,y} \pvy(v,y) T_{\text{NP}}(0|v,y) = 0.6.
\end{align}
Similarly, to evaluate \refE{tight-vh} in Theorem~\ref{thm:mht-alpha},
we substitute $\qy^{\star}$ to obtain
\begin{align} \label{eqn:ex1-tight-vh}
\bar\epsilon
&=\sup_{\gamma\geq 0} \left\{ \Pr\left[ \pvy(V,Y) \leq \frac{\gamma}{3} \right]
- \gamma \right\}.
\end{align}
\begin{figure}[t]
\centering
\input{figs/bounds-ex1.tikz}
\caption{Information-spectrum lower bounds to the minimum error probability for the example in \refS{example}, as a function of the bound parameter~$\gamma$.} \label{fig:ex1-bounds}
\end{figure}
Fig. \ref{fig:ex1-bounds} shows the argument of \refE{ex1-tight-vh} with respect to $\gamma \in [0,1]$ compared to the exact error probability $\bar\epsilon$, shown in the plot with a horizontal line. For comparison, we also include the Verd\'u-Han lower bound~\cite[Th.~4]{VerHan94},
the Poor-Verd\'u lower bound~\cite[Th.~1]{PoorVer95}
and the lower bound proposed by Chen and Alajaji in~\cite[Th.~1]{ChenAla2012}. The Chen-Alajaji bound~\cite[Th.~1]{ChenAla2012} is parametrized by $\theta \geq 0$ and, for $\theta=1$, it reduces to the Poor-Verd\'u lower bound. We observe that \refE{ex1-tight-vh} gives the exact error probability $\bar\epsilon = 0.6$ at $\gamma=1-\bar\epsilon$. The Verd\'u-Han and the Poor-Verd\'u lower bounds both coincide and yield $\bar\epsilon \geq 0.574$. For this example, as shown in~\cite{ChenAla2012}, the Chen-Alajaji lower bound is tight for $\theta \to \infty$. For $\theta = 25$ the bound is still $\bar\epsilon \geq 0.579$.
As an application of Theorem~\ref{thm:mht-suboptimal} and Corollary~\ref{cor:mht-mm} we study now a variation of the previous example. For a hypothesis $v \in \Vc$, let $(y_1,y_2) \in \Yc^2$ denote two independent observations of the random variable $Y$ distributed according to $P_{Y|V=v}$ in \refE{ex1-pyx}. We consider the suboptimal hypothesis test that decides on the source message $v$ maximizing the metric $q(v,y_1,y_2) = \pyv(y_1|v)$. That is, for equiprobable hypotheses, this test applies the MAP rule based on the first observation, ignoring the second one.
The expressions in Theorem~\ref{thm:mht-alpha} do not depend on the decoder and yield the MAP error probability $\bar{\epsilon}=0.592$.
Then, for $P^{(q)}_{\hat{V}|Y_1Y_2}$ in \refE{mht-PMAP-MM}, it holds that $\bar\epsilon\bigl(P^{(q)}_{\hat{V}|Y_1Y_2}\bigr) \geq 0.592$.
Let us choose the auxiliary distribution
\begin{align}\label{eqn:Qex2}
Q_{VY_1Y_2}(v, y_1, y_2) = \frac{1}{9} \pyv(y_2|v).
\end{align}
Using that $P^{(q)}_{\hat{V}|Y_1Y_2}(v|y_1,y_2) = \openone\bigl\{v = y_1\bigr\}$ is independent of~$y_2$, we obtain
\begin{align}
\epsilon_{1}\bigl(Q_{VY_1Y_2},P^{(q)}_{\hat{V}|Y_1Y_2}\bigr)
&= \frac{1}{9} \sum_{v,y_1,y_2} \pyv(y_2|v) P^{(q)}_{\hat{V}|Y_1Y_2}(v | y_1,y_2)\\
&= \frac{1}{9} \sum_{v,y_1} \openone\bigl\{v = y_1\bigr\}\\
&= \frac{1}{3}.
\end{align}
Therefore, the bound implied in Theorem~\ref{thm:mht-suboptimal} for this specific choice of $Q_{VY_1Y_2}$ yields
\begin{align}\label{eqn:ex2-alpha}
\bar\epsilon\Bigl(P^{(q)}_{\hat{V}|Y_1Y_2}\Bigr)
&\geq \alpha_{\frac{1}{3}} \bigl(P_{VY_1Y_2}, Q_{VY_1Y_2}\bigr).
\end{align}
Since the marginal corresponding to $Y_2$ is the same for $P_{VY_1Y_2}$ and $Q_{VY_1Y_2}$ in \refE{Qex2}, this component does not affect the binary test and can be eliminated from \refE{ex2-alpha}. Therefore, the right-hand side in \refE{ex2-alpha} coincides with that of \refE{ex1-alpha}, and yields the lower bound $\bar\epsilon\bigl(P^{(q)}_{\hat{V}|Y_1Y_2}\bigr) \geq 0.6$.
It can be checked that an application of \refE{tight-vh-suboptimal} in Theorem~\ref{thm:mht-suboptimal} yields the same result. We conclude that by allowing joint distributions $Q_{VY_1Y_2}$ we obtain decoder-specific bounds.
\subsection{Proof of Theorem~\ref{thm:mht-alpha}}\label{sec:proof-mht-alpha}
We first prove the equality between the left- and right-hand sides of \refE{meta} by showing the equivalence of the optimization problems \refE{mht-epsopt-1} and \refE{meta}.
From \refE{mht-epsopt-1} we have that
\begin{align}
\bar\epsilon
&= \min_{ P_{\hat V|Y}:\sum_{v} P_{\hat V|Y}(v|y) \leq 1, y\in\Yc } \sum_{v,y}\!\pvy(v,y)\!\left(1\!-\!P_{\hat V|Y}(v|y)\right)
\label{eqn:mht-epsopt-4}\\
&= \max_{\lambda(\cdot) \geq 0} \min_{ P_{\hat V|Y} } \Biggl\{ \sum_{v,y} \pvy(v,y) \left(1-P_{\hat V|Y}(v|y)\right)
+ \sum_y \lambda(y) \left( \sum_{v} P_{\hat V|Y}(v|y) - 1 \right) \Biggr\},
\label{eqn:mht-epsopt-5}
\end{align}
where in \refE{mht-epsopt-4} we wrote explicitly the (active) constraints resulting from $P_{\hat V|Y}$ being a conditional distribution; and \refE{mht-epsopt-5} follows from introducing the constraints into the objective via the Lagrange multipliers $\lambda(y) \geq 0$, $y\in\Yc$.
Similarly, we write \refE{meta} as
\begin{align}
\max_{\qy} \; &\alpha_{\frac{1}{M}}\left(\pvy, \qv \times \qy\right)\notag\\
&= \max_{\qy} \,\min_{T: \sum_{v,y} \frac{1}{M} \qy(y) T(0|v,y) \leq \frac{1}{M}} \Biggl\{\sum_{v,y} \pvy(v,y)
T(1|v,y) \Biggr\}
\label{eqn:mht-alphaopt-6}\\
&= \max_{\eta\geq 0}\max_{\qy} \min_{ T} \Biggl\{\sum_{v,y} \pvy(v,y) \Bigl(1-T(0|v,y)\Bigr)
+ \eta \left( \sum_{v,y} \qy(y) T(0|v,y) - 1 \right)\Biggr\},
\label{eqn:mht-alphaopt-7}
\end{align}
where in \refE{mht-alphaopt-6} we used the definitions of $\qv$ and $\alpha_{\beta}(\cdot)$; and \refE{mht-alphaopt-7} follows from introducing the constraint into the objective via the Lagrange multiplier $\eta$.
Since $\eta$ and $\qy$ only appear in the objective function of \refE{mht-alphaopt-7} as $\eta \qy(y)$, $y\in\Yc$, we may optimize \refE{mht-alphaopt-7} over $\bar\lambda(y) \triangleq \eta\qy(y)$ instead. Then, \refE{mht-alphaopt-7} becomes
\begin{align}
\max_{\bar\lambda(\cdot) \geq 0} \min_{T} \Biggl\{\sum_{v,y} \pvy(v,y) \Bigl(1-T(0|v,y)\Bigr)
+ \sum_{y} \bar\lambda(y) \left( \sum_{v} T(0|v,y) - 1 \right)\Biggr\}.
\label{eqn:mht-alphaopt-8}
\end{align}
Comparing \refE{mht-epsopt-5} and \refE{mht-alphaopt-8}, it is readily seen that the optimization problems \refE{mht-epsopt-1} and \refE{meta} are equivalent. Hence, the first part of the theorem follows.
We need the following result to prove identity \eqref{eqn:tight-vh}.
\begin{lemma}\label{lem:alpha-relax-2}
For any pair of distributions $\{P,Q\}$ over $\Yc$ and any $\gamma' \geq 0$, it holds
\begin{align}
\alpha_{\beta}\bigl(P, Q\bigr)
\geq \PP\left[\frac{P(Y)}{Q(Y)} \leq \gamma'\right]-\gamma'\beta.
\label{eqn:alpha-relax-1}
\end{align}
\end{lemma}
\begin{IEEEproof}
The bound \refE{alpha-relax-1} with the term $\PP\Bigl[\frac{P(Y)}{Q(Y)} \leq \gamma'\Bigr]$ replaced by $\PP\Bigl[\frac{P(Y)}{Q(Y)} < \gamma'\Bigr]$ corresponds to \cite[Eq. (102)]{Pol09}. The proof of the lemma follows the steps in \cite[Eq. (2.71)-(2.74)]{PolThesis} and is included in Appendix \ref{apx:alpha-relax} for completeness.
\end{IEEEproof}
Applying \refE{alpha-relax-1} to \refE{meta} with $\gamma' = \gamma M$, \mbox{$P \leftarrow \pvy$} and \mbox{$Q \leftarrow \qv \times \qy$} and optimizing over $\gamma$ we obtain
\begin{align}
\bar\epsilon \geq \maxp_{\qy} \sup_{\gamma\geq 0} \left\{ \Pr\left[ \frac{\pvy(V,Y)}{\qy(Y) } \leq \gamma \right] - \gamma \right\}.
\label{eqn:infspectrum}
\end{align}
By using the distribution $\qy = \qy^{\star}$ in \refE{qyMAP-def} and by choosing $\gamma = \mu$, the probability term in \refE{infspectrum} becomes
\begin{align}
\Pr\left[ \frac{\pvy(V,Y)}{ \qy^{\star}(Y) } \leq \mu \right]
=
\Pr\left[ P_{V|Y}(V|Y) \leq \max_{v'} P_{V|Y}(v'|Y) \right] = 1. \label{eqn:tight-infspectrum}
\end{align}
Substituting $\qy = \qy^{\star}$, $\gamma = \mu$, and using \refE{tight-infspectrum} in \refE{infspectrum} we obtain
\begin{align}
\bar\epsilon
&\geq \maxp_{\qy} \sup_{\gamma\geq 0} \left\{ \Pr\left[ \frac{\pvy(V,Y)}{\qy(Y)} \leq \gamma \right] - \gamma \right\} \label{eqn:tight-infspectrum-1}\\
&\geq 1-\mu \label{eqn:tight-infspectrum-2}\\
&= 1 - \sum_{y} \max_{v'} \pvy(v',y) \label{eqn:tight-infspectrum-3}\\
&=\bar\epsilon, \label{eqn:tight-infspectrum-4}
\end{align}
where in \refE{tight-infspectrum-3} we used the definition of $\mu$ and \refE{tight-infspectrum-4} follows from \refE{mht-epsopt-3}.
The identity \eqref{eqn:tight-vh} in the theorem is due to \refE{tight-infspectrum-1}-\refE{tight-infspectrum-4}, where it is readily seen that $\qy = \qy^{\star}$ is a maximizer of \eqref{eqn:tight-vh}. Moreover, since $\qy^{\star}$ is a maximizer of \eqref{eqn:tight-vh}, and Lemma~\ref{lem:alpha-relax-2} applies for a fixed $\qy$, it follows that $\qy^{\star}$ is also an optimal solution to \refE{meta}. The second part of the theorem thus follows from \refE{tight-infspectrum-1}-\refE{tight-infspectrum-4}.
\begin{remark}\label{remark:mht-alpha}A simple modification of Theorem~\ref{thm:mht-alpha} generalizes the result to countably infinite alphabets $\Vc$. We define $\barqv$ to be the counting measure, i. e., $\barqv(v) = 1$ for all $v$.
The function $\alpha_{\beta}(\cdot)$ in \refE{bht-alpha} is defined for arbitrary $\sigma$-finite measures, not necessarily probabilities. Then, by substituting
$\qv$ by $\barqv$, the type-$1$ error measure is $\epsilon_{1}(\barqv \times \qy, T)=1$ for any $T$, and \refE{meta} becomes
\begin{equation}
\bar\epsilon = \max_{\qy} \alpha_{1}
\left(\pvy, \barqv \times \qy\right).
\label{eqn:meta-bis}
\end{equation}
Since \refE{tight-vh} directly applies to both finite or countably infinite $\Vc$, so does Theorem~\ref{thm:mht-alpha} with \refE{meta} replaced by~\refE{meta-bis}.
\end{remark}
\begin{remark}\label{remark:proof-continuous-mht-alpha}
For continuous observation alphabets $\Yc$, the constraint of $P_{\hat V|Y}$ being a
conditional distribution
\begin{align}
\sum_{v} P_{\hat V|Y}(v|y) \leq 1, \ y \in \Yc,
\label{eqn:mht-const-v1}
\end{align}
can be equivalently described as
\begin{align}
\max_{\qy} \int \sum_{v} P_{\hat{V}|Y}(v|y) \diff \qy(y) \leq 1.
\label{eqn:mht-const-v2}
\end{align}
The fact that \refE{mht-const-v1} implies \refE{mht-const-v2} trivially follows by averaging both sides of \refE{mht-const-v1} over an arbitrary $\qy$, and in particular, for the one maximizing \refE{mht-const-v2}.
To prove that \refE{mht-const-v2} implies \refE{mht-const-v1}, let us assume that \refE{mht-const-v1} does not hold, i.~e., $\sum_{v} P_{\hat V|Y}(v|\bar y) > 1$ for some $\bar y\in\Yc$. Let $\bar Q_Y$ be the distribution that concentrates all the mass at $\bar y$. Since for $\qy=\bar Q_Y$ the condition \refE{mht-const-v2} is violated, so happens for the maximizing $\qy$. As a result, \refE{mht-const-v2} implies \refE{mht-const-v1}, as desired, and the equivalence between both expressions follows.
By using \refE{mht-const-v2} instead of \refE{mht-const-v1} in \refE{mht-epsopt-4}-\refE{mht-epsopt-5}, and after replacing the sums by integrals where needed, we obtain
\begin{align}
\bar\epsilon
&= \max_{\eta \geq 0} \min_{ P_{\hat V|Y} } \Biggl\{ \int \sum_{v} P_{V|Y}(v|y) \left(1-P_{\hat V|Y}(v|y)\right) \diff \py(y)
+ \eta \left( \max_{\qy} \int \sum_{v} P_{\hat{V}|Y}(v|y) \diff \qy(y) - 1\right) \Biggr\}. \label{eqn:mht-epsopt-5-bis}
\end{align}
For fixed $\qy$ the argument in \refE{mht-epsopt-5-bis} is linear with respect to $P_{\hat V|Y}$, and for fixed $P_{\hat V|Y}$ is linear with respect to $\qy$. Therefore, applying Sion's minimax theorem \cite[Cor. 3.5]{Sion58} to interchange $\min_{ P_{\hat V|Y} }$ and $\max_{\qy}$, \refE{mht-epsopt-5-bis} becomes \refE{mht-alphaopt-7}. The first part of the theorem thus holds for continuous alphabets $\Yc$.
Since Lemma \ref{lem:alpha-relax-2} applies to arbitrary probability spaces, so does \refE{infspectrum}. Therefore, for continuous alphabets $\Yc$, the second part of the theorem follows from \refE{infspectrum}, \refE{tight-infspectrum} and \refE{tight-infspectrum-1}-\refE{tight-infspectrum-4} after replacing the sum by an integral in \refE{tight-infspectrum-3}.
\end{remark}
\begin{remark}
The optimality of $\qy^{\star}$ in \refE{meta} can also be proved constructively. Consider the binary hypothesis testing problem between $\pvy$ and $\qv \times \qy^{\star}$. We define a test
\begin{align}\label{eqn:mht-TMAP-proof}
T_{\text{MAP}}(0|v,y) \triangleq
\begin{cases}
\frac{1}{|\Sc(y)|}, & \text{ if } v \in \Sc(y),\\
0, & \text{ otherwise.}
\end{cases}
\end{align}
For $\qv$ uniform, the type-$1$ error probability of this test is $\epsilon_{1}(\qv \times \qy^{\star}, T_{\text{MAP}}) = \frac{1}{M}$.
Using that the MAP test is a maximum metric test with $q(v,y) = \pvy(v,y)$, according to the proof of Corollary \ref{cor:mht-mm} in Appendix~\ref{apx:mht-mm}, the type-$0$ error probability of $T_{\text{MAP}}$ is precisely $\alpha_{\frac{1}{M}} \bigl(\pvy, \qv \times \qy^{\star}\bigr)$.
Moreover, since $\bar\epsilon = \epsilon_{0}(\pvy, T_{\text{MAP}})$ we conclude that $\qy = \qy^{\star}$ is an optimizer of \refE{meta}. While both $T_{\text{MAP}}$ and $T_{\text{NP}}$ attain the Neyman-Pearson performance, in general they are not the same test, as they may differ in the set of points that lead to a MAP test tie, i.e., the values of $y$ such that $|\Sc(y)|>1$.
\end{remark}
\section{Connection to Previous Converse Results}\label{sec:applications}
We next study the connection between Theorem~\ref{thm:mht-alpha} and previous converse results in the literature:
\subsubsection{The meta-converse bound}
In channel coding, one of $M$ equiprobable messages is to be sent over a channel with one-shot law $\pyx$. The encoder maps the source message $v\in\{1,\ldots,M\}$ to a codeword $x(v)$ using a specific codebook~$\Cc$. Since there is a codeword for each message, the distribution $\pv$ induces a distribution $\px^{\Cc}$ over the channel input. At the decoder, the decision among the $M$ possible transmitted codewords based on the channel output $y$ is equivalent to an $M$-ary hypothesis test with equiprobable hypotheses. The smallest error probability of this test for a codebook $\Cc$ is denoted as $\bar\epsilon(\Cc)$.
Fixing an arbitrary $\qy$ in \eqref{eqn:meta} and considering the codeword set instead of the message set, we obtain
\begin{align}\label{eqn:mht-alpha-bound-2}
\bar\epsilon(\Cc)
\geq \alpha_{\frac{1}{M}}
\bigl(\px^{\Cc}\times\pyx, \px^{\Cc} \times \qy\bigr),
\end{align}
namely the meta-converse bound of~\cite[Th.~26]{Pol09} for a given codebook and the choice $\qxy = \px^{\Cc} \times \qy$. Theorem \ref{thm:mht-alpha} thus shows that the meta-converse bound is tight for a fixed codebook after optimization over the auxiliary distribution~$\qy$.
\begin{figure}[t]
\centering
\input{figs/bounds-cc.tikz}
\caption{Channel coding error probability bounds for a BSC with cross-over probability $0.1$ and $M=4$ codewords.}\label{fig:BSC-CC-bounds}
\end{figure}
Upon optimization over $\qy$ and minimization over codebooks we obtain
\begin{align}
\min_{\Cc} \bar\epsilon(\Cc)
&= \min_{\px^{\Cc}} \max_{\qy} \left\{
\alpha_{\frac{1}{M}} \bigl(\px^{\Cc}\!\times\!\pyx,
\px^{\Cc}\!\times\!\qy \bigr) \right\}
\label{eqn:cc-bound-1}\\
&\geq \min_{\px} \max_{\qy} \left\{
\alpha_{\frac{1}{M}} \bigl(\px\!\times\!\pyx, \px\!\times\!\qy \bigr)\right\}.
\label{eqn:cc-bound-2}
\end{align}
The minimization in \refE{cc-bound-1} is done over the set of distributions induced by all possible codes, while the minimization in \refE{cc-bound-2} is done over the larger set of all possible distributions over the channel inputs. The bound in \refE{cc-bound-2} coincides with \cite[Th.~27]{Pol09}.
Fig. \ref{fig:BSC-CC-bounds} depicts the minimum error probability for the transmission of $M=4$ messages over $n$ independent, identically distributed channel uses of a memoryless binary symmetric channel (BSC) with single-letter cross-over probability $0.1$. We also include the meta-converse \refE{cc-bound-1}, computed for the best code~\cite[Th.~37]{moser2013} and $\qy=\qy^{\star}$, and the lower bound in \refE{cc-bound-2}. Here, we exploited the fact that for the BSC the saddlepoint in \refE{cc-bound-2} is attained for uniform $\px, \qy$ \cite[Th.~22]{Pol13}. The computation of \refE{cc-bound-1} and \refE{cc-bound-2} follows similar steps to those presented in \refS{example} for a different example.
It is interesting to observe that while \refE{cc-bound-1} characterizes the exact error probability, the weakening \refE{cc-bound-2} yields a much looser bound.
\subsubsection{Lower bound based on a bank of $M$ binary tests}
Eq. \eqref{eqn:meta} relates the error probability $\bar\epsilon$ to the type-$0$ error probability of a binary test between distributions $\pvy$ and $\qv^\star\times\qy$. Instead of a single binary test, it is also possible to consider a bank of $M$ binary hypothesis tests between distributions $P_{Y|V=v}$ and $\qy$~\cite{allerton12}. In this case, we can also express the average error probability of $M$-ary hypothesis testing as
\begin{equation}
\bar\epsilon=\max_{\qy} \left\{ \sum_v \pv(v) \,\alpha_{Q_{\hat{V}}^{\star}(v)} \bigl(P_{Y|V=v}, \qy\bigr) \right\}
\label{eqn:multi}
\end{equation}
where $Q_{\hat{V}}^{\star}(v) \triangleq \sum_{y}\qy(y) P_{\hat V|Y}^{\text{MAP}}(v|y)$;
see Appendix \ref{apx:Marymultiple}.
If instead of fixing $Q_{\hat{V}}^{\star}$, we minimize \refE{multi} with respect to an arbitrary $Q_{\hat{V}}$, \refE{multi} then recovers the converse bound~\cite[Lem.~2]{allerton12} for almost-lossless joint source-channel coding.
This lower bound is not tight in general as the minimizing distribution $Q_{\hat{V}}$ need not coincide with the distribution induced by the MAP decoder.
\subsubsection{Verd\'u-Han lower bound}
Weakening the identity in \eqref{eqn:tight-vh} for an arbitrary $\qy$ we obtain
\begin{align}
\bar\epsilon \geq \sup_{\gamma\geq 0} \left\{ \Pr\left[ \frac{ \pvy(V,Y)}{ \qy(Y) } \leq \gamma \right] - \gamma \right\}.
\label{eqn:vh}
\end{align}
By choosing $\qy=\py$ in \refE{vh} we recover the Verd\'u-Han lower bound in the channel~\cite[Th.~4]{VerHan94} and joint source-channel coding settings~\cite[Lem.~3.2]{Han07Joint}. The bound \eqref{eqn:vh} with arbitrary $\qy$ coincides with the Hayashi-Nagaoka lemma for classical-quantum channels~\cite[Lem.~4]{hayashi2003}, with its proof steps following exactly those of \cite[Th.~4]{VerHan94}. Theorem~\ref{thm:mht-alpha} shows that, by properly choosing $\qy$, this bound is tight in the classical setting.
\subsubsection{Wolfowitz's strong converse}
If we consider the hypothesis $v$ with smallest error probability in \eqref{eqn:tight-vh}, i.~e.,
\begin{align}
\bar\epsilon
&=
\maxp_{\qy} \sup_{\gamma\geq 0} \left\{ \sum_{v} \pv(v) \Pr\!\left[ \frac{ \pyv(Y|v) \pv(v)}{ \qy(Y) }\!\leq\!\gamma \right]- \gamma \right\} \label{eqn:mht-wolfowitz-pre}\\
&\geq
\maxp_{\qy} \sup_{\gamma\geq 0} \inf_v \left\{ \Pr\left[ \frac{ \pyv(Y|v) \pv(v)}{ \qy(Y) } \leq \gamma \right] - \gamma \right\},
\label{eqn:mht-wolfowitz}
\end{align}
we recover Wolfowitz's channel coding strong converse~\cite{Wolf68}.
Hence, this converse bound is tight as long as the bracketed term in \refE{mht-wolfowitz} does not depend on $v$ for the pair $\{\qy, \gamma\}$ optimizing \refE{mht-wolfowitz-pre}.
\subsubsection{Poor-Verd\'u lower bound}
By applying the following lemma, we recover the Poor-Verd\'u lower bound~\cite{PoorVer95} from Theorem~\ref{thm:mht-alpha}.
Let us denote by $\PP[\Ec]$ (resp. $\QQ[\Ec]$) the probability of the event $\Ec$ with respect to the underlying distribution $P$ (resp. $Q$).
\begin{lemma}\label{lem:alpha-relax-1}
For a pair of discrete distributions $\{P,Q\}$ defined over $\Yc$ and any $\gamma' \geq 0$, such that
\begin{align}
0\leq\beta\leq
\frac{\QQ\left[\frac{P(Y)}{Q(Y)}>\gamma'\right]}{ \PP\left[\frac{P(Y)}{Q(Y)}>\gamma'\right]},
\label{eqn:alpha-relax-assump}
\end{align}
the following result holds,
\begin{align}
\alpha_{\beta}\bigl(P, Q\bigr)
\geq (1-\gamma'\beta) \PP\left[\frac{P(Y)}{Q(Y)} \leq \gamma'\right].
\label{eqn:alpha-relax-2}
\end{align}
\end{lemma}
\begin{IEEEproof}
See Appendix \ref{apx:alpha-relax}.
\end{IEEEproof}
Using Lemma~\ref{lem:alpha-relax-1} with $\gamma' = \gamma M$, $P \leftarrow \pvy$ and $Q \leftarrow \qv \times \qy$ where $\qv$ is uniform, via \refE{meta}, we obtain
\begin{align}
\bar\epsilon \geq
(1-\gamma) \Pr\left[ \frac{ \pvy(V,Y)}{ \qy(Y) } \leq \gamma \right],
\label{eqn:pv}
\end{align}
provided that $\qy$ and $\gamma\geq0$ satisfy
\begin{align}
{\sum_{v,y} \pvy(v,y)
\openone\left\{\frac{ \pvy(v,y)}{ \qy(y)}>\gamma\right\}}
\leq
{\sum_{v,y} \qy(y)
\openone\left\{\frac{ \pvy(v,y)}{ \qy(y)}>\gamma\right\}}.\label{eqn:pv_cond}
\end{align}
This condition is fulfilled for any $\gamma \geq 0$ if $\qy=\py$ or $\qy = \qy^{\star}$ as defined in \refE{qyMAP-def}.
However, there exist pairs $\{\gamma,\qy\}$ for which \refE{pv_cond} does not hold.
For $\qy=\py$, and optimizing over $\gamma\geq 0$, \refE{pv} recovers the Poor-Verd\'u bound~\cite[Th.~1]{PoorVer95}. For $\qy=\qy^{\star}$ in \refE{qyMAP-def}, optimizing over $\gamma\geq 0$, \refE{pv} provides an expression similar to those in Theorem~\ref{thm:mht-alpha}:
\begin{align}
\bar\epsilon = \max_{\gamma\geq 0} \left\{(1-\gamma) \Pr\left[ \frac{ \pyv(Y|V) \pv(V)}{ \qy^{\star}(Y) } \leq \gamma \right] \right\}.
\label{eqn:tight-pv}
\end{align}
\subsubsection{Lossy source coding}
Finally, we consider a fixed-length lossy compression scenario, for which a converse based on hypothesis testing was recently obtained in~\cite[Th.~8]{Kost12}. The output of a general source $v$ with distribution $\pv$ is mapped to a codeword $w$ in a codebook $\Cc = \{w_1, w_2, \ldots, w_M\}$ with $w_1, w_2, \ldots, w_M$ belonging to the reconstruction alphabet $\Wc$. We define a non-negative real-valued distortion measure $d(v,w)$ and a maximum allowed distortion $D$. The excess distortion probability is thus defined as $\epsilon_d(\Cc, D)\triangleq \Pr\bigl[ d(V,W) > D \bigr]$.
Consider an encoder that maps the source message $v$ to the codeword $w$ with smallest pairwise distortion. The distortion associated with the source message $v$ is then
\begin{equation}
d(v,\Cc) \triangleq \min_{w\in\Cc} d(v,w).
\end{equation}
Consequently, the excess distortion probability is given by
\begin{align}
\epsilon_d(\Cc, D) = \sum_{v} \pv(v) \openone\bigl\{ d(v,\Cc) > D \bigr\}.
\label{eqn:lsc-eps-2}
\end{align}
Given the possible overlap between covering regions, there is no straightforward equivalence between the excess distortion probability and the error probability of an $M$-ary hypothesis test. We may yet define an alternative binary hypothesis test as follows. Given an observation $v$, we choose $\Hc_0$ if the encoder meets the maximum allowed distortion and $\Hc_1$ otherwise, i.e. the test is defined as
\begin{align}\label{eqn:lsc-test}
T_{\text{LSC}}(0|v)
= \openone\bigl\{ d(v,\Cc) \leq D \bigr\}.
\end{align}
Particularizing \refE{bht-type0error} and \refE{bht-type1error} with this test, yields
\begin{align}
\epsilon_{0}(\pv, T_{\text{LSC}})
&= \sum_{v} \pv(v) \openone\bigl\{ d(v,\Cc) > D \bigr\},
\label{eqn:lsc-error-0}\\
\epsilon_{1}(\qv, T_{\text{LSC}})
&= \sum_{v} \qv(v) \openone\bigl\{ d(v,\Cc) \leq D \bigr\}\\
&= \QQ[ d(V,\Cc) \leq D ],
\label{eqn:lsc-error-1}
\end{align}
where $\QQ[\Ec]$ denotes the probability of the event $\Ec$ with respect to the underlying distribution $\qv$.
As \refE{lsc-eps-2} and \refE{lsc-error-0}
coincide, $\epsilon_d(\Cc,D)$ can be lower-bounded by the type-$0$ error of a Neyman-Pearson test, i.e.,
\begin{align}\label{eqn:lsc-bound}
\epsilon_d(\Cc,D) \geq \max_{\qv} \Bigl\{
\alpha_{\QQ[ d(V,\Cc) \leq D ]} \bigl(\pv, \qv\bigr) \Bigr\}.
\end{align}
Moreover, \refE{lsc-bound} holds with equality, as the next result shows.
\begin{theorem}\label{thm:lossymetaconverseistight}
The excess distortion probability of lossy source coding with codebook $\Cc$ and maximum distortion $D$ satisfies
\begin{align}
\epsilon_d(\Cc,D)
&= \max_{\qv}
\Bigl\{\alpha_{\QQ[ d(V,\Cc) \leq D ]}
\bigl(\pv, \qv\bigr) \Bigr\}
\label{eqn:lsc-bound-tight}\\
&\geq \max_{\qv}
\Bigl\{\alpha_{M \sup_{w\in\Wc} \QQ\left[d(V,w) \leq D \right]}
\bigl(\pv, \qv\bigr) \Bigr\}.
\label{eqn:lsc-kostinathm8}
\end{align}
\end{theorem}
\begin{IEEEproof}
See Appendix \ref{apx:prooflossymetaconverseistight}.
\end{IEEEproof}
The right-hand-side of \refE{lsc-bound-tight} still depends on the codebook $\Cc$ through $\QQ[ d(V,\Cc) \leq D ]$. This dependence disappears in the relaxation \refE{lsc-kostinathm8}, recovering the converse bound in \cite[Th.~8]{Kost12}.
The weakness of \refE{lsc-kostinathm8} comes from relaxing the type-$1$ error in the bound to $M$ times the type-$1$-error contribution of the best possible codeword belonging to the reconstruction alphabet.
In almost-lossless coding, $D=0$, the error events for different codewords no longer overlap, and the problem naturally fits into the hypothesis testing paradigm. Moreover, when $\qv$ is assumed uniform we have that $\QQ\left[ d(V,w) \leq 0 \right] = \QQ\left[ V = w \right] = \frac{1}{|\Vc|}$ for any $w$ and, therefore, \refE{lsc-kostinathm8} is an equality.
\section*{Acknowledgement}
The authors would like to thank Sergio Verd\'u for multiple discussions. We would also thank Te Sun Han for providing the classical version of the Hayashi-Nagaoka's lemma.
\appendices
\section{Proof of Corollary \ref{cor:mht-mm}}
\label{apx:mht-mm}
For a binary hypothesis testing problem between the distributions $\pvy$ and $\qvy^{(q)}$ in \refE{qvy-def-mm} we define the test $T_q(0|v,y) \triangleq P^{(q)}_{\hat{V}|Y}(v|y)$.
We now show that the test $T_q$
achieves the same type-$0$ and type-$1$ error probabilities
as a NP test $T_{\text{NP}}$ in \refE{bht-NPtest}.
To this end, let us fix $\gamma = \mu'$ and
\begin{align}
p & = \frac{\sum_{y} \sum_{v\in\Sc_{q}(y)} \frac{1}{|\Sc_{q}(y)|} \pvy(v,y) }
{\sum_{y} \sum_{v\in\Sc_{q}(y)} \pvy(v,y)}\label{eqn:mr-p0map-def1}\\
& = \frac{\sum_{y} \sum_{v\in\Sc_{q}(y)} \frac{1}{|\Sc_{q}(y)|} \qvy^{(q)}(v,y) }
{\sum_{y} \sum_{v\in\Sc_{q}(y)} \qvy^{(q)}(v,y)},\label{eqn:mr-p0map-def2}
\end{align}
where equality between \eqref{eqn:mr-p0map-def1} and \eqref{eqn:mr-p0map-def2} holds since $\pvy(v,y) = \mu' \qvy^{(q)}(v,y)$ for all $y$, $v \in \Sc_{q}(y)$.
The type-$0$ error probability of the NP test \refE{bht-NPtest} with these values of $\gamma$ and $p$ is given by
\begin{align}
\epsilon_0(\pvy, T_{\text{NP}})
&= 1 - \sum_{v,y} \pvy(v,y) T_{\text{NP}}(0|v,y)
\label{eqn:mr-typeI-1}\\
&= 1 - \sum_{y} \sum_{v \in \Sc_{q}(y)} p \pvy(v,y)
\label{eqn:mr-typeI-2}\\
&= 1 - \sum_{y} \sum_{v \in \Sc_{q}(y)} \frac{1}{|\Sc_{q}(y)|} \pvy(v,y)
\label{eqn:mr-typeI-3}\\
&= 1 - \sum_{v,y} \pvy(v,y) T_q(0|v,y)
\label{eqn:mr-typeI-4}\\
&= \epsilon_0(\pvy, T_{q}),
\label{eqn:mr-typeI-5}
\end{align}
where in \eqref{eqn:mr-typeI-2} we used the definition of $T_{\text{NP}}$ in \refE{bht-NPtest} with $P\leftarrow \pvy$ and $Q\leftarrow \qvy^{(q)}$ and the definition of $\Sc_{q}(y)$ in \refE{qvy-def-mm}; \eqref{eqn:mr-typeI-3} follows from \eqref{eqn:mr-p0map-def1}, and \eqref{eqn:mr-typeI-4} follows from the definition of $T_q$. Analogously, the type-$1$ error probability of the NP test is
\begin{align}
\epsilon_1(\qvy^{(q)}, T_{\text{NP}})
&= \sum_{y} \sum_{v \in \Sc_{q}(y)} p \qvy^{(q)}(v,y)
\label{eqn:mr-typeII-1}\\
&= \sum_{y} \sum_{v \in \Sc_{q}(y)} \frac{1}{|\Sc_{q}(y)|} \qvy^{(q)}(v,y)
\label{eqn:mr-typeII-2}\\
&= \sum_{v,y} \qvy^{(q)}(v,y) T_q(0|v,y)
\label{eqn:mr-typeII-3}\\
&= \epsilon_1(\qvy^{(q)}, T_q),
\label{eqn:mr-typeII-4}
\end{align}
where
\eqref{eqn:mr-typeII-2} follows from \eqref{eqn:mr-p0map-def2};
and \eqref{eqn:mr-typeII-3} follows from the definition of $T_q$.
Then, using \eqref{eqn:mr-typeI-1}-\eqref{eqn:mr-typeI-5} and
\eqref{eqn:mr-typeII-1}-\eqref{eqn:mr-typeII-4}, we obtain
\begin{align}
\alpha_{\epsilon_{1}\bigl(\qvy^{(q)},T_{q}\bigr)} \bigl(\pvy,\qvy^{(q)}\bigr) &=\epsilon_{0}(\pvy, T_{\text{NP}}) \label{eqn:mr-chain-1}\\
&=\epsilon_{0}(\pvy, T_{q}). \label{eqn:mr-chain-2}
\end{align}
Noting that $\bar\epsilon\bigl(P^{(q)}_{\hat{V}|Y}\bigr)$ and $\epsilon_{0}(\pvy, T_{q})$ coincide by definition, then \refE{meta-suboptimal} holds with equality for $\qvy =\qvy^{(q)}$.
Applying Lemma~\ref{lem:alpha-relax-2} to \refE{meta-suboptimal} and fixing $\qvy =\qvy^{(q)}$ yields
\begin{align}
\bar\epsilon\Bigl(P^{(q)}_{\hat{V}|Y}\Bigr) \geq \sup_{\gamma'\geq 0} \Biggl\{ \Pr\left[ \frac{\pvy(V,Y)}{ \qvy^{(q)}(V,Y) } \leq \gamma' \right] - \gamma' \epsilon_{1}\Bigl(\qvy^{(q)},P^{(q)}_{\hat{V}|Y}\Bigr) \Biggr\}. \label{eqn:mr-chain-3}
\end{align}
Choosing $\gamma' = \mu'$ in \refE{mr-chain-3} direct computation shows
that
\begin{align}
\Pr\left[ \frac{\pvy(V,Y)}{ \qvy^{(q)}(V,Y) } \!\leq\! \mu' \right]
&= \Pr\left[ {q(V,Y)}\!\leq\!{\max_{v'} q(v',Y)} \right]
\label{eqn:mr-chain-4a}\\
&= 1
\label{eqn:mr-chain-4b}
\end{align}
and
\begin{align}
\mu'\epsilon_{1}\Bigl(\qvy^{(q)},P^{(q)}_{\hat{V}|Y}\Bigr)
&= \sum_{v,y} \pvy(v,y) \frac{\max_{v'} q(v',y)}{q(v,y)} P^{(q)}_{\hat{V}|Y}(v|y)
\label{eqn:mr-chain-5a}\\
&= \sum_{v,y} \pvy(v,y) P^{(q)}_{\hat{V}|Y}(v|y),
\label{eqn:mr-chain-5b}
\end{align}
where in \refE{mr-chain-5b} we have used that $P^{(q)}_{\hat{V}|Y}(v|y) \neq 0$ implies $q(v,y) = \max_{v'} q(v',y)$. Therefore, substituting \refE{mr-chain-4a}-\refE{mr-chain-4b} and \refE{mr-chain-5a}-\refE{mr-chain-5b} in \refE{mr-chain-3}, and using the definition of $\bar\epsilon(P_{\hat{V}|Y})$ in \refE{mht-epsdef-2}, we conclude that \refE{mr-chain-3} holds with equality, and so does \refE{tight-vh-suboptimal} with $\qvy =\qvy^{(q)}$.
\section{Proof of Lemmas \ref{lem:alpha-relax-2} and \ref{lem:alpha-relax-1}}
\label{apx:alpha-relax}
Consider a binary hypothesis test between distributions $P$ and $Q$ defined over the alphabet $\Yc$. Let us denote by $\PP[\Ec]$ the probability of the event $\Ec$ with respect to the underlying distribution $P$, and $\QQ[\Ec]$ that with respect to $Q$.
For the sake of clarity we assume that, for a given type-$1$ error $\beta$, the term $p$ in \refE{bht-NPtest} is equal to zero. The proof easily extends to arbitrary $p$, although with more complicated notation. Then, there exists $\gamma^{\star}$ such that
\begin{align}\label{eqn:betaNPtest}
\beta = \QQ\left[ \frac{P(Y)}{Q(Y)} > \gamma^{\star} \right],
\end{align}
and the NP lemma yields
\begin{align}\label{eqn:alphaNPtest}
\alpha_{\beta}(P,Q) = \PP\left[ \frac{P(Y)}{Q(Y)} \leq \gamma^{\star} \right].
\end{align}
For $0 \leq \gamma' < \gamma^{\star}$, $\PP\left[ \frac{P(Y)}{Q(Y)} \leq \gamma' \right] \leq \PP\left[ \frac{P(Y)}{Q(Y)} \leq \gamma^{\star} \right] = \alpha_{\beta}(P,Q)$. Then both Lemmas \ref{lem:alpha-relax-2} and \ref{lem:alpha-relax-1} hold trivially.
For $\gamma' \geq \gamma^{\star}$ it follows that
\begin{align}
\alpha_{\beta}(P,Q)
&= \PP\left[ \frac{P(Y)}{Q(Y)}\!\leq\! \gamma' \right]
- \PP\left[ \gamma^{\star}\!<\!\frac{P(Y)}{Q(Y)} \leq \gamma' \right]\label{eqn:proofbound2_1} \\
&\geq \PP\left[ \frac{P(Y)}{Q(Y)}\!\leq\! \gamma' \right]
- \gamma' \QQ\left[ \gamma^{\star}\!<\! \frac{P(Y)}{Q(Y)} \leq \gamma' \right]
\label{eqn:proofbound2_2}\\
&= \PP\left[ \frac{P(Y)}{Q(Y)}\!\leq\!\gamma' \right]
- \gamma' \Biggl( \QQ\left[ \frac{P(Y)}{Q(Y)} > \gamma^{\star} \right]
- \QQ\left[\frac{P(Y)}{Q(Y)} > \gamma' \right] \Biggr), \label{eqn:proofbound2_3}
\end{align}
where \refE{proofbound2_2} follows by noting that in the interval considered $P(y) < \gamma' Q(y)$.
Lemma \ref{lem:alpha-relax-2} follows from \refE{proofbound2_3} by lower bounding $\QQ\left[\frac{P(Y)}{Q(Y)} > \gamma' \right] \geq 0$ and using \refE{betaNPtest}.
In order to prove Lemma \ref{lem:alpha-relax-1},
we shall use in \refE{proofbound2_3} the tighter lower bound
\begin{align}
\QQ\left[\frac{P(Y)}{Q(Y)} > \gamma' \right]
\geq \beta \PP\left[\frac{P(Y)}{Q(Y)} > \gamma' \right],
\end{align}
which holds by the assumption in \refE{alpha-relax-assump}.
\section{One Test versus Multiple Tests}\label{apx:Marymultiple}
In this appendix, we prove the equivalence between the optimization problems in \refE{meta} and \refE{multi}.
First, note that the argument of the maximization in \refE{multi} can be written in terms of tests $T_v$ for fixed $v$ as
\begin{align}
\sum_v\pv(v)&\alpha_{Q_{\hat{V}}(v)}\bigl(P_{Y|V=v},\qy\bigr)\notag\\
&= \sum_v \pv(v)
\min_{T_v: \epsilon_{1}(\qy, T_v) \leq Q_{\hat{V}}(v)}
\Big\{ \epsilon_{0}(P_{Y|V=v}, T_v) \Big\}
\label{eqn:mht-cor-dem-1}\\
&= \sum_{v} \pv(v) \max_{\lambda(v) \geq 0} \min_{T_v} \Biggl\{
\sum_{y} P_{Y|V}(y|v) T_v(1|y)
- \lambda(v) \Biggl( \sum_{y'} \qy(y') T_v(0|y') - Q_{\hat{V}}(v) \Biggr) \Biggr\},
\label{eqn:mht-cor-dem-2}
\end{align}
where \refE{mht-cor-dem-1} follows from the definition of $\alpha_{(\cdot)} (\cdot)$, and
in \refE{mht-cor-dem-2} we used the definitions of the type-$0$ and type-$1$ errors and introduced the constraints into the objective by means of the Lagrange multipliers $\lambda(v)$.
Similarly, from \refE{meta} we have that
\begin{align}
&\max_{\qy} \; \alpha_{\frac{1}{M}}\left(\pvy, \qv \times \qy\right)\notag\\
&= \max_{\qv\times\qy} \alpha_{\epsilon_{1}(\qv\times\qy, T_{\text{MAP}})}\left(\pvy, \qv\times\qy\right)
\label{eqn:mht-cor-dem-3}\\
&= \max_{\qy} \max_{\eta\geq 0} \max_{\qv} \min_{T} \Biggl\{\sum_{v,y} \pvy(v,y) T(1|v,y)
+ \eta \Biggl( \sum_{v',y'} \qv(v')\qy(y') \left(T(0|v',y') - P_{\hat V|Y}^{\text{MAP}}(v'|y') \right) \Biggr)\Biggr\}
\label{eqn:mht-cor-dem-4}\\
&= \max_{\qy} \sum_v \pv(v) \max_{\bar\lambda(v) \geq 0} \min_{T} \Biggl\{ \sum_{y} \pyv(y|v) T(1|v,y)
+ \bar\lambda(v) \Biggl( \sum_{y'}\qy(y') T(0|v,y') - Q_{\hat{V}}(v) \Biggr)\Biggr\},
\label{eqn:mht-cor-dem-5}
\end{align}
where \refE{mht-cor-dem-3} follows as $\qv$ uniform is a maximizer of the RHS of \refE{mht-cor-dem-3}; in \refE{mht-cor-dem-4} we used the definition of $\alpha_{(\cdot)}(\cdot)$, and introduced the constraint into the objective by means of the Lagrange multiplier $\eta$; and in \refE{mht-cor-dem-5} we rearranged terms and defined
\begin{align}
\bar\lambda(v) \triangleq \frac{\eta\qv(v)}{\pv(v)}.
\end{align}
The result follows from \refE{mht-cor-dem-2} and \refE{mht-cor-dem-5} by optimizing \refE{mht-cor-dem-2} over $\qy$ and identifying $T(i|v,y) \equiv T_v(i|y)$, $i=0,1$.
\section{Proof of Theorem \ref{thm:lossymetaconverseistight}}
\label{apx:prooflossymetaconverseistight}
We define
\begin{equation}
\qv^{\Cc}(v) \triangleq \frac{1}{\mu''} \openone\bigl\{ d(v,\Cc) > D \bigr\},
\end{equation}
with $\mu''$ a normalization constant.
The NP test \refE{bht-NPtest} with $P \leftarrow \pv$, $Q \leftarrow \qv^{\Cc}$, $\gamma = \mu''$, $p=1$, particularizes to
\begin{align}\label{eqn:lossyNPtest}
T_{\text{NP}} (0|v) =
\begin{cases}
1, & \text{ if } \pv(v) \geq \openone\bigl\{ d(v,\Cc) > D \bigr\},\\
0, & \text{ otherwise}.
\end{cases}
\end{align}
Assuming that $\pv(v)<1$ for all $v$, eq. \refE{lossyNPtest} reduces to
\begin{align}\label{eqn:lossyNPtestbis}
T_{\text{NP}} (0|v)
&= \openone\bigl\{ d(v,\Cc) \leq D \bigr\}\\
&= T_{\text{LSC}}(0|v).
\end{align}
That is, for $\qv=\qv^{\Cc}$, the test $T_{\text{LSC}}$ defined in \refE{lsc-test} is optimal in the Neyman-Pearson sense. Then it holds that
\begin{align}
\max_{\qv} \left\{
\alpha_{ \epsilon_{1}(\qv, T_{\text{LSC}}) } \bigl(\pv, \qv\bigr) \right\}
&\geq \alpha_{ \epsilon_{1}(\qv^{\Cc}, T_{\text{LSC}}) } \bigl(\pv, \qv^{\Cc}\bigr)
\label{eqn:lossy-chain-1}\\
&= \epsilon_{0}\bigl(\pv, T_{\text{LSC}}\bigr)
\label{eqn:lossy-chain-2}\\
&= \epsilon_d(\Cc,D),
\label{eqn:lossy-chain-3}
\end{align}
where the last step follows since \refE{lsc-eps-2} and \refE{lsc-error-0}
coincide.
From \refE{lsc-bound} and \refE{lossy-chain-1}-\refE{lossy-chain-2}, the equality
\refE{lsc-bound-tight} follows by noting that
$\epsilon_{1}(\qv, T_{\text{LSC}}) = \QQ[ d(V,\Cc) \leq D ]$.
Let $P_{W|V}$ denote the encoder that maps the source message $v$ to the codeword $w\in\Cc$ with smallest pairwise distortion. The lower bound \refE{lsc-kostinathm8} follows from the fact that
\begin{align}
\epsilon_{1}(\qv, T_{\text{LSC}})
&=\sum_{v} \qv(v) \openone\left\{ d(v,\Cc) \leq D \right\}
\label{eqn:lossy-epsII-1}\\
&=\sum_{v} \qv(v) \sum_{w} P_{W|V}(w|v)
\openone\left\{ d(v,w) \leq D \right\}
\label{eqn:lossy-epsII-2}\\
&\leq \sum_{w\in\Cc} \sum_{v} \qv(v) \openone\left\{ d(v,w) \leq D \right\}
\label{eqn:lossy-epsII-3}\\
&\leq M \sup_{w\in\Cc} \sum_{v} \qv(v) \openone\left\{ d(v,w) \leq D \right\}
\label{eqn:lossy-epsII-4}\\
&\leq M \sup_{w\in\Wc} \sum_{v} \qv(v) \openone\left\{ d(v,w) \leq D \right\},
\label{eqn:lossy-epsII-5}
\end{align}
where in \refE{lossy-epsII-3} we used that $P_{W|V}(w|v)=0$ for $w\notin\Cc$
and that $P_{W|V}(w|v)\leq 1$ for $w\in\Cc$; \refE{lossy-epsII-4} follows from considering the largest term in the sum, and in \refE{lossy-epsII-5} we relaxed the set over which the maximization is performed.
\balance
\bibliographystyle{IEEEtran}
\section{Introduction}
\label{sec:intro}
Estimating entropy and mutual information in a consistent manner is of
importance in a number of problems in machine learning. For example, entropy
estimators have applications in
goodness-of-fit testing \cite{goria05new},
parameter estimation in semi-parametric models \cite{Wolsztynski85minimum},
studying fractal random walks \cite{Alemany94fractal},
and texture classification \cite{hero02alpha,hero2002aes}.
Mutual information estimators have applications in
feature selection \cite{peng05feature},
clustering \cite{aghagolzadeh07hierarchical},
causality detection \cite{Hlavackova07causality},
optimal experimental design \cite{lewi07realtime, poczos09identification},
f\acro{MRI} data processing \cite{chai09exploring},
prediction of protein structures \cite{adami04information},
and boosting and facial expression recognition \cite{Shan05conditionalmutual}.
Both entropy estimators and mutual information estimators have been used for
independent component and subspace analysis
\cite{radical03,szabo07undercomplete_TCC, poczos05geodesic,Hulle08constrained},
as well as for image registration
\cite{kybic06incremental,hero02alpha,hero2002aes}.
For further applications, see
\cite{Leonenko-Pronzato-Savani2008}.
In this paper, we focus on the problem of estimating the Shannon entropy of
a continuous random variable given samples from its distribution. All of our
results extend to the estimation of mutual information, since the latter can be
written as a sum of entropies.
\footnote{Specifically, for random variables $X$ and $Y$,
$I(X; Y) = H(X) + H(Y) - H(X, Y)$.}
In our setting, we assume we are given $n$ IID
samples from an unknown probability measure $P$. Under nonparametric
assumptions (on the smoothness and tail behavior of $P$), our task is then to
estimate the differential Shannon entropy of $P$.
Estimators of entropy and mutual information come in many forms (as reviewed
in Section \ref{sec:related}), but one common approach is based on statistics
of $k$-nearest neighbor ($k$-NN) distances (i.e., the distance from a sample to
its $k^{th}$ nearest neighbor amongst the samples, in some metric on the
space). These nearest-neighbor estimates are largely based on initial work by
\citet{kozachenko87statistical}, who proposed an estimate for differential
Shannon entropy and showed its weak consistency. Henceforth, we refer to this
historic estimator as the `KL estimator', after its discoverers. Although there
has been much work on the problem of entropy estimation in the nearly three
decades since the KL estimator was proposed, there are still major open
questions about the finite-sample behavior of the KL estimator. The goal of
this paper is to address some of these questions in the form of finite-sample
bounds on the bias and variance of the estimator.
Specifically, our {\bf main contributions} are the following:
\begin{enumerate}
\item
We derive
$O \left( \left( k/n \right)^{\beta/D} \right)$ bounds on the bias of the KL
estimate, where $\beta$ is a measure of the smoothness (i.e., H\"older
continuity) of the sampling density, $D$ is the intrinsic dimension of the
support of the distribution, and $n$ is the sample size.
\item
We derive $O \left( n\inv \right)$ bounds on the variance of the KL estimator.
\item
We derive concentration inequalities for $k$-NN distances, as well as general
bounds on expectations of $k$-NN distance statistics, with important special
cases:
\begin{enumerate}
\item
We bound the moments of $k$-NN distances, which play a role in analysis of many
applications of $k$-NN methods, including both the bias and variance of the KL
estimator. In particular, we significantly relax strong assumptions underlying
previous results by \citet{evans02KNNmoments}, such as compact support and
smoothness of the sampling density. Our results are also the first which apply
to negative moments (i.e., $\E \left[ X^\alpha \right]$ with $\alpha < 0$);
these are important for bounding the variance of the KL estimator.
\item
We give upper and lower bounds on the logarithms of $k$-NN distances. These are
important for bounding the variance of the KL estimator, as well as $k$-NN
estimators for divergences and mutual informations.
\end{enumerate}
\end{enumerate}
We present our results in the general setting of a set equipped with a metric,
a base measure, a probability density, and an appropriate definition of
dimension. This setting subsumes Euclidean spaces, in which $k$-NN methods have
traditionally been analyzed,
\footnote{A recent exception in the context of classification, is
\citet{chaudhuri14KNNrates} which considers general metric spaces.}
but also includes, for instance, Riemannian manifolds, and perhaps other
spaces of interest. We also strive to weaken some of the restrictive
assumptions, such as compact support and boundedness of the density, on which
most related work depends.
We anticipate that some of the tools developed here may be used to derive
error bounds for $k$-NN estimators of mutual information, divergences
\citep{Wang-Kulkarni-Verdu2009}, their generalizations (e.g., R\'enyi and
Tsallis quantities \citep{Leonenko-Pronzato-Savani2008}), norms, and other
functionals of probability densities. We leave such bounds to future work.
\subsection*{Organization}
Section \ref{sec:related} discusses related work. Section \ref{sec:setting}
gives theoretical context and assumptions underlying our work. In Section
\ref{sec:concentration}, we prove concentration bounds for $k$-NN distances,
and we use these in Section \ref{sec:KNN_stats} to derive bounds on the
expectations of $k$-NN distance statistics. Section \ref{sec:KL_est} describes
the KL estimator, for which we prove bounds on the bias and variance in
Sections \ref{sec:bias_bound} and \ref{sec:variance_bound}, respectively.
\section{Related Work}
\label{sec:related}
Here, we review previous work on the analysis of $k$-nearest neighbor
statistics and their role in estimating information theoretic functionals, as
well as other approaches to estimating information theoretic functionals.
\subsection{The Kozachenko-Leonenko Estimator of Entropy}
In general contexts, only weak consistency of the KL estimator is known
\citep{kozachenko87statistical}.
\citet{biau15EntropyKNN} recently reviewed finite-sample results known for
the KL estimator. They show (Theorem 7.1) that, if the density $p$ has compact
support, then the variance of the KL estimator decays as $O(n\inv)$. They also
claim (Theorem 7.2) to bound the bias of the KL estimator by $O(n^{-\beta})$,
under the assumptions that $p$ is $\beta$-H\"older continuous
($\beta \in (0, 1]$), bounded away from $0$, and supported on the interval
$[0, 1]$. However, in their proof \citet{biau15EntropyKNN} neglect the
additional bias incurred at the boundaries of $[0, 1]$, where the density cannot
simultaneously be bounded away from $0$ and continuous. In fact, because the KL
estimator does not attempt to correct for boundary bias, for densities bounded
away from $0$, the estimator may suffer bias worse than $O(n^{-\beta})$.
The KL estimator is also important for its role in the mutual information
estimator proposed by \citet{Kraskov04estimating}, which we refer to as the KSG
estimator. The KSG estimator expands the mutual information as a sum of
entropies, which it estimates via the KL estimator with a particular
\emph{random} (i.e., data-dependent) choice of the nearest-neighbor parameter
$k$. The KSG estimator is perhaps the most widely used estimator for the mutual
information between continuous random variables, despite the fact that it
currently appears to have no theoretical guarantees, even asymptotically. In
fact, one of the few theoretical results, due to
\citet{gao15stronglyDependent}, concerning the KSG estimator is a negative
result: when estimating the mutual information between strongly dependent
variables, the KSG estimator tends to systematically underestimate mutual
information, due to increased boundary bias.
\footnote{To alleviate this, \citet{gao15stronglyDependent} provide a heuristic
correction based on using local PCA to estimate the support of the
distribution. \citet{gao15localGaussian} provide and prove asymptotic
unbiasedness of another estimator, based on local Gaussian density estimation,
that directly adapts to the boundary.}
Nevertheless, the widespread use of the KSG estimator motivates study of its
behavior. We hope that our analysis of the KL estimator, in terms of which the
KSG estimator can be written, will lead to a better understanding of the
latter.
\subsection{Analysis of nearest-neighbor distance statistics}
\citet{evans08SLLNforKNN} derives a law of large numbers for $k$-NN statistics
with uniformly bounded (central) kurtosis as the sample size $n \to \infty$.
Although it is not obvious that the kurtosis of $\log$-$k$-NN distances is
uniformly bounded (indeed, each $\log$-$k$-NN distance approaches $-\infty$
almost surely), we show in Section \ref{sec:variance_bound} that this is indeed
the case, and we apply the results of \citet{evans08SLLNforKNN} to bound the
variance of the KL estimator.
\citet{evans02KNNmoments} derives asymptotic limits and convergence rates for
moments of $k$-NN distances, for sampling densities with bounded derivatives
and compact domain. In contrast, we use weaker assumptions to simply prove
bounds on the moments of $k$-NN distances. Importantly, whereas the results of
\citet{evans02KNNmoments} apply only to non-negative moments (i.e.,
$\E \left[ |X|^\alpha \right]$ with $\alpha \geq 0$), our results also hold for
certain negative moments, which is crucial for our bounds on the variance of
the KL estimator.
\subsection{Other Approaches to Estimating Information Theoretic Functionals}
{\bf Analysis of convergence rates:}
For densities over $\R^D$ satisfying a H\"older smoothness condition
parametrized by $\beta \in (0, \infty)$, the minimax rate for estimating
entropy has been known since \citet{birge95estimation} to be
$O \left( n^{-\min \left\{ \frac{8\beta}{4\beta + D}, 1 \right\}} \right)$ in
mean squared error, where $n$ is the sample size.
Quite recently, there has been much work on analyzing new estimators for
entropy, mutual information, divergences, and other functionals of densities.
Most of this work has been along one of three approaches. One series of papers
\citep{liu12exponential,singh14divergence,singh14densityfuncs} studied
boundary-corrected plug-in approach based on under-smoothed kernel density
estimation. This approach has strong finite sample guarantees, but requires
prior knowledge of the support of the density and can necessitate
computationally demanding numerical integration. A second approach
\citep{krishnamurthy14divergences,kandasamy15vonMises} uses von Mises expansion
to correct the bias of optimally smoothed density estimates. This approach
shares the difficulties of the previous approach, but is statistically more
efficient. Finally, a long line of work
\citep{perez08estimation,pal10estimation,sricharan12ensemble,sricharan10confidence,moon14ensemble}
has studied entropy estimation based on continuum limits of certain properties
of graphs (including $k$-NN graphs, spanning trees, and other sample-based
graphs).
Most of these estimators achieve rates of
$O \left( n^{-\min \left\{ \frac{2\beta}{\beta + D}, 1 \right\}} \right)$
or $O \left( n^{-\min \left\{ \frac{4\beta}{2\beta + D}, 1 \right\}} \right)$.
Only the von Mises approach of \citet{krishnamurthy14divergences} is
known to achieve the minimax rate for general $\beta$ and $D$, but due to its
high computational demand ($O(2^D n^3)$), the authors suggest the use
of other statistically less efficient estimators for moderately sized datasets.
In this paper, we prove that, for $\beta \in (0, 2]$, the KL estimator
converges at the rate
$O \left( n^{-\min \left\{ \frac{4\beta}{2\beta + D}, 1 \right\}} \right)$.
It is also worth noting the relative computational efficiency of the KL
estimator ($O \left( D n^2 \right)$, or $O \left( 2^D n \log n \right)$ using
$k$-d trees for small $D$).
{\bf Boundedness of the density:}
For all of the above approaches, theoretical finite-sample results known so far
assume that the sampling density is lower and upper bounded by positive
constants. This also excludes most distributions with unbounded support, and
hence, many distributions of practical relevance. A distinctive feature of our
results is that they hold for a variety of densities that approach $0$ and
$\infty$ on their domain, which may be unbounded. Our bias bounds apply, for
example, to densities that decay exponentially, such as Gaussian distributions.
To our knowledge, the only previous results that apply to unbounded densities
are those of \citet{tsybakov96rootn}, who show $\sqrt{n}$-consistency of a
truncated modification of the KL estimate for a class of functions with
exponentially decaying tails. In fact, components of our analysis are inspired
by \citet{tsybakov96rootn}, and some of our assumptions are closely
related. Their analysis only applies to the case $\beta = 2$ and $D = 1$, for
which our results also imply $\sqrt{n}$-consistency, so our results can be seen
in some respects as a generalization of this work.
\section{Setup and Assumptions}
\label{sec:setting}
While most prior work on $k$-NN estimators has been restricted to $\R^D$, we
present our results in a more general setting. This includes, for example,
Riemannian manifolds embedded in higher dimensional spaces, in which case we
note that our results depend on the \emph{intrinsic}, rather than
\emph{extrinsic}, dimension. Such data can be better behaved in their native
space than when embedded in a lower dimensional Euclidean space (e.g., working
directly on the unit circle avoids boundary bias caused by mapping data to the
interval $[0, 2\pi]$).
\begin{definition}
{\bf (Metric Measure Space):}
A quadruple $(\XX, d, \Sigma, \mu)$ is called a \emph{metric measure space} if
$\XX$ is a set, $d : \XX \times \XX \to [0, \infty)$ is a metric on $\XX$,
$\Sigma$ is a $\sigma$-algebra on $\XX$ containing the Borel $\sigma$-algebra
induced by $d$, and $\mu : \Sigma \to [0, \infty]$ is a $\sigma$-finite measure
on the measurable space $(\XX, \Sigma)$.
\label{def:metric_meas_space}
\end{definition}
\begin{definition}
{\bf (Dimension):}
A metric measure space $(\XX, d, \Sigma, \mu)$ is said to have \emph{dimension}
$D \in [0, \infty)$ if there exist constants $c_D, \rho > 0$ such that,
$\forall r \in [0, \rho]$, $x \in \X$, $\mu(B(x, r)) = c_D r^D$.
\footnote{Here and in what follows, $B(x, r) := \{y \in \X : d(x, y) < r\}$
denotes the open ball of radius $r$ centered at $x$.}
\label{def:dim}
\end{definition}
\begin{definition}
{\bf (Full Dimension):}
Given a metric measure space $(\XX, d, \Sigma, \mu)$ of dimension $D$, a
measure $P$ on $(\XX, \Sigma)$ is said to have \emph{full dimension} on a set
$\X \subseteq \XX$ if there exist functions
$\gamma_*, \gamma^* : \X \to (0, \infty)$ such that, for all $r \in [0,\rho]$
and $\mu$-almost all $x \in \X$,
\[\gamma_*(x)r^D \leq P(B(x, r)) \leq \gamma^*(x) r^D.\]
\label{def:full_dim}
\end{definition}
\begin{remark}
If $\XX = \R^D$, $d$ is the Euclidean metric, and $\mu$
is the Lebesgue measure, then the dimension of the metric measure space is $D$.
However, if $\XX$ is a lower dimensional subspace of $\R^D$, then the dimension
may be less than $D$. For example, if
$\XX = \mathbb{S}_{D - 1} := \{x \in \R^D : \|x\|_2 = 1\}$, $d$ is the
geodesic distance on $\mathbb{S}_{D - 1}$, and $\mu$ is the
$(D - 1)$-dimensional surface measure, then the dimension is $D - 1$.
\end{remark}
\begin{remark}
In previous work on $k$-NN statistics
\citep{evans02KNNmoments, biau15EntropyKNN} and estimation of information
theoretic functionals
\citep{sricharan10confidence,krishnamurthy14divergences,singh14divergence,moon14ensemble},
it has been common to make the assumption that the sampling distribution has
full dimension with \emph{constant} $\gamma_*$ and $\gamma^*$ (or, equivalently,
that the density is lower and upper bounded by positive constants). This
excludes distributions with densities approaching $0$ or $\infty$ on their
domain, and hence also densities with unbounded support. By letting $\gamma_*$
and $\gamma^*$ be functions, our results extend to unbounded densities that
instead satisfy certain tail bounds.
\end{remark}
In order to ensure that entropy is well defined, we assume that $P$ is a
probability measure absolutely continuous with respect to $\mu$, and that its
probability density function $p : \X \to [0, \infty)$ satisfies
\footnote{See \cite{baccetti13infiniteEntropy} for discussion of sufficient
conditions for $H(p) < \infty$.}
\begin{equation}
H(p)
:= -\E_{X \sim P} \left[ \log p(X) \right]
= -\int_\X p(x) \log p(x) \, d\mu(x) \in \R.
\label{eq:entropy}
\end{equation}
Finally, we assume we have $n + 1$ samples
$X,X_1,...,X_n$ drawn IID from $P$. We would like to use these samples to
estimate the entropy $H(p)$ as defined in Equation (\ref{eq:entropy}).
Our analysis and methods relate to the $k$-nearest neighbor distance $\e_k(x)$,
defined for any $x \in \X$ by $\e_k(x) = d(x, X_i)$, where $X_i$ is the
$k^{th}$-nearest neighbor of $x$ in the set $\{X_1,...,X_n\}$. Note that, since
the definition of dimension used precludes the existence of atoms (i.e.,
for all $x \in \X$, $p(x) = \mu(\{x\}) = 0$), $\e_k(x) > 0$, $\mu$-almost
everywhere. This is important, since we will study $\log \e_k(x)$.
Initially (i.e., in Sections \ref{sec:concentration} and \ref{sec:KNN_stats}),
we will study $\log \e_k(x)$ with fixed $x \in \X$, for which we will derive
bounds in terms of $\gamma_*(x)$ and $\gamma^*(x)$. When we apply these results
to analyze the KL estimator in Section \ref{sec:bias_bound} and
\ref{sec:variance_bound}, we will need to take expectations such as
$\E \left[ \log \e_k(X) \right]$ (for which we reserve the extra sample $X$),
leading to `tail bounds' on $p$ in terms of the functions $\gamma_*$ and
$\gamma^*$.
\section{Concentration of $k$-NN Distances}
\label{sec:concentration}
We begin with a consequence of the multiplicative Chernoff bound, asserting
a sort of concentration of the distance of any point in $\X$ from its
$k^{th}$-nearest neighbor in $\{X_1,\dots,X_n\}$. Since the results of this
section are concerned with fixed $x \in \X$, for notational simplicity, we
suppress the dependence of $\gamma_*$ and $\gamma^*$ on $x$.
\begin{lemma}
Let $(\XX, d, \Sigma, \mu)$ be a metric measure space of dimension $D$. Suppose
$P$ is an absolutely continuous probability measure with full dimension on
$\X \subseteq \XX$ and density function $p : \X \to [0, \infty)$.
For $x \in \X$, if
$r \in \left[\left( \frac{k}{\gamma_* n} \right)^{1/D}, \rho \right]$, then
\[\pr \left[ \e_k(x) > r \right]
\leq e^{-\gamma_* r^D n} \left( e\frac{\gamma_* r^D n}{k} \right)^k.\]
and, if
$r \in \left[ 0,
\min\left\{\left( \frac{k}{\gamma^* n} \right)^{1/D},
\rho \right\} \right]$, then
\[\pr \left[ \e_k(x) \leq r \right]
\leq \left( \frac{e \gamma^* r^D n}{k} \right)^{k\gamma_*/\gamma^*}.\]
\label{lemma:KNN_concentration}
\end{lemma}
\section{Bounds on Expectations of KNN Statistics}
\label{sec:KNN_stats}
Here, we use the concentration bounds of Section \ref{sec:concentration} to
bound expectations of functions of $k$-nearest neighbor distances. Specifically,
we give a simple formula for deriving bounds that applies to many functions of
interest, including logarithms and (positive and negative) moments. As in the
previous section, the results apply to a fixed $x \in \X$, and we continue to
suppress the dependence of $\gamma_*$ and $\gamma^*$ on $x$.
\begin{theorem}
Let $(\XX, d, \Sigma, \mu)$ be a metric measure space of dimension $D$. Suppose
$P$ is an absolutely continuous probability measure with full dimension and
density function $p : \X \to [0, \infty)$ that satisfies the tail condition
\footnote{Since $f$ need not be surjective, we use the generalized inverse
$f\inv : \R \to [0, \infty]$ defined by
$f\inv(\e) := \inf \{x \in (0, \infty) : f(x) \geq \e\}$.}
\begin{equation}
\E_{X \sim P}
\left[ \int_\rho^\infty \left[ 1 - P(B(X, f\inv(r))) \right]^n \, dr \right]
\leq \frac{C_T}{n}
\label{eq:tail_condition}
\end{equation}
for some constant $C_T > 0$. Suppose $f : (0, \infty) \to \R$ is continuously
differentiable, with $f' > 0$. Fix $x \in \X$. Then, we have the upper bound
\begin{align}
\label{ineq:KNN_functional_upper}
& \E \left[ f_+(\e_k(x)) \right]
\leq f_+\left( \left( \frac{k}{\gamma_* n} \right)^{\frac{1}{D}} \right)
+ \frac{C_T}{n} \\
& + \frac{(e/k)^k}{D(n\gamma_*)^{\frac{1}{D}}}
\int_k^\infty
e^{-y} y^{k + \frac{1}{D} - 1}
f' \left( \left( \frac{y}{n \gamma_*} \right)^{\frac{1}{D}} \right)
\, dy
\notag
\end{align}
and the lower bound
\begin{align}
\notag
& \E \left[ f_-(\e_k(x)) \right]
\leq f_-\left( \left( \frac{k}{\gamma^* n} \right)^{1/D} \right)
+ \frac{C_T}{n} \\
& + \left( \frac{e n \gamma^*}{k} \right)^{\frac{k\gamma_*}{\gamma^*}}
\int_0^{\left( \frac{k}{\gamma^* n} \right)^{\frac{1}{D}}}
y^{Dk\gamma_*/\gamma^*} f'(y)
\, dy
\label{ineq:KNN_functional_lower}
\end{align}
($f_+(x) = \max\{0, f(x)\}$ and $f_-(x) = -\min\{0, f(x)\}$ denote the
positive and negative parts of $f$, respectively).
\label{thm:KNN_functional}
\end{theorem}
\begin{remark}
If $f : (0, \infty) \to \R$ is continuously differentiable with
$f' < 0$, we can apply Theorem \ref{thm:KNN_functional} to $-f$. Also, similar
techniques can be used to prove analogous lower bounds (i.e., lower bounds on
the positive part and upper bounds on the negative part).
\end{remark}
\begin{remark}
The tail condition
(\ref{eq:tail_condition}) is difficult to validate directly for many
distributions. Clearly, it is satisfied when the support of $p$ is
bounded. However, \cite{tsybakov96rootn} show that, for the functions $f$ we
are interested in (i.e., logarithms and power functions), when $\X = \R^D$, $d$
is the Euclidean metric, and $\mu$ is the Lebesgue measure,
(\ref{eq:tail_condition}) is also satisfied by upper-bounded densities with
exponentially decreasing tails. More precisely, that is when there exist
$a,b,\alpha,\delta > 0$ and $\beta > 1$ such that, whenever $\|x\|_2 > \d$,
\[a e^{-\alpha \|x\|^\beta}
\leq p(x)
\leq b e^{-\alpha \|x\|^\beta},\]
which permits, for example, Gaussian distributions. It should be noted that the
constant $C_T$ depends only on the metric measure space, the distribution $P$,
and the function $f$, and, in particular, not on $k$.
\end{remark}
\subsection{Applications of Theorem \ref{thm:KNN_functional}}
We can apply Theorem \ref{thm:KNN_functional} to several functions $f$ of
interest. Here, we demonstrate the cases $f(x) = \log x$ and $f(x) = x^\alpha$
for certain $\alpha$, as we will use these bounds when analyzing the KL
estimator.
When $f(x) = \log(x)$, (\ref{ineq:KNN_functional_upper}) gives
\begin{align}
\notag
\E \left[ \log_+(\e_k(x)) \right]
& \leq \frac{1}{D} \log_+ \left( \frac{k}{\gamma_* n} \right)
+ \left( \frac{e}{k} \right)^k \frac{\Gamma(k, k)}{D} \\
& \leq \frac{1}{D}
\left( 1 + \log_+ \left( \frac{k}{\gamma_* n} \right) \right)
\label{ineq:pos_log_stat}
\end{align}
(where $\Gamma(s, x) := \int_x^\infty t^{s - 1} e^{-t} \, dt$ denotes the upper
incomplete Gamma function, and we used the bound
$\Gamma(s, x) \leq x^{s - 1}e^{-x}$), and (\ref{ineq:KNN_functional_lower})
gives
\begin{align}
\E \left[ \log_-(\e_k(x)) \right]
& \leq \frac{1}{D} \log_-\left( \frac{k}{\gamma^* n} \right)
+ C_1,
\label{ineq:neg_log_stat}
\end{align}
for $C_1 = \frac{\gamma^* e^{k\gamma_*/\gamma^*}}{Dk\gamma_*}$.
For $\alpha > 0$, $f(x) = x^\alpha$, (\ref{ineq:KNN_functional_upper})
gives
\begin{align}
\notag
\E \left[ \e_k^\alpha(x) \right]
& \leq \left( \frac{k}{\gamma_* n} \right)^{\frac{\alpha}{D}}
+ \left( \frac{e}{k} \right)^k
\frac{\alpha\Gamma\left( k + \alpha/D, k \right)}
{D(n\gamma_*)^{\alpha/D}} \\
& \leq C_2
\left( \frac{k}{\gamma_* n} \right)^{\frac{\alpha}{D}},
\label{ineq:pos_moment_stat}
\end{align}
where $C_2 = 1 + 2\frac{\alpha}{D}$. For any $\alpha \in [-Dk\gamma_*/\gamma^*, 0]$, when $f(x) = -x^\alpha$,
(\ref{ineq:KNN_functional_lower}) gives
\begin{align}
\E \left[ \e_k^\alpha(x) \right]
& \leq C_3
\left( \frac{k}{\gamma^* n} \right)^{\frac{\alpha}{D}},
\label{ineq:neg_moment_stat}
\end{align}
where $C_3 = 1 + \frac{\alpha \gamma^* e^{k\gamma_*/\gamma^*}}{Dk\gamma_* + \alpha\gamma^*}$.
\section{The KL Estimator for Entropy}
\label{sec:KL_est}
Recall that, for a random variable $X$ sampled from a probability density $p$
with respect to a base measure $\mu$, the Shannon entropy is defined as
\[H(X) = -\int_\X p(x) \log p(x) \, d\mu(x).\]
As discussed in Section \ref{sec:intro}, many applications call for an estimate of
$H(X)$ given $n$ IID samples $X_1,\dots,X_n \sim p$.
For a positive integer $k$, the KL estimator is typically written as
\[\hat H_k(X)
= \psi(n) - \psi(k) + \log c_D + \frac{D}{n} \sum_{i = 1}^n \log \e_k(X_i),\]
where $\psi : \N \to \R$ denotes the digamma function. The motivating insight
is the observation that, independent of the sampling distribution,
\footnote{See \cite{Kraskov04estimating} for a concise proof of this fact.}
\[\E \left[ \log P(B(X_i, \e_k(X_i))) \right] = \psi(k) - \psi(n).\]
Hence,
\begin{align*}
& \E \left[ \hat H_k(X) \right] \\
& = \E \left[
-\log P(B(X_i, \e_k(X_i)))
+ \log c_D + \frac{D}{n} \sum_{i = 1}^n \log \e_k(X_i) \right] \\
& = -\E \left[ \frac{1}{n} \sum_{i = 1}^n \log \left(
\frac{P(B(X_i, \e_k(X_i)))}{c_D\e_k^D(X_i)} \right) \right] \\
& = -\E \left[ \frac{1}{n} \sum_{i = 1}^n \log p_{\e_k(i)}(X_i) \right]
= -\E \left[ \log p_{\e_k(X_1)}(X_1) \right],
\end{align*}
where, for any $x \in \X$, $\e > 0$,
\[p_\e(x)
= \frac{1}{c_D \e^D} \int_{B(x, \e)} p(y) \, d\mu(y)
= \frac{P(B(x, \e))}{c_D \e^D}\]
denotes the local average of $p$ in a ball of radius $\e$ around $x$. Since
$p_\e$ is a smoothed approximation of $p$ (with smoothness increasing with
$\e$), the KL estimate can be intuitively thought of as a plug-in estimator for
$H(X)$, using a density estimate with an adaptive smoothing parameter.
In the next two sections, we utilize the bounds derived in Section
\ref{sec:KNN_stats} to bound the bias and variance of the KL estimator. We note
that, for densities in the $\beta$-H\"older smoothness class
($\beta \in (0, 2]$), our results imply a mean-squared error of
$O(n^{-2\beta/D})$ when $\beta < D/2$ and $O(n\inv)$ when $\beta \geq D/2$.
\section{Bias Bound}
\label{sec:bias_bound}
In this section, we prove bounds on the bias of the KL estimator, first in a
relatively general setting, and then, as a corollary, in a more specific but
better understood setting.
\begin{theorem}
Suppose $(\XX, d, \Sigma, \mu)$ and $P$ satisfy the conditions of Theorem
\ref{thm:KNN_functional}, and there exist $C_\beta, \beta \in (0, \infty)$ with
\[\sup_{x \in \X} \left| p(x) - p_\e(x) \right| \leq C_\beta \e^\beta,\]
and suppose $p$ satisfies a `tail bound'
\begin{equation}
\Gamma_B
:= \E_{X \sim P} \left[ \left( \gamma_*(X) \right)^{-\frac{\beta + D}{D}} \right]
< \infty.
\label{ineq:tail_cond}
\end{equation}
Then,
\[\left| \E \left[ H(X) - \hat H_k(X) \right] \right|
\leq C_B \left( \frac{k}{n} \right)^{\frac{\beta}{D}},\]
where $C_B = (1 + c_D) C_2 C_\beta \Gamma_B$.
\label{thm:gen_bias_bound}
\end{theorem}
We now show that the conditions of Theorem \ref{thm:gen_bias_bound} are
satisfied by densities in the commonly used nonparametric class of
$\beta$-H\"older continuous densities on $\R^D$.
\begin{definition}
Given a constant $\beta > 0$ and an open set $\X \subseteq \R^D$, a function
$f : \X \to \R$ is called \emph{$\beta$-H\"older continuous} if $f$ is $\ell$
times differentiable and there exists $L > 0$ such that, for any multi-index
$\alpha \in \N^D$ with $|\alpha| < \beta$,
\[\sup_{x \neq y \in \X}
\frac{|D^\alpha f(x) - D^\alpha f(y)|}{\|x - y\|^{\beta - \ell}}
\leq L,\]
where $\ell := \lfloor \beta \rfloor$ is the greatest integer \emph{strictly}
less than $\beta$.
\end{definition}
\begin{definition}
Given an open set $\X \subseteq \R^D$ and a function $f : \X \to \R$, $f$ is
said to \emph{vanish on the boundary $\partial \X$} of $\X$ if, for any sequence
$\{x_i\}_{i = 1}^\infty$ in $\X$ with
$\inf_{x' \in \partial \X} \|x_i - x'\|_2 \to 0$ as $i \to \infty$, $f(x_i) \to 0$
as $i \to \infty$. Here,
\[\partial \X
:= \{x \in \R^D : \forall \delta > 0,
B(x, \delta) \not\subseteq \X
\mbox{ and }
B(x, \delta) \not\subseteq \X^c\},\]
denotes the boundary of $\X$.
\end{definition}
\begin{corollary}
Consider the metric measure space $(\R^D, d, \Sigma, \mu)$, where $d$ is Euclidean
and $\mu$ is the Lebesgue measure. Let $P$ be an absolutely continuous probability
measure with full dimension and density $p$ supported on an open set
$\X \subseteq \R^D$. Suppose $p$ satisfies (\ref{ineq:tail_cond}) and the
conditions of Theorem \ref{thm:KNN_functional} and is $\beta$-H\"older
continuous ($\beta \in (0, 2]$) with constant $L$. Assume $p$ vanishes on
$\partial \X$. If $\beta > 1$, assume $\|\nabla p\|_2$ vanishes on
$\partial \X$. Then,
\[\left| \E \left[ \hat H_k(X) - H(X) \right] \right|
\leq C_H \left( \frac{n}{k} \right)^{-\frac{\beta}{D}},\]
where $C_H = (1 + c_D) C_2 \Gamma \frac{LD}{D + \beta}$.
\label{corr:Holder_bias_bound}
\end{corollary}
\begin{remark}
The assumption that $p$ (and perhaps $\|\nabla p\|$) vanish on the boundary of
$\X$ can be thought of as ensuring that the trivial continuation
$q : \R^D \to [0, \infty)$
\[q(x)
= \left\{
\begin{array}{ll}
p(x) & x \in \X \\
0 & x \in \R^D \sminus \X
\end{array}
\right.\]
of $p$ to $\R^D$ is $\beta$-H\"older continuous. This reduces boundary bias,
for which the KL estimator does not correct.
\footnote{Several estimators controlling for boundary bias have been proposed
(e.g., \citet{sricharan10confidence} give a modified $k$-NN estimator that
accomplishes this \emph{without} prior knowledge of $\X$).}
\end{remark}
\section{Variance Bound}
\label{sec:variance_bound}
We first use the bounds proven in Section \ref{sec:KNN_stats} to prove uniform
(in $n$) bounds on the moments of $\log \e_k(X)$. We then show that,
for any fixed $x \in \X$, although $\log \e_k(x) \to -\infty$ almost surely as
$n \to \infty$, $\Var \left[ \log \e_k(x) \right]$, and indeed all higher
central moments of $\log \e_k(x)$, are bounded, uniformly in $n$. In fact,
there exist exponential bounds, independent of $n$, on the
density of $\log \e_k(x) - \E \left[ \log \e_k(x) \right]$.
\subsection{Moment Bounds on Logarithmic $k$-NN distances}
\begin{lemma}
Suppose $(\XX, d, \Sigma, \mu)$ and $P$ satisfy the conditions of Theorem
\ref{thm:KNN_functional}. Suppose also that
$\Gamma_0 := \sup_{x \in \X} \frac{\gamma^*(x)}{\gamma_*(x)} < \infty$. Let
$\lambda \in \left(0, \frac{Dk}{\Gamma_0} \right)$ and assume the following
expectations are finite:
\begin{equation}
\Gamma := \E_{X \sim P} \left[ \frac{\gamma^*(X)}{\gamma_*(X)} \right] < \infty.
\label{const:gamma_ratio}
\end{equation}
\begin{equation}
\Gamma_*(\lambda) := \E_{X \sim P} \left[ \left( \gamma_*(X) \right)^{-\lambda/D} \right] < \infty.
\label{const:gamma_sub_pow}
\end{equation}
\begin{equation}
\Gamma^*(\lambda) := \E_{X \sim P} \left[ \left( \gamma^*(X) \right)^{\lambda/D} \right] < \infty.
\label{const:gamma_sup_pow}
\end{equation}
Then, for any integer $\ell > 1$, the $\ell^{th}$ central moment
\[M_\ell := \E \left[ \left( \log \e_k(X) - \E \left[ \log \e_k(X) \right] \right)^\ell \right]\]
satisfies
\begin{equation}
M_\ell \leq C_M \ell!/\lambda^\ell,
\label{ineq:moment_bound}
\end{equation}
where $C_M > 0$ is a constant independent of $n$, $\ell$, and $\lambda$.
\label{lemma:log_moment_bound}
\end{lemma}
\begin{remark}
The conditions (\ref{const:gamma_ratio}), (\ref{const:gamma_sub_pow}), and
(\ref{const:gamma_sup_pow}) are mild. For example, when $\X = \R^D$, $d$ is the
Euclidean metric, and $\mu$ is the Lebesgue measure, it suffices that $p$ is
Lipschitz continuous
\footnote{Significantly milder conditions than Lipschitz continuity suffice,
but are difficult to state here due to space limitations.} and there exist
$c, r > 0$ and $q > \frac{D^2}{D - \alpha}$ such that $p(x) \leq c\|x\|^{-q}$
whenever $\|x\|_2 > r$. The condition $\Gamma_0 < \infty$ is more prohibitive,
but still permits many (possibly unbounded) distributions of interest.
\end{remark}
\begin{remark}
If the terms $\log \e_k(X_i)$ were independent, a Bernstein inequality,
together with the moment bound (\ref{ineq:moment_bound}) would imply a
sub-Gaussian concentration bound on the KL estimator about its expectation.
This may follow from one of several more refined concentration results relaxing
the independence assumption that have been proposed.
\end{remark}
\subsection{Bound on the Variance of the KL Estimate}
Bounds on the variance of the KL estimator now follow from the law of large
numbers in \citet{evans08SLLNforKNN} (itself an application of the Efron-Stein
inequality to $k$-NN statistics).
\begin{theorem}
Suppose $(\XX, d, \Sigma, \mu)$ and $P$ satisfy the conditions of Lemma
\ref{lemma:log_moment_bound}, and that there exists a constant $N_k \in \N$
such that, for any finite $F \subseteq \X$, any $x \in F$ can be among the
$k$-NN of at most $N_k$ other points in that set. Then,
$\hat H_k(X) \to \E \left[ \hat H_k(X) \right]$ almost surely (as
$n \to \infty$), and, for $n \geq 16 k$ and $M_4$ satisfying
(\ref{ineq:moment_bound}),
\[\Var \left[ \hat H_k(X) \right]
\leq \frac{5(3 + kN_k)(3 + 64k)M_4}{n}
\in O \left( \frac{1}{nk} \right).\]
\label{thm:variance_bound}
\end{theorem}
\begin{remark}
$N_k$ depends only on $k$ and the geometry of the metric space $(\X, d)$. For
example, Corollary A.2 of \citet{evans08SLLNforKNN} shows that, when
$\X = \R^D$ and $d$ is the Euclidean metric, then $N_k \leq k K(D)$, where
$K(D)$ is the kissing number of $\R^D$.
\end{remark}
\section{Bounds on the Mean Squared Error}
The bias and variance bounds (Theorems \ref{thm:gen_bias_bound} and
\ref{thm:variance_bound}) imply a bound on the mean squared error of the KL
estimator:
\begin{corollary}
Suppose $p$
\begin{enumerate}
\item
is $\beta$-H\"older continuous with $\beta \in (0, 2]$.
\item
vanishes on $\partial \X$. If $\beta > 1$, then also suppose
$\|\nabla p\|_2$ vanishes on $\partial \X$.
\item
satisfies the assumptions of Theorems \ref{thm:gen_bias_bound} and
\ref{thm:variance_bound}.
\end{enumerate}
Then,
\begin{equation}
\E \left[ \left( \hat H_k(X) - H(X) \right)^2 \right]
\leq C_B^2 \left( \frac{k}{n} \right)^{2\beta/D} + \frac{C_V}{nk}.
\label{ineq:MSE_bound_general_k}
\end{equation}
If we let $k$ scale as $k \asymp n^{\max \left\{ 0, \frac{2 \beta - D}{2 \beta + D} \right\}}$,
this gives an overall convergence rate of
\begin{equation}
\E \left[ \left( \hat H_k(X) - H(X) \right)^2 \right]
\in O \left( n^{-\min \left\{ 1, \frac{2\beta}{D} \right\}} \right).
\label{ineq:MSE_bound_optimal_k}
\end{equation}
\label{corr:MSE_bound}
\end{corollary}
\section{Conclusions and Future Work}
This paper derives finite sample bounds on the bias and variance of the KL
estimator under general conditions, including for certain classes of unbounded
distributions. As intermediate results, we proved concentration inequalities for
$k$-NN distances and bounds on the expectations of statistics of $k$-NN
distances. We hope these results and methods may lead to convergence rates for
the widely used KSG mutual information estimator, or to generalize convergence
rates for other estimators of entropy and related functionals to unbounded
distributions.
\section*{Acknowledgements}
This material is based upon work supported by a National Science Foundation
Graduate Research Fellowship to the first author under Grant No. DGE-1252522.
\section{Introduction}
We discuss the numerical solution of minimization and evolution
problems related to the $p$-Dirichlet energy
\[
E_p[u] = \frac1p \int_\Omega |\nabla u|^p\dv{x},
\]
with $1\le p < 2$. The Euler--Lagrange equations give rise to a singular
differential operator which requires a careful numerical treatment.
Related problems occur in the description of minimal surfaces,
porous media, non-Newtonian fluids, nonlinear elasticity,
and Newton's problem of minimal resistance; we refer the reader
to~\cite{Dziu99,Cham04,DeDzEl05,FeOePr05,BeDiRu15,DiFoWa17-pre} for related
results. Typically, standard numerical schemes such as Newton or
Picard iterations fail to determine stationary configurations.
Gradient flows provide a robust approach to find minimizers for
functionals that involve $E_p$ or arise as models to describe
certain nonlinear evolutions. In the simplest case this leads to
the equation
\begin{equation}\label{eq:p_flow}
\partial_t u - \diver \big(|\nabla u|^{p-2} \nabla u\big) = 0,
\end{equation}
subject to initial and boundary conditions. An implicit discretization
in time leads to the nonlinear recursion formula
\begin{equation}\label{nonlinear-recursion}
d_t \widetilde{u}^k - \diver \big(|\nabla \widetilde{u}^k|^{p-2} \nabla \widetilde{u}^k\big) = 0,
\end{equation}
for $k=1,2,\dots,K$, with a step-size $\tau>0$ and the backward
difference quotient operator $d_t c^k = (c^k-c^{k-1})/\tau$. The
iterates $(\widetilde{u}^k)_{k=0,\dots,K}$ are well defined and optimal
error estimates
\[
\max_{k=0,\dots,K} \|u(t_k) - \widetilde{u}^k\| = \mathcal{O}(\tau),
\]
with $t_k = k \tau$, can be derived under appropriate conditions
on the initial function~$u^0$,
cf.~\cite{BarLiu93,BarLiu94,Rull96,NoSaVe00,DiEbRu07}.
Unfortunately, the development of efficient numerical schemes
for computing the iterates $(\widetilde{u}^k)_{k=0,\dots,K}$ is far from
being obvious. Moreover, including perturbation terms in the error
analysis of the implicit scheme shows that very restrictive stopping
criteria for the iterative approximate solution are necessary.
It is therefore desirable to develop time-discretizations that lead to
linear systems of equations in every time step but still have good
stability properties. In fact, such schemes can then also be used as
iterative solvers for approximating nonlinear problems
such as \eqref{nonlinear-recursion}.
A popular approach to discretizing the nonlinear partial
differential equation consists in defining iterates $(u^k)_{k=0,\dots,K}$
via a semi-implicit discretization of~\eqref{eq:p_flow}
and the corresponding sequence of linear problems
\begin{equation}\label{eq:simpl}
d_t u^k - \diver \big(|\nabla u^{k-1}|_\varepsilon^{p-2} \nabla u^k \big) = 0,
\end{equation}
for $k=1,2,\dots,K$. Here, the use of a regularization of the
euclidean length, e.g., defined via $|a|_\varepsilon = (a^2+\varepsilon^2)^{1/2}$
with a positive parameter $\varepsilon$, guarantees that the iterates
are well-defined. The unconditional well-posedness in the sense
of stability of the iteration is nonobvious due to the loss of
monotonicity properties related to the implicit-explicit treatment
of the differential operator.
It is the purpose of this article to demonstrate that the
iteration is nonetheless unconditonally energy stable and to provide
error estimates that control the influence of the regularization
and semi-implicit discretization on the quality of approximations.
A related stability estimate has been proved for the mean
curvature flow of graphs in~\cite{Dziu99} which corresponds to the
case $p=1$ and $\varepsilon = 1$.
We discuss now our unexpected observation for the special and most
singular situation corresponding to the exponent $p=1$, the so-called
regularized {\em total variation flow}. Testing
the iterative scheme~\eqref{eq:simpl} with $d_t u^k$ and incorporating
a standard binomial formula leads to the identity
\[
\|d_t u^k\|^2 + \frac12 \int_\Omega
\frac{d_t |\nabla u^k|^2 + \tau |d_t \nabla u^k|^2}{|\nabla u^{k-1}|_\varepsilon} \dv{x} = 0.
\]
To identify the regularized energy $E_{1,\varepsilon}$ on the left-hand side
we employ difference quotient calculus and derive the formula
\[\begin{split}
d_t |a^k|_\varepsilon = d_t \frac{|a^k|_\varepsilon^2}{|a^k|_\varepsilon}
& = \frac{d_t |a^k|_\varepsilon^2}{|a^{k-1}|_\varepsilon} + |a^k|_\varepsilon^2 \, d_t \frac{1}{|a^k|_\varepsilon} \\
&
= \frac{d_t |a^k|_\varepsilon^2}{|a^{k-1}|_\varepsilon} - \frac{|a^k|_\varepsilon d_t|a^k|_\varepsilon}{|a^{k-1}|_\varepsilon} \\
&= \frac{d_t |a^k|_\varepsilon^2}{|a^{k-1}|_\varepsilon} - \frac12 \frac{d_t |a^k|_\varepsilon^2 + \tau (d_t |a^k|_\varepsilon)^2}{|a^{k-1}|_\varepsilon} \\
&= \frac12 \frac{d_t |a^k|_\varepsilon^2}{|a^{k-1}|_\varepsilon} - \frac12 \frac{\tau (d_t |a^k|_\varepsilon)^2}{|a^{k-1}|_\varepsilon}.
\end{split}\]
Using this formula with $a^k = \nabla u^k$ and noting that $d_t |a^k|_\varepsilon^2 = d_t |a^k|^2$ for the
regularized euclidean length specified above, we find that
\[
\|d_t u^k\|^2
+ d_t \int_\Omega |\nabla u^k|_\varepsilon \dv{x}
+ \frac{\tau}{2} \int_\Omega \frac{|d_t \nabla u^k|^2 + (d_t |\nabla u^k|_\varepsilon)^2}{|\nabla u^{k-1}|_\varepsilon} \dv{x} = 0.
\]
The last term on the left-hand side is nonnegative so that upon summation
over $k=1,2,\dots, L\le K$ and multiplication by $\tau$ we have the energy decay
and unconditional stability property
\begin{equation}\label{unconditional-stab}
E_{1,\varepsilon}[u^L] + \tau \sum_{k=1}^L \|d_t u^k\|^2 \le E_{1,\varepsilon}[u^0],
\end{equation}
where $E_{1,\varepsilon}$ results from replacing the euclidean length in $E_p$ with $p=1$
by a regularization. We will prove this inequality for a class of Orlicz type
functionals which includes the regularized $p$-Dirichlet energy as a special
case. The arguments and the unconditional stability
estimate carry over verbatim to spatial discretizations
of the semi-implicit scheme.
Good stability properties of a numerical scheme are important to obtain
useful error estimates. We derive bounds on the approximation error
by controlling the differences between the iterates of the implicit
and semi-implicit schemes and incorporating known error estimates for
the implicit discretizations. In contrast to the estimates for implicit
schemes we thereby obtain error estimates that involve a dependence on
negative powers of the regularization parameter $\varepsilon$.
Moreover, we have to employ inverse estimates that introduce a critical dependence on the
spatial mesh-size~$h$. For lowest order continuous finite elements we obtain
the following error estimates
for the difference between the solution~$u$ of the gradient
flow \eqref{eq:p_flow} and
the approximations $(u_h^k)_{k=0,\dots,K}$ of the regularized,
semi-implicit scheme \eqref{eq:simpl}
\[\begin{split}
\max_{k=0,\dots,K} \|u(t_k) - u_h^k \| \le & \, c_{{\rm isf}} \tau^\alpha + 2 (c_{p,{\rm r}}T)^{1/2} \varepsilon^{p/2} \\
& +
\begin{cases}
c_{1,{\rm i}} h^{\beta} + c_{1,{\rm s}} \big(\tau h^{-2}
\varepsilon^{-1}\big)^{1/2} & \mbox{for } p=1, \\
c_{p,{\rm i}} h^{\gamma} + c_{p,{\rm s}} \big(\tau h^{p-2} \varepsilon^{p-2} \big)^{1/2} & \mbox{for } p>1,
\end{cases}
\end{split}\]
where $\beta = 1/6$ or $1/4$ and $\gamma = 1-d(2-p)/8$.
The first term on the right-hand side results from the general analysis of implicit
time discretization of subgradient flows, cf.~\cite{Rull96,NoSaVe00}; we
have $1\le \alpha \le 2$, depending on regularity properties of the initial data.
The second term accounts for the regularization of the evolution problem.
Spatial discretization errors due to the implicit scheme \eqref{nonlinear-recursion}
result in the first terms involving the positive powers of mesh-size $h$ under the
case distinction. We observe a significant gap between the cases $p=1$ and $p>1$ which
is related to the fact that for $p>1$ regularity results for nonlinear parabolic
partial differential
equations can be used, cf.~\cite{DiEbRu07}, while the analysis of the case $p=1$ is
solely based on energy arguments using the limited regularity properties of solutions
provided by the problem, cf.~\cite{BaNoSa14,BaNoSa15}.
The exponent $\beta = 1/6$ is generic while $\beta = 1/4$ relies on
a total variation diminishing interpolation operator, which
is constructed in \cite{BaNoSa15} for special meshes and definition of
total variation using the $\ell^1$-norm for vectors.
We note that the constant $c_{p,{\rm i}}$
is expected and in fact has to deteriorate as $p\searrow 1$. The factor $h^{\beta}$
can be replaced by $h$ if the reverse step-size condition $\tau \ge c h^{\alpha(p,d)}$ is
imposed. In our situation such a condition conflicts
with the last terms that involve the
inverse of the mesh size. These terms result from the
semi-implicit time discretization \eqref{eq:simpl},
and here the gap between the two cases is related
to the strong monotonicity properties of the problem for $p>1$.
The outline of this article is as follows. In Section~\ref{sec:prelim} we
specify notation and collect some basic estimates. Section~\ref{sec:stab}
is devoted to the generalization of the unconditional stability estimate
for semi-implicit discretizations of a class of singular flows
including \eqref{eq:p_flow}. An error analysis
for fully discrete schemes is provided in Section~\ref{sec:error}.
Numerical experiments for the case $p=1$ illustrate our theoretical results
and are presented in Section~\ref{sec:numex}.
\section{Preliminaries}\label{sec:prelim}
\subsection{Notation}
We use standard notation for Lebesgue and Sobolev spaces on the bounded
Lipschitz domain $\Omega\subset \mathbb{R}^d$. The inner product on $L^2(\Omega;\mathbb{R}^\ell)$
is denoted by $(\cdot,\cdot)$ and the corresponding norm by $\|\cdot\|$.
For a closed, possibly empty subset ${\Gamma_{\rm D}}\subset \partial\Omega$ we let $W^{1,p}_\DD(\Omega)$
be the set of functions in $W^{1,p}(\Omega)$ that vanish on ${\Gamma_{\rm D}}$; we write
$W^{1,p}_0(\Omega)$ if ${\Gamma_{\rm D}} = \partial\Omega$.
The space $BV(\Omega)$ consists of all functions $v\in L^1(\Omega)$ with bounded
total variation, i.e., functions $v\in L^1(\Omega)$ with
\begin{equation}\label{tv-norm}
|Dv|(\Omega) = \sup_{\xi \in C_0^\infty(\Omega;\mathbb{R}^d), \mbox{ }\|\xi\|_{L^\infty(\Omega)} \le 1}
- \int_\Omega v \diver \xi \dv{x} < \infty.
\end{equation}
For a shape regular triangulation $\mathcal{T}_h$ of the polyhedral domain~$\Omega$ into
simplices, we let
\[
V_h = \big\{v_h\in C(\overline{\Omega}): v_h|_T \in P_1(T) \text{ for all } T\in \mathcal{T}_h\big\},
\]
be the space of piecewise affine, continuous finite element functions on $\mathcal{T}_h$.
The parameter $h>0$ represents the maximal mesh-size of the triangulation.
\subsection{Difference calculus}
Given a sequence $(c^k)_{k=0,\dots,K}$ and a step size $\tau>0$ we
define the backward difference quotient via
\[
d_t c^k = \frac{1}{\tau} \big(c^k- c^{k-1}\big)
\]
for $k=1,2,\dots,K$. We note the discrete product and quotient rules
\[\begin{split}
d_t \big(c^k\cdot b^k\big)
& = \big(d_t c^k\big) \cdot b^{k-1} + c^k \cdot \big(d_t b^k \big), \\
d_t \big(1/c^k\big) &= -d_t c^k / \big(c^{k-1}c^k\big).
\end{split}\]
Moreover, we have the identity
\begin{equation}\label{eq:prod}
c^k \cdot d_t c^k
= \frac12 d_t \big|c^k\big|^2 + \frac{\tau}{2} \big|d_t c^k\big|^2.
\end{equation}
They have been used earlier in deriving \eqref{unconditional-stab}.
\subsection{Regularized euclidean length}
We consider a family of regularizations
$|\cdot|_\varepsilon$, $\varepsilon\in [0,\varepsilon_0]$, of the euclidean length $|\cdot|$
such that for $\varepsilon>0$ the mapping
\[
|\cdot|_\varepsilon : \mathbb{R}^d \to \mathbb{R}_{\ge 0}
\]
is continuously differentiable and convex. We
assume that we have the estimate
\begin{equation}\label{eq:approx_mod}
\big||a|_\varepsilon^p - |a|^p \big|\le c_{p,{\rm r}} \, \varepsilon^p
\end{equation}
for all $a\in \mathbb{R}^d$ with a constant $c_{p,{\rm r}}>0$ that may depend
on $1\le p < 2$.
\begin{examples}
(i) For the {\em standard regularization} $|a|_\varepsilon = (|a|^2+\varepsilon^2)^{1/2}$ we
have for $a\in \mathbb{R}^d$ with $|a|= s \varepsilon$ that
\[
|a|_\varepsilon^{p} - |a|^p
= \big((s^2+1)^{p/2} - (s^2)^{p/2} \big) \varepsilon^p = f(s^2) \varepsilon^p \le \varepsilon^p,
\]
since $f(r) = (r+1)^{p/2} - r^{p/2}$ is monotonically decreasing
with $f(0)=1$. \\
(ii) The {\em truncated regularization} defined for $a\in \mathbb{R}^d$ and
$\varepsilon\ge 0$ via
\[
|a|_\varepsilon^p = \begin{cases}
|a|^p + (p/2-1)\varepsilon^p & \mbox{for } |a| \ge \varepsilon, \\
(p/2) \varepsilon^{p-2} |a|^2 & \mbox{for } |a| \le \varepsilon,
\end{cases}
\]
satisfies~\eqref{eq:approx_mod} with $c_{p,{\rm r}} = (2-p)/2$.
\end{examples}
\subsection{Subgradient flow and regularization}
We interpret the nonlinear evolution equation~\eqref{eq:p_flow} as
a subgradient flow for the possibly regularized $p$-Dirichlet energy
\[
E_{p,\varepsilon}[u] = \frac1p \int_\Omega |\nabla u|_\varepsilon^p \dv{x},
\]
for $u\in X$ with $X=W^{1,p}_\DD(\Omega)$. If $p=1$ and $\varepsilon=0$ we define
$E_{p,\varepsilon}[u]$ as the total variation \eqref{tv-norm}
of $u$ and choose $X=BV(\Omega)$.
The functionals $E_{p,\varepsilon}$ are formally extended to $L^2(\Omega)$ by
assigning the value $+\infty$ to $u\in L^2(\Omega)\setminus X$.
The existence of a unique function $u\in W^{1,2}([0,T];L^2(\Omega)) \cap L^\infty([0,T];X)$
which satisfies $u(0) = u^0$ continuously for a given $u^0\in L^2(\Omega)\cap X$ and
\begin{equation}\label{eq:subflow}
-\partial_t u \in \partial E_{p,\varepsilon}[u],
\end{equation}
for almost every $t\in (0,T)$ is well established
for all $\varepsilon\ge0$, cf.~\cite{Brez73}.
Note that we always consider the subdifferential with respect to the
$L^2$ scalar product, i.e.,
\[
\partial E_{p,\varepsilon}[u] = \big\{ s\in L^2(\Omega): (s,v-u)+ E_{p,\varepsilon}[u] \le E_{p,\varepsilon}[v]
\mbox{ for all } v \in L^2(\Omega) \big\}.
\]
We thus have that the inclusion \eqref{eq:subflow}
is equivalent to the variational inequality
\[
(-\partial_t u, v-u) + E_{p,\varepsilon}[u] \le E_{p,\varepsilon}[v],
\]
for all $v\in L^2(\Omega)$ and $\varepsilon\ge0$. For $\varepsilon>0$, \eqref{eq:subflow} is also
equivalent to the equation
\begin{equation}\label{eq:reggradflow}
(\partial_t u,v) + (|\nabla u|_\varepsilon^{p-2} \nabla u,\nabla v) = 0,
\end{equation}
for all $v\in X$ and $t\in (0,T)$.
Letting $u$ and $u_\varepsilon$ be the solutions
of the subgradient flows for a fixed $p\in [1,2)$, subject to the same initial
condition, and $\varepsilon=0$ and $\varepsilon>0$, respectively, we deduce
from~\eqref{eq:approx_mod} via straightforward calculations that
\[
\sup_{t\in [0,T]} \|u-u_\varepsilon\| \le 2(c_{p,{\rm r}} T)^{1/2} \varepsilon^{p/2}.
\]
\subsection{Implicit time discretization}
Given a time step $\tau>0$, stable approximations of the solution of the
subgradient flow~\eqref{eq:subflow} are defined by the implicit Euler scheme
\[
\widetilde{u}^k = \mbox{argmin}_{v\in X} \, \frac{1}{2\tau} \|v-\widetilde{u}^{k-1}\|^2 + E_{p,\varepsilon}[v],
\]
for $k=1,2,\dots,K$, initialized with $\widetilde{u}^0 = u^0$.
The sequence $(\widetilde{u}^k)_{k=0,\dots,K}$ is uniquely defined and the
iterates satisfy
\[
(-d_t \widetilde{u}^k, v-\widetilde{u}^k) + E_{p,\varepsilon}[\widetilde{u}^k] \le E_{p,\varepsilon}[v],
\]
for all $v\in X$. We have the error estimate, cf.~\cite{Rull96,NoSaVe00},
\[
\max_{k=0,\dots,K} \|u(t_k) - \widetilde{u}^k \|\le c_{{\rm isf}} \tau^\alpha,
\]
with $\alpha = 1/2$ if $E_{p,\varepsilon}[u^0]< \infty$ and
$\alpha=1$ if $\partial E_{p,\varepsilon}[u^0]\neq \emptyset$.
\subsection{Spatial discretization}
A spatial discretization of the implicit time stepping scheme for the
subgradient flow
determines iterates $(\widetilde{u}_h^k)_{k=0,\dots,K} \subset X_h$
with $X_h = V_h \cap X$
for a suitable approximation $\widetilde{u}_h^0$ of $u^0$ via the sequence of minimization
problems
\[
\widetilde{u}_h^k = \mbox{argmin}_{v_h\in X_h} \frac{1}{2\tau} \|v_h - \widetilde{u}_h^{k-1} \|^2
+ E_{p,\varepsilon}[v_h].
\]
Invoking~\cite{BaNoSa14,BaNoSa15,Bart15-book}
for the case $p=1$ and~\cite{DiEbRu07} for the case $p>1$, we have the error estimates
\begin{equation}\label{eq:p_flow_est}
\max_{k=0,\dots,K} \|u(t_k)- \widetilde{u}_h^k\| \le c_{{\rm isf}} \tau^\alpha + 2(c_{p,{\rm r}}T)^{1/2} \varepsilon^{p/2} +
\begin{cases}
c_{1,{\rm i}} h^{\beta} & \mbox{for } p=1, \\
c_{p,{\rm i}} h^{\gamma} & \mbox{for } p>1,
\end{cases}
\end{equation}
for suitable choices of $\widetilde{u}_h^0$ and with $\beta = 1/6$ or $1/4$ and $\gamma=1-d(2-p)/8$.
The estimate of~\cite{BaNoSa14} for $p=1$
assumes homogeneous Neumann boundary conditions,
that $\Omega$ is star-shaped, and that $u^0\in BV(\Omega) \cap L^\infty(\Omega)$,
and holds with $\alpha = 1/2$ and $\beta = 1/6$. The decay rate in space can be
improved to $\beta = 1/4$ upon utilizing a total variation
diminishing interpolation operator, whose construction is discussed
in \cite{BaNoSa15} for special cartesian meshes and definition of the
total variation in terms of $\ell^1$-norms of vectors.
On the other hand,
the estimate of~\cite{DiEbRu07} for $p\in (1,2)$
assumes homogeneous Dirichlet boundary conditions,
that $\Omega$ is convex, and that the initial value satisfies $u^0\in W^{1,2}_0(\Omega)$
and $\diver \big(|\nabla u^0|^{p-2}\nabla u^0)\big)\in L^2(\Omega)$. This result
entails the condition $p>2d/(d+2)$ which can be omitted
when $\partial_t u$ is an admissible test function, i.e., in
case of subgradient flows and smooth right-hand sides.
Note that the assumptions on $u^0$ imply $\partial E_{p,\varepsilon}[u^0]\neq \emptyset$ so that
we may choose $\alpha=1$ \cite{Rull96,NoSaVe00,DiEbRu07}.
We remark that in the error estimate~\eqref{eq:p_flow_est} the function~$u$ may be
replaced by the solution~$u_\varepsilon$ of the regularized evolution equation
in which case the term involving the factor $\varepsilon^{p/2}$ can be omitted.
\section{Generalized unconditional stability estimate}\label{sec:stab}
We next generalize our unconditional stability estimate for
semi-implicit discretizations to a class of gradient flows for convex
energy functionals $E_\varphi: L^2(\Omega) \to \mathbb{R}\cup\{+\infty\}$
defined with functions $\varphi: \mathbb{R}_{\ge 0}\to \mathbb{R}_{\ge 0}$ via
\[
E_\varphi[u] = \int_\Omega \varphi(|\nabla u|) \dv{x}.
\]
We impose the following conditions on the energy density $\varphi$
which define a class of sub-quadratic Orlicz functions:
\begin{itemize}
\item[(C1)] $r\mapsto \varphi(r)$ is convex and continuously differentiable with $\varphi(0)=0$,
\item[(C2)] $r\mapsto \varphi'(r)/r$ is positive, nonincreasing, and continuous on $\mathbb{R}_{\ge 0}$.
\end{itemize}
Condition~(C2) implies that the following semi-implicit time-stepping scheme is
well posed.
\begin{algorithm}[Semi-implicit scheme]\label{alg:simpl}
Let $u^0 \in X$ and $\tau,\varepsilon>0$; set $k=1$. \\
(1) Compute $u^k\in X$ such that for all $v\in X$ we have
\[
(d_t u^k,v)
+ \Big(\frac{\varphi'(|\nabla u^{k-1}|)}{|\nabla u^{k-1}|}\nabla u^k,\nabla v \Big) = 0.
\]
(2) Stop if $(k+1)\tau > T$; otherwise increase $k\to k+1$ and continue
with~(1).
\end{algorithm}
The regularized $p$-Dirichlet energy occurs as a special case of
(C1) and (C2).
\begin{examples}\label{ex:orlicz_fns}
(i) The regularized $p$-Laplace gradient flow corresponds to the function
\[
\varphi(r) = \frac1p |r|_\varepsilon^p - \frac1p |0|_\varepsilon^p,
\]
and we have
\[
\varphi'(r) = |r|_\varepsilon^{p-2} r \quad \mbox{and} \quad
\varphi'(r) = \max\{\varepsilon,r\}^{p-2} r,
\]
in case of the standard and truncated regularizations of euclidean length, respectively.
In both cases~(C1) and (C2) are satisfied for $1\le p < 2$.
A particular feature of the truncated regularization is that a closed formula
for the convex conjugate of $\varphi(|a|) = (1/p)|a|_\varepsilon^p$ is
available.
\medskip
(ii) The function $\varphi(r) = r \ln(e+r)$ occurs in the modeling of Prandtl--Eyring fluids
and satisfies conditions (C1) and (C2), cf.~\cite{Eyri36,BrDiFu12} for details.
\end{examples}
We remark that a positive $\varepsilon$ is only needed for well-posedness of the semi-implicit
iteration of Algorithm \ref{alg:simpl}. Its
unconditional stability is a consequence of an elementary lemma.
\begin{lemma}\label{la:orlicz_stab}
Under condition $(C2)$ we have for all $a,b\in \mathbb{R}^d$ that
\[
\frac{\varphi'(|a|)}{|a|} b \cdot (b-a) \ge \varphi(|b|)- \varphi(|a|)
+ \frac12 \frac{\varphi'(|a|)}{|a|} |b-a|^2.
\]
\end{lemma}
\begin{proof}
Using the identity $2 b\cdot (b-a) = |b|^2 -|a|^2 + |b-a|^2$ we note that
\[
\frac{\varphi'(|a|)}{|a|} b\cdot (b-a)
= \frac12 \frac{\varphi'(|a|)}{|a|} \big(|b|^2-|a|^2\big)
+ \frac12 \frac{\varphi'(|a|)}{|a|} |b-a|^2.
\]
Since $r\mapsto \varphi'(r)/r$ is nonincreasing,
the function $\psi(y) = \varphi(y^{1/2})$ is concave on $\mathbb{R}_{\ge 0}$,
so that we have
\[
\psi'(y) (z-y) \ge \psi(z)-\psi(y),
\]
for all $y,z\ge 0$. With $y=|a|^2$ and $z=|b|^2$ we deduce that
\[
\frac12 \frac{\varphi'(|a|)}{|a|} \big(|b|^2-|a|^2\big)
\ge \varphi(|b|) -\varphi(|a|).
\]
Combining these inequalities implies the asserted estimate.
\end{proof}
The following proposition states the general unconditional stability
estimate for energy functionals $E_\varphi$ under conditions (C1) and (C2).
The estimate provides control over certain dissipation terms which will
be needed for the error estimates derived in the subsequent section.
\begin{proposition}[Energy stability]\label{prop:ener_stab}
Under conditions (C1) and (C2)
the iterates $(u^k)_{k=1,\dots,K}$ of Algorithm~\ref{alg:simpl}
satisfy for every $1\le L \le K:=\lfloor T/\tau \rfloor$
\[
E_\varphi[u^L] + \tau \sum_{k=1}^L \|d_t u^k\|^2
+ \frac{\tau^2}{2} \sum_{k=1}^L \int_\Omega \frac{\varphi'(|\nabla u^{k-1}|)}{|\nabla u^{k-1}|}
|d_t \nabla u^k|^2 \dv{x} \le E_\varphi[u^0].
\]
\end{proposition}
\begin{proof}
Using $v=d_t u^k$ in the equation of Algorithm~\ref{alg:simpl} leads to
\[
\|d_t u^k\|^2 +
\int_\Omega \frac{\varphi'(|\nabla u^{k-1}|)}{|\nabla u^{k-1}|} \nabla u^k \cdot d_t \nabla u^k \dv{x}
= 0.
\]
Lemma~\ref{la:orlicz_stab} with $a=\nabla u^{k-1}$ and $b=\nabla u^k$ implies that
\[
\|d_t u^k\|^2 + d_t \int_\Omega \varphi(|\nabla u^k|) \dv{x}
+ \frac{1}{2\tau} \int_\Omega \frac{\varphi'(|\nabla u^{k-1}|)}{|\nabla u^{k-1}|} |\nabla (u^k-u^{k-1})|^2 \dv{x} \le 0,
\]
and summation over $k=1,2,\dots,L$ and multiplication by $\tau$ prove the estimate.
\end{proof}
\begin{remark}
The stability estimate implies convergence of Richardson-type fixed-point iterations
for the solution of the stationary $p$-Laplace problem where the step size $\tau$
acts as a damping parameter. For this purpose a stronger
metric to define the evolution such as a weighted $H^1$ product, which mimics
the $W^{1,p}$ norm may be employed, instead of the $L^2$ inner product,
which in turn acts as a preconditioner for the nonlinear system of
equations. This is an important application of the semi-implicit scheme.
We refer the reader to~\cite{Bart16} for a related approach to a total variation
regularized problem.
\end{remark}
\section{Error estimates}\label{sec:error}
We derive in this section error estimates for the semi-implicit, regularized
numerical scheme of Algorithm~\ref{alg:simpl} with spatial
discretization for the $p$-Dirichlet energy $E_{p,\varepsilon}$.
We note that all estimates of Section~\ref{sec:stab}
remain valid if spatial discretization is included.
In what follows we assume that $\mathcal{T}_h$ is quasi-uniform and that
$|\cdot|_\varepsilon$ is the standard regularization of euclidean length.
\subsection{Total variation flow}
We derive an error estimate for the approximation of the gradient
flow~\eqref{eq:p_flow} with $p=1$ interpreted as a subgradient flow
by the semi-implicit scheme of Algorithm~\ref{alg:simpl}. For this, we compare the iterates
$(u_h^k)_{k=0,\dots,K} \subset X_h$ in the finite element space
$X_h = V_h$, i.e., defined via
\[
(d_t u_h^k,v_h) + \big(|\nabla u_h^{k-1}|_\varepsilon^{-1} \nabla u_h^k, \nabla v_h) = 0,
\]
for all $v_h\in X_h$, to the iterates $(\widetilde{u}_h^k)_{k=0,\dots,K} \subset X_h$
of the implicit scheme, i.e., defined via
\[
(d_t \widetilde{u}_h^k,v_h) + \big(|\nabla \widetilde{u}_h^k|_\varepsilon^{-1} \nabla \widetilde{u}_h^k, \nabla v_h) = 0,
\]
for all $v_h\in X_h$. We assume that $\widetilde{u}_h^0 = u_h^0$.
\begin{proposition}[Error estimate]\label{prop:diff_impl_simpl_tv}
For the differences of the iterates of the implicit
and the semi-implicit numerical schemes we have that
\[
\max_{k=0,\dots,K} \|\widetilde{u}_h^k-u_h^k\| \le c_{1,{\rm s}} \tau^{1/2} h^{-1} \varepsilon^{-1/2},
\]
where $c_{1,{\rm s}}$ is proportional to $T^{1/2} E_{1,\varepsilon}[u_h^0]$.
\end{proposition}
\begin{proof}
Throughout this proof we omit subscripts~$h$. Taking the difference of the
numerical schemes we find that $\delta^k =\widetilde{u}^k-u^k$ satisfies
\[
(d_t \delta^k, v)
+ \Big(\frac{\nabla \widetilde{u}^k}{|\nabla \widetilde{u}^k|_\varepsilon} -
\frac{\nabla u^k}{|\nabla u^{k-1}|_\varepsilon},\nabla v\Big) = 0,
\]
for all $v\in X_h$. Using monotonicity of $a \mapsto a/|a|_\varepsilon$
and 1-Lipschitz continuity of $a\mapsto |a|_\varepsilon$, i.e.,
$|d_t|\nabla u^k|_\varepsilon| \le |d_t \nabla u^k|$,
for $v=\delta^k$ we deduce that
\begin{equation}\label{eq:error_eq}
\begin{split}
\frac12 d_t \|\delta^k\|^2 + \frac{\tau}{2} \|d_t \delta^k\|^2
&\le - \Big(\frac{\nabla u^k}{|\nabla u^k|_\varepsilon}
- \frac{\nabla u^k}{|\nabla u^{k-1}|_\varepsilon},\nabla \delta^k \Big) \\
&= \tau \Big(\frac{\nabla u^k d_t |\nabla u^k|_\varepsilon}
{|\nabla u^k|_\varepsilon|\nabla u^{k-1}|_\varepsilon},\nabla \delta^k \Big) \\
&\le \tau \Big(\int_\Omega \frac{|\nabla d_t u^k|^2}
{|\nabla u^{k-1}|_\varepsilon} \dv{x}\Big)^{1/2}
\Big(\int_\Omega \frac{|\nabla \delta^k|^2}{|\nabla u^{k-1}|_\varepsilon} \dv{x}\Big)^{1/2}.
\end{split}
\end{equation}
Invoking an inverse estimate and $|\nabla u^{k-1}|_\varepsilon \ge \varepsilon$ we infer that
\[
\int_\Omega \frac{|\nabla \delta^k|^2}{|\nabla u^{k-1}|_\varepsilon} \dv{x}
\le c \varepsilon^{-1} h^{-2} \|\delta^k\|^2.
\]
Let $1\le L \le K$ be such that $\|\delta^L\| = \max_{k=1,\dots,K} \|\delta^k\|$.
Multiplying~\eqref{eq:error_eq} by $\tau$ and summing over $k=1,2,\dots,L$ shows that
\[\begin{split}
\|\delta^L\|^2 & \le c \tau^2 h^{-1} \varepsilon^{-1/2} \sum_{k=1}^L
\Big(\int_\Omega \frac{|\nabla d_t u^k|^2}{|\nabla u^{k-1}|_\varepsilon}
\dv{x}\Big)^{1/2} \|\delta^k\| \\
& \le c \tau^{1/2} h^{-1} \varepsilon^{-1/2}
\Big(\tau^2 \sum_{k=1}^L \int_\Omega \frac{|\nabla d_t u^k|^2}
{|\nabla u^{k-1}|_\varepsilon} \dv{x}\Big)^{1/2}
\Big(\tau \sum_{k=1}^L \|\delta^k\|^2\Big)^{1/2} \\
&\le c \tau^{1/2} h^{-1} \varepsilon^{-1/2} C_0 (L \tau)^{1/2} \|\delta^L\|,
\end{split}\]
where we incorporated the estimate of Proposition~\ref{prop:ener_stab}
with $C_0 = E_{1,\varepsilon}[u^0]$ and $\varphi(r) = |r|_\varepsilon$ so that
$\varphi'(r)/r = |r|_\varepsilon^{-1}$. Dividing by $\|\delta^L\|$ and noting
$L\tau \le T$ implies the asserted estimate.
\end{proof}
\begin{remark}
In~\cite{FieVee03} a precise characterization of the monotonicity
of the regularized 1-Laplace operator is provided, i.e., we have
\[
\Big(\frac{a}{|a|_\varepsilon} - \frac{b}{|b|_\varepsilon}\Big) \cdot (a-b)
= \Big|\frac{(a,\varepsilon)}{|a|_\varepsilon} - \frac{(b,\varepsilon)}{|b|_\varepsilon} \Big|^2
\frac{|a|_\varepsilon + |b|_\varepsilon}{2}.
\]
Unfortunately, we did not succeed in deriving a sharper error estimate making
use of the identity.
\end{remark}
An error estimate follows from combining Proposition~\ref{prop:diff_impl_simpl_tv}
with the error estimate~\eqref{eq:p_flow_est} for the implicit scheme
from~\cite{BaNoSa14,BaNoSa15}.
\begin{corollary}\label{thm:tv_flow_simpl}
Let $\Omega$ be star-shaped and $u^0\in BV(\Omega)\cap L^\infty(\Omega)$. Assume that
$\mathcal{T}_h$ is quasi-uniform and $u_h^0 \in V_h$ is such that $|Du_h^0|(\Omega) \le c|Du^0|(\Omega)$.
If~$u$ solves~\eqref{eq:p_flow} with $p=1$ then we have for the iterates
$(u_h^k)_{k=0,\dots,K}$ of Algorithm~\ref{alg:simpl} with $\varphi(r) = |r|_\varepsilon$
and the standard regularization $|\cdot|_\varepsilon$ that
\[
\max_{k=0,\dots,K} \|u(t_k)-u_h^k\|
\le c_{{\rm isf}} \tau^{1/2} + 2(c_{1,{\rm r}} T)^{1/2} \varepsilon^{1/2} + c_{1,{\rm i}} h^{1/6}
+ c_{1,{\rm s}} \tau^{1/2} h^{-1} \varepsilon^{-1/2}.
\]
The factor $\tau^{1/2}$ in the first term can be replaced by $\tau$ if
$\partial E_{1,0}[u^0] \neq \emptyset$. The factor $h^{1/6}$ in the
third term can be replaced by $h^{1/4}$ for special uniform cartesian
meshes and definition of total variation using the $\ell^1$-norm in $\mathbb{R}^d$.
\end{corollary}
\subsection{$p$-Laplace gradient flow}
In case $p>1$ a stronger estimate follows from the
strong monotonicity of the $p$-Laplace operator. We argue as
in the previous subsection and compare the finite element
iterates of the semi-implicit scheme defined via
\[
(d_t u_h^k, v_h) + \Big(\frac{\varphi'(|\nabla u_h^{k-1}|)}{|\nabla u_h^{k-1}|} \nabla u_h^k, \nabla v_h\Big) = 0,
\]
for all $v_h\in X_h$, to those of the implicit scheme
\[
(d_t \widetilde{u}^k,v_h) + \Big(\frac{\varphi'(|\nabla \widetilde{u}_h^k|)}{|\nabla \widetilde{u}_h^k|} \nabla \widetilde{u}_h^k, \nabla v_h\Big) = 0,
\]
for all $v_h\in X_h$, where we assume that $u_h^0 = \widetilde{u}_h^0$. To
simplify our calculations, we define the operator $A:\mathbb{R}^d\to \mathbb{R}^d$ via
\[
A(a) = \frac{\varphi'(|a|)}{|a|} a,
\]
and the function $\varphi_\alpha:[0,\infty)\to [0,\infty)$ given for
$\alpha,s\ge 0$ by $\varphi_\alpha(0)=0$ and
\[
\varphi_\alpha'(s) = \frac{\varphi'(\alpha+ s)}{\alpha + s} s.
\]
We also use the notation $a\lesssim b$ if there exists a constant $c>0$ such
that $a\le cb$; we write $a\eqsim b$ if $a\lesssim b$ and $b\lesssim a$.
We assume further properties of $\varphi$.
\medskip
{\bf Condition} (C3){\bf .} The function $\varphi \in C(\mathbb{R}_{\ge 0}) \cap C^2(\mathbb{R}_{>0})$ is
convex and positive on $(0,\infty)$,
satisfies $\varphi(0)=0$, and $\lim_{s\to 0} \varphi(s)/s = 0$ and
$\lim_{s\to \infty} \varphi(s)/s=\infty$; moreover $\varphi$ and its convex
conjugate $\varphi^*$ satisfy $\varphi(2s)\lesssim \varphi(s)$ and
$\varphi^*(2r)\lesssim \varphi^*(r)$
for all $r,s \in \mathbb{R}_{\ge 0}$; additionally we have $\varphi''(s) s \eqsim \varphi'(s)$.
\medskip
The functions defined in Example~\ref{ex:orlicz_fns} satisfy~(C3) for $p>1$
with constants that deteriorate as $p\searrow 1$.
\begin{lemma}\label{la:mon_props_b}
If $\varphi$ satisfies~(C3), then the following statements are valid. \\
(i) For all $a,b\in \mathbb{R}^d$ we have
\begin{align}
\big(A(a)-A(b)\big)\cdot (a-b) & \eqsim \varphi_{|a|} (|a-b|), \label{eq:op_A_mon} \\
\big|A(a)-A(b)\big| &\lesssim \varphi_{|a|}'(|a-b|), \label{eq:op_A_cont}
\end{align}
and
\begin{equation}\label{eq:sim_phi}
\varphi_{|a|} (|a-b|) \eqsim \frac{\varphi'(|a|+|b|)}{|a|+|b|} |a-b|^2.
\end{equation}
(ii) For all $\alpha,r,s \ge 0$ and $\delta>0$ we have
\begin{equation}\label{eq:young_phi}
\varphi_\alpha'(r) s \le c_\delta \varphi_\alpha(r) + \delta \varphi_\alpha(s).
\end{equation}
\end{lemma}
\begin{proof}
We refer the reader to~\cite{DieEtt08} for proofs of the estimates.
\end{proof}
The relations of Lemma~\ref{la:mon_props_b} lead to the following result.
\begin{proposition}[Error estimate]\label{prop:diff_impl_simpl_p}
Suppose that $\varphi$ satisfies (C1)-(C3) and that there exist constants
$c_1,c_2>0$ such that
\begin{equation}\label{vphi'(s)/s}
c_1 \max\{s,\varepsilon\}^{p-2} \le \frac{\varphi'(s)}{s} \le c_2 \, \varepsilon^{p-2}
\end{equation}
for all $s\ge 0$. Assume further that $\mathcal{T}_h$ is quasi-uniform and
there exists $c_\infty>0$ such that
\begin{equation}\label{max-norm-bound}
\max_{k=0,\dots,K} \|u_h^k\|_{L^\infty(\Omega)}
+ \max_{k=0,\dots,K} \|\widetilde{u}_h^k\|_{L^\infty(\Omega)} \le c_\infty.
\end{equation}
Then, for the differences of the iterates of the implicit
and the semi-implicit numerical schemes we have that
\[
\max_{k=0,\dots,K} \|\widetilde{u}_h^k-u_h^k\| \le c_{p,{\rm s}} \tau^{1/2} (h\varepsilon)^{(p-2)/2},
\]
where $c_{p,{\rm s}}$ is proportional to $\big(E_{\varphi}[u_h^0]\big)^{1/2}$.
\end{proposition}
\begin{proof}
We omit the subscripts $h$ in what follows. To derive an estimate
for $\delta^k = u^k - \widetilde{u}^k$ we test the difference of the equations that define $u^k$ and $\widetilde{u}^k$
with $\delta^k$ and use~\eqref{eq:prod} in conjunction with
\eqref{eq:op_A_mon} to verify that
\[\begin{split}
\frac{d_t}{2} & \|\delta^k\|^2 + \frac{\tau}{2} \|d_t \delta^k\|^2 + \int_\Omega \varphi_{|\nabla u^k|} (|\nabla \delta^k|) \dv{x} \\
&\lesssim (d_t \delta^k,\delta^k) + \big(A(\nabla u^k)-A(\nabla \widetilde{u}^k),\nabla\delta^k\big) \\
&= \big(A(\nabla u^k) -A(\nabla u^{k-1}), \nabla \delta^k\big)
+\Big(\frac{\varphi'(|\nabla u^{k-1}|)}{|\nabla u^{k-1}|} \nabla [u^{k-1}-u^k],\nabla \delta^k\Big) \\& = R_1 + R_2.
\end{split}\]
To bound $R_1$ we use that~\eqref{eq:op_A_cont}
and~\eqref{eq:young_phi} imply that
\[\begin{split}
R_1 &\eqsim \int_\Omega \varphi_{|\nabla u^k|}'(|\nabla [u^k-u^{k-1}]|) |\nabla \delta^k| \dv{x} \\
& \le \delta \int_\Omega \varphi_{|\nabla u^k|} (|\nabla \delta^k|)\dv{x}
+ c_\delta \int_\Omega \varphi_{|\nabla u^k|} (|\nabla [u^k-u^{k-1}]|)\dv{x}.
\end{split}\]
Invoking the equivalence~\eqref{eq:sim_phi}, the property
that $s\mapsto \varphi'(s)/s$ is nonincreasing, and the relation $\tau d_t u^k = u^k-u^{k-1}$,
we deduce that
\[\begin{split}
R_1 &\lesssim \delta \int_\Omega \varphi_{|\nabla u^k|} (|\nabla \delta^k|)\dv{x}
+ c_\delta \tau^2 \int_\Omega \frac{\varphi'(|\nabla u^k| + |\nabla u^{k-1}|)}{|\nabla u^k| + |\nabla u^{k-1}|}
|d_t \nabla u^k|^2 \dv{x} \\
&\le \delta \int_\Omega \varphi_{|\nabla u^k|} (|\nabla \delta^k|)\dv{x}
+ c_\delta \tau^2 \int_\Omega \frac{\varphi'(|\nabla u^{k-1}|)}{|\nabla u^{k-1}|} |d_t \nabla u^k|^2 \dv{x}.
\end{split}\]
For the term $R_2$ we employ Young's inequality
$st\le \delta s^2 + c_\delta t^2$ to obtain
\[\begin{split}
&R_2 \le \delta \int_\Omega \frac{\varphi'(|\nabla u^k|+|\nabla \widetilde{u}^k|)}{|\nabla u^k|+|\nabla \widetilde{u}^k|}
|\nabla \delta^k|^2 \dv{x} \\
& + c_\delta \tau^2 \Big\|\frac{|\nabla u^k|+|\nabla \widetilde{u}^k|}{\varphi'(|\nabla u^k|+|\nabla \widetilde{u}^k|)}
\frac{\varphi'(|\nabla u^{k-1}|)}{|\nabla u^{k-1}|} \Big\|_{L^\infty(\Omega)}
\int_\Omega \frac{\varphi'(|\nabla u^{k-1}|)}{|\nabla u^{k-1}|} |d_t \nabla u^k|^2 \dv{x}.
\end{split}\]
Utilizing an inverse estimate in conjunction with
\eqref{max-norm-bound} yields
\[
\|\nabla u^k\|_{L^\infty(\Omega)} +
\|\nabla\widetilde{u}^k\|_{L^\infty(\Omega)} \lesssim c_\infty h^{-1},
\]
whence \eqref{vphi'(s)/s} gives
\[
\Big\|\frac{|\nabla u^k|+|\nabla \widetilde{u}^k|}{\varphi'(|\nabla u^k|+|\nabla
\widetilde{u}^k|)}\Big\|_{L^\infty(\Omega)} \lesssim c_\infty h^{p-2},
\quad
\Big\|\frac{\varphi'(|\nabla u^{k-1}|)}{|\nabla u^{k-1}|}
\Big\|_{L^\infty(\Omega)} \lesssim \varepsilon^{p-2}.
\]
In view of~\eqref{eq:sim_phi}, these bounds lead to
\[\begin{split}
R_2 \le \delta \int_\Omega \varphi_{|\nabla u^k|}(|\nabla \delta^k|) \dv{x}
+ c c_\delta \tau^2 h^{p-2} \varepsilon^{p-2}
\int_\Omega \frac{\varphi'(|\nabla u^{k-1}|)}{|\nabla u^{k-1}|} |d_t \nabla u^k|^2 \dv{x}.
\end{split}\]
Combining the first estimate with those of $R_1$ and $R_2$ we
obtain the following bound
after summation over $k=1,2,\dots,L$ and multiplication by $\tau$
\[\begin{split}
\|\delta^L\|^2 + & \tau^2 \sum_{k=1}^L \|d_t \delta^k\|^2
+ \tau \sum_{k=1}^L \int_\Omega \varphi_{|\nabla u^k|} (|\nabla \delta^k|) \dv{x} \\
& \lesssim \tau \big(1 + (h\varepsilon)^{p-2}\big) \tau^2 \sum_{k=1}^L
\int_\Omega \frac{\varphi'(|\nabla u^{k-1}|)}{|\nabla u^{k-1}|} |d_t \nabla u^k|^2 \dv{x},
\end{split}\]
where we have also used that $\delta^0=0$. The bound of Proposition~\ref{prop:ener_stab}
for the sum on the right-hand side implies the asserted estimate.
\end{proof}
A complete error estimate follows from combining Proposition~\ref{prop:diff_impl_simpl_p}
with the error estimate~\eqref{eq:p_flow_est} for the implicit scheme
from~\cite{DiEbRu07}.
\begin{corollary}\label{thm:p_flow_simpl}
Let $\Omega$ be convex, $X=W^{1,p}_0(\Omega)$, and let $u^0\in W^{1,2}_0(\Omega)$ and
$\diver \big(|\nabla u^0|^{p-2}\nabla u^0\big) \in L^2(\Omega)$.
Let $\mathcal{T}_h$ be quasi-uniform, $u_h^0 \in X_h$ be such that
$\|\nabla u_h^0\|_{L^p(\Omega)} \le c \|\nabla u^0\|_{L^p(\Omega)}$, and
$c_\infty>0$ satisfy
\[
\max_{k=0,\dots,K} \|u_h^k\|_{L^\infty(\Omega)}
+ \max_{k=0,\dots,K} \|\widetilde{u}_h^k\|_{L^\infty(\Omega)} \le c_\infty.
\]
If $u$ is the solution of~\eqref{eq:p_flow} with $p\in (1,2)$ and
$(u_h^k)_{k=0,\dots,K}$ are the iterates of Algorithm~\ref{alg:simpl} with
$\varphi(r) = (|r|_\varepsilon^p-|0|_\varepsilon^p)/p$, then we have
\[
\max_{k=0,\dots,K} \|u(t_k) - u_h^k\| \le c_{{\rm isf}} \tau + c_{p,{\rm i}} h
+ 2(c_{p,{\rm r}}T)^{1/2} \varepsilon^{p/2} + c_{p,{\rm s}} \tau^{1/2} (h\varepsilon)^{(p-2)/2}.
\]
\end{corollary}
Establishing rigorously the $L^\infty$ bounds \eqref{max-norm-bound}
requires further conditions. Such
bounds can be avoided if in the proof of Proposition~\ref{prop:diff_impl_simpl_p}
inverse estimates $\|\nabla v_h\|_{L^\infty(\Omega)} \le c h^{-d/p} \|\nabla v_h\|_{L^p(\Omega)}$
are used, which leads to a weaker error estimate since $d/p>1$.
\begin{remark}
The $L^\infty$ bounds \eqref{max-norm-bound}
can be obtained via discrete maximum principles
provided that $u^0\in L^\infty(\Omega)$.
For the semi-implicit scheme it is sufficient to guarantee that the system matrix
in every time step is an $M$-matrix, which holds if quadrature (mass lumping)
is used, the triangulation is (strongly) acute, and $\tau$ is sufficiently small.
For the implicit scheme this follows from monotonicity properties
of the minimization problems at each time step, which are available if
quadrature is used and the mesh is acute.
\end{remark}
\section{Numerical experiments}\label{sec:numex}
We illustrate our theoretical findings by numerical experiments for the
most singular case $p=1$. For this, we construct explicit solutions and
then compare errors for approximations obtained with the implicit
scheme and the semi-implicit scheme of Algorithm~\ref{alg:simpl}
and different regularization parameters.
The nonlinear systems of equations in the time steps of the
implicit scheme were solved with an alternating direction
method of multipliers (ADMM) with variable step sizes proposed and analyzed
in~\cite{BarMil17-pre}.
\subsection{Explicit solutions}
We consider~\eqref{eq:p_flow} with $p=1$ and Dirichlet boundary conditions, i.e.,
formally, we consider
\begin{equation}\label{eq:tv_flow_db}
\partial_t u = \diver \frac{\nabla u}{|\nabla u|}, \quad
u(0,\cdot) = u^0, \quad u(t,\cdot)|_\pO = 0.
\end{equation}
Establishing
the existence of solutions subject to Dirichlet boundary
conditions is a difficult task but the stability and error estimates
remain valid whenever a solution exists. To
construct explicit, nontrivial solutions we use the equivalent
formulation
\begin{equation}\label{eq:tv_flow_mixed_a}
u_t = \diver p, \quad \nabla u \in \partial I_K(p),
\end{equation}
where $K=\overline{B_1(0)}$.
The inclusion follows from its equivalence to $p \in \partial |\nabla u|$
and means that $p\in L^\infty(\Omega;\mathbb{R}^d)$ with $|p|\le 1$ satisfies
\[
(\nabla u, q-p) \le 0
\]
for all $q\in L^\infty(\Omega;\mathbb{R}^d)$ with $|q|\le 1$, provided that
$\nabla u\in L^1(\Omega;\mathbb{R}^d)$.
For the case that $u\in BV(\Omega)\cap L^2(\Omega)$ with $u|_\pO=0$ we may formulate
it as
\begin{equation}\label{eq:tv_flow_mixed_b}
-\big(u,\diver(q-p)\big) \le 0,
\end{equation}
requiring that $p,q\in H(\diver;\Omega)$ with $|p|,|q|\le 1$.
We refer the reader to~\cite{BeCaNo02,BaNoSa14} for further details.
The following examples use that for regular solutions of~\eqref{eq:tv_flow_db}
the change of height $\partial_t u$ at a noncritical point $x\in \Omega$ equals
the negative mean curvature~$-H = \diver(\nabla u/|\nabla u|)$ of the
corresponding level set,
and that jump sets, along which gradients are unbounded, have vanishing
normal velocity $V = \partial_t u/|\nabla u| = -H/|\nabla u|$.
\begin{example}[Decreasing disk,~\cite{BeCaNo02}]\label{ex:decr_disk}
Let $\Omega\subset \mathbb{R}^d$ such that $B_1(0)\subset \Omega$ and
\[
u(t,x) = \max\big\{1-td,0\big\} \chi_{B_1(0)}(x).
\]
Then $u$ solves~\eqref{eq:tv_flow_db} with $u^0 = \chi_{B_1(0)}$.
\end{example}
\begin{proof}
For $t\le 1/d$ and $x\in \Omega$ we define
\[
p(t,x) = -
\begin{cases}
x, & |x|\le 1, \\
x/|x|^d, & |x| \ge 1.
\end{cases}
\]
For $t>1/d$ we set $p(t,x)=0$. We have that $p(t,\cdot)$ is
continuous in $\Omega$ with $|p|\le 1$ and $\partial_t u = \diver p$ in~$\Omega$.
To show that $u$ solves~\eqref{eq:tv_flow_mixed_a} it remains to
verify~\eqref{eq:tv_flow_mixed_b}. For $q\in H(\diver;\Omega)$ with
$|q|\le 1$ we have
\[
-\big(u,\diver(q-p)\big)
= -(1-td) \int_{\partial B_1(0)} (q-p)\cdot n \dv{s} \le 0,
\]
since $p=-n$ on $\partial B_1(0)$ and $q \cdot n \le 1$.
\end{proof}
The solution constructed in the second example is Lipschitz
continuous at all times but the discontinuity set of $\nabla u$
is nonstationary. Moreover, we have that $\partial_t u(0) \not \in L^2$
so that only the suboptimal convergence rate $\mathcal{O}(\tau^{1/2})$
for the time-discretization error can be expected.
\begin{example}[Decreasing cone]\label{ex:decr_cone}
Let $\Omega \subset \mathbb{R}^d$ such that $B_1(0)\subset \Omega$ and
\[
u^0(x) = \max\big\{1-|x|,0\big\}.
\]
If
\[
s(t) = (d+1)^{1/2} t^{1/2}, \quad
r(t) = \frac12 \big(1 + (1-4t(d-1))^{1/2}\big),
\]
then for $t\le (d+1)/(4d^2)$ we have
\[
u(t,x) =
\begin{cases}
1-s(t)-t(d-1)/s(t), & |x|\le s(t), \\
1-|x|- t(d-1)/|x|, & s(t)\le |x| \le r(t), \\
0, & r(t) \le |x|.
\end{cases}
\]
For $t \ge (d+1)/(4d^2)$, we have $u(t,x)= 0$ for all $x\in \Omega$.
\end{example}
\begin{proof}
We first note that for a nondegenerate point $x\in \Omega$ for
a solution of~\eqref{eq:tv_flow_db} we have
that the mean curvature of its level set equals $(d-1)/|x|$, whence
\[
\partial_t u(t,x) = -\frac{d-1}{|x|},
\]
as long as $\nabla u(t,x)\neq 0$; hence, $u(t,x) = 1-|x|- t(d-1)/|x|$.
To prove that $u$ is a solution of \eqref{eq:tv_flow_mixed_a},
we construct an appropriate vector field $p$. We define
\[
p(t,x) = -
\begin{cases}
x/s(t), & |x| \le s(t), \\
x/|x|, & s(t) \le |x| \le r(t), \\
xr(t)^{d-1}/|x|^d, & r(t)\le |x|,
\end{cases}
\]
and note that $p(t,\cdot)$ is continuous in $\Omega$ with $|p|\le 1$ and
\[
\diver p(t,x) = -
\begin{cases}
d/s(t), & |x|< s(t), \\
(d-1)/|x|, & s(t) < |x| < r(t), \\
0, & |x| > r(t) .
\end{cases}
\]
The differential equation $\partial_t u = \diver p$ is obviously
satisfied for $|x| > s(t)$. For $0\le |x| < s(t)$ we obtain the condition
\[
-s' - \frac{d-1}{s} + \frac{t(d-1)}{s^2}s' = -\frac{d}{s} \quad \Longleftrightarrow \quad
s' = \frac{s}{s^2-(d-1)t},
\]
which is satisfied by definition of $s$. We finally note
that, since $u(t,\cdot) \in W^{1,\infty}(\Omega)$ with $u(t,\cdot)|_{\pO}=0$
and $p= \nabla u/|\nabla u|$ for $s(t)\le |x| \le r(t)$
and $\nabla u = 0$ otherwise, we have
\[\begin{split}
-\big(u,\diver(q-p)\big) = \int_\Omega \nabla u \cdot (q-p)\dv{x}
\le 0,
\end{split}\]
provided that $|q|\le 1$. This proves the statement.
\end{proof}
Snapshots of implicit approximations of the total variation flow
with $\varepsilon=0$ on a triangulation~$\mathcal{T}_\ell$ of
$\Omega=(-3/2,3/2)^2$ obtained from $\ell=5$ uniform refinements of an initial
partition $\mathcal{T}_0$ into two triangles and with $\tau = h/4$ are shown
in Figures~\ref{fig:decr_disk} and~\ref{fig:decr_cone}.
\begin{figure}[p]
\includegraphics[width=.45\linewidth]{ex_1_red_5_sol_t_0.eps} \hspace*{2mm}
\includegraphics[width=.45\linewidth]{ex_1_red_5_sol_t_0_1.eps} \\
\includegraphics[width=.45\linewidth]{ex_1_red_5_sol_t_0_2.eps} \hspace*{2mm}
\includegraphics[width=.45\linewidth]{ex_1_red_5_sol_t_0_3.eps}
\caption{\label{fig:decr_disk} Numerical solutions for $t\approx 0.0, 0.1, 0.2, 0.3$
in Example~\ref{ex:decr_disk} computed with the implicit scheme and $\varepsilon=0$.}
\end{figure}
\begin{figure}[p]
\includegraphics[width=.45\linewidth]{ex_2_red_5_sol_t_0.eps} \hspace*{2mm}
\includegraphics[width=.45\linewidth]{ex_2_red_5_sol_t_0_05.eps} \\
\includegraphics[width=.45\linewidth]{ex_2_red_5_sol_t_0_1.eps} \hspace*{2mm}
\includegraphics[width=.45\linewidth]{ex_2_red_5_sol_t_0_15.eps}
\caption{\label{fig:decr_cone} Numerical solutions for $t \approx 0.0, 0.05, 0.1, 0.15$
in Example~\ref{ex:decr_cone} computed with the implicit scheme and $\varepsilon=0$.}
\end{figure}
\begin{figure}[p]
\includegraphics[width=.48\linewidth]{ex_1_red_4_l2_errs.eps} \hspace*{2mm}
\includegraphics[width=.48\linewidth]{ex_1_red_5_l2_errs.eps}
\caption{\label{fig:ex_1_l2_errs_time} $L^2$ errors as functions of
$t\in [0,1]$ in Example~\ref{ex:decr_disk} for the semi-implicit scheme with
$\varepsilon = h^\alpha$, $\alpha=1/2,1,2$, and implicit approximations on triangulations
$\mathcal{T}_\ell$, $\ell=4$ (left) and $\ell=5$ (right).}
\end{figure}
\begin{figure}[p]
\begin{minipage}{.55\linewidth}
{\small \begin{tabular}{|c|c|c|c|c|} \hline
$\ell$ & implicit & ${\varepsilon = h^{1/2}}$ & ${\varepsilon = h}$ & ${\varepsilon = h^2}$ \\\hline\hline
3 & 0.3135 & 0.4024 & 0.2515 & 0.1342 \\\hline
4 & 0.1999 & 0.3179 & 0.1495 & 0.1197 \\\hline
5 & 0.1421 & 0.2276 & 0.1139 & 0.1030 \\\hline
6 & 0.1313 & 0.1882 & 0.1005 & 0.0980 \\\hline
7 & -- & 0.1487 & 0.0813 & 0.0786 \\\hline
8 & -- & 0.1172 & 0.0701 & 0.0679 \\\hline
9 & -- & 0.0908 & 0.0595 & 0.0576 \\\hline
10 & -- & 0.0710 & 0.0510 & 0.0496 \\\hline
\end{tabular}}
\end{minipage}
\begin{minipage}{.44\linewidth}
\includegraphics[width=\linewidth]{ex_1_conv_rates_plot.eps}
\end{minipage}
\vspace*{-2mm}
\caption{\label{tab:ex_1_l2_errs} Maximal $L^2$ errors for different choices of
$\varepsilon$ and on different triangulations~$\mathcal{T}_\ell$ of level $\ell$
in Example~\ref{ex:decr_disk}.}
\vspace*{2mm}
\end{figure}
\begin{figure}[p]
\includegraphics[width=.3\linewidth]{ex_1_red_5_sol_t_0_2_eps_pow_0_5.eps} \hspace*{2mm}
\includegraphics[width=.3\linewidth]{ex_1_red_5_sol_t_0_2_eps_pow_1.eps} \hspace*{2mm}
\includegraphics[width=.3\linewidth]{ex_1_red_5_sol_t_0_2_eps_pow_2.eps}
\caption{\label{fig:ex_1_comp_reg} Numerical approximations at $t\approx 0.2$ for
$\varepsilon = h^\alpha$, $\alpha=1/2,1,2$ (left to right) in Example~\ref{ex:decr_disk}.
In comparison with the solution obtained with the implicit scheme
shown in Figure~\ref{fig:decr_disk} we observe a smoothing of the discontinuity.}
\end{figure}
\begin{figure}[p]
\includegraphics[width=.48\linewidth]{ex_2_red_4_l2_errs.eps} \hspace*{2mm}
\includegraphics[width=.48\linewidth]{ex_2_red_5_l2_errs.eps}
\caption{\label{fig:ex_2_l2_errs_time} $L^2$ errors as functions of
$t\in [0,1]$ in Example~\ref{ex:decr_cone} for the semi-implicit scheme with
$\varepsilon = h^\alpha$, $\alpha=1/2,1,2$, and implicit approximations on triangulations
$\mathcal{T}_\ell$, $\ell=4$ (left) and $\ell=5$ (right).}
\end{figure}
\begin{figure}[p]
\begin{minipage}{.55\linewidth}
{\small \begin{tabular}{|c|c|c|c|c|} \hline
$\ell$ & implicit & ${\varepsilon = h^{1/2}}$ & ${\varepsilon = h}$ & ${\varepsilon = h^2}$ \\\hline\hline
3 & 0.1100 & 0.3368 & 0.2936 & 0.1809 \\\hline
4 & 0.0753 & 0.3432 & 0.2729 & 0.1490 \\\hline
5 & 0.0129 & 0.2795 & 0.1808 & 0.0956 \\\hline
6 & 0.0066 & 0.2169 & 0.1087 & 0.0588 \\\hline
7 & -- & 0.1615 & 0.0617 & 0.0364 \\\hline
8 & -- & 0.1161 & 0.0341 & 0.0241 \\\hline
9 & -- & 0.0814 & 0.0186 & 0.0149 \\\hline
10 & -- & 0.0585 & 0.0101 & 0.0089 \\\hline
\end{tabular}}
\end{minipage}
\begin{minipage}{.44\linewidth}
\includegraphics[width=\linewidth]{ex_2_conv_rates_plot.eps}
\end{minipage}
\vspace*{-2mm}
\caption{\label{tab:ex_2_l2_errs} Maximal $L^2$ errors for different choices of
$\varepsilon$ and on different triangulations~$\mathcal{T}_\ell$ of level $\ell$
in Example~\ref{ex:decr_cone}.}
\vspace*{2mm}
\end{figure}
\begin{figure}[htb]
\includegraphics[width=.3\linewidth]{ex_2_red_5_sol_t_0_05_eps_pow_0_5.eps} \hspace*{2mm}
\includegraphics[width=.3\linewidth]{ex_2_red_5_sol_t_0_05_eps_pow_1.eps} \hspace*{2mm}
\includegraphics[width=.3\linewidth]{ex_2_red_5_sol_t_0_05_eps_pow_2.eps}
\caption{\label{fig:ex_2_comp_reg} Numerical approximations at $t\approx 0.05$ for
$\varepsilon = h^\alpha$, $\alpha=1/2,1,2$ (left to right) in Example~\ref{ex:decr_cone}.
In comparison with the solution obtained with the implicit scheme
shown in Figure~\ref{fig:decr_cone} we observe a rounding of the kinks.}
\end{figure}
\subsection{Experimental observations}
We computed numerical approximations with the implicit and the semi-implicit
schemes on sequences of quasi-uniform triangulations with mesh-size $h$,
using different regularization parameters $\varepsilon$, and the fixed relation
$\tau = h/4$.
\subsubsection{Results for Example~\ref{ex:decr_disk}}
In Figure~\ref{fig:ex_1_l2_errs_time} we plotted the $L^2$ errors
for the implicit and the semi-implicit schemes with regularization
parameters $\varepsilon=h^\alpha$, $\alpha=1/2,1,2$, as functions of $t\in [0,T]$, $T=1$,
obtained for the triangulations $\mathcal{T}_4$ and $\mathcal{T}_5$.
We observe that the $L^2$ errors decrease monotonically with~$\varepsilon$ during most of
the evolution with a certain stagnation, and that the errors obtained with the implicit scheme
are comparable as long as the solution is nontrivial. In particular, the
implicit scheme predicts accurately the extinction time $t=0.5$ in contrast to the
approximations obtained with the regularized, semi-implicit method.
The maximal $L^2$~errors on $t\in [0,T]$ for several triangulations of
decreasing mesh size displayed in Figure~\ref{tab:ex_1_l2_errs} show that
for a larger value of~$\varepsilon$ we obtain a better experimental convergence rate.
This confirms the critical dependence of our error estimates on the
regularization parameter $\varepsilon$. No clear experimental convergence rate can be
deduced for the implicit approach although we used a
stringent stopping criterion (residual
less than $\delta_{\rm stop} = h^5$ in the $\ell^2$-norm).
This condition is dictated by theory of the alternating direction
method of multipliers (ADMM) of~\cite{BarMil17-pre}, and
guarantees that the computational results are not due to poor
resolution, but prevents ADMM from converging beyond~6 uniform
refinements, namely for $h\le 5\cdot 10^{-2}$.
Figure~\ref{fig:ex_1_comp_reg} displays snapshots of numerical solutions
on the same triangulation $\mathcal{T}_5$ but with different regularization parameters
$\varepsilon$ at $t\approx 0.2$. As expected, the smearing effect across
the jump discontinuity set of the exact solution depends on $\varepsilon$.
The choice $\varepsilon = h^2$ appears to give very accurate approximations on $\mathcal{T}_5$
although, as depicted in Figure~\ref{tab:ex_1_l2_errs}, it exhibits
the worst experimental convergence rate.
\subsubsection{Results for Example~\ref{ex:decr_cone}}
The results of our numerical experiments shown in
Figures~\ref{fig:ex_2_l2_errs_time}, \ref{tab:ex_2_l2_errs},
and~\ref{fig:ex_2_comp_reg} are similar to those for Example~\ref{ex:decr_disk}.
Here, we observe the best experimental convergence rate for the
choice $\varepsilon=h$ instead of $\varepsilon = h^{1/2}$ which may be explained
by the uniform Lipschitz continuity of the solution. The implicit treatment
leads to smaller approximation errors but, as in
Example~\ref{ex:decr_disk}, the stringent stopping criterion for
ADMM prevents its convergence beyond six uniform mesh
refinements.
\subsubsection{Conclusions}
Our numerical experiments confirm that the error estimates
for the semi-implicit scheme depend on the inverse of the regularization
parameter $\varepsilon$. The experimental convergence rates
are better than those predicted by theory: for $\tau$
proportional to $h$ we expect no convergence (see Corollary \ref{thm:tv_flow_simpl}).
This feature appears to be related to special
regularity properties of the explicit solutions such as $\partial_t u(t) \in L^\infty(\Omega)$
and $u(t)\in W^{1,\infty}(\Omega)$ for all $t\in (0,T)$
in Examples~\ref{ex:decr_disk} and~\ref{ex:decr_cone}, respectively.
The implicit scheme leads to highly accurate approximations that
provide good predictions of extinction times, but
require a substantially larger computational effort. In fact, finding
reliable stopping criteria for the iterative solver, the alternating direction
method of multipliers, is a challenging task. Therefore, the semi-implicit scheme
may also be applied as iterative solver for each time step of
the implicit scheme.
\bigskip
{\em Acknowledgments.} SB and RHN acknowledge hospitality
of the Hausdorff Research Institute for Mathematics within the trimester program
{\em Multiscale Problems: Algorithms, Numerical Analysis and Computation}.
RHN was partially supported as Simons Visiting Professor, in connection
with the Oberwolfach Workshop {\em Emerging Developments in Interfaces and
Free Boundaries}, as well as by the NSF grant DMS-1411808. SB also
acknowledges support by the DFG priority programme SPP-1962.
\bibliographystyle{amsalpha}
|
1,314,259,994,989 | arxiv | \section*{Background \& Summary} \label{sec:introduction}
Breast cancer is among the most prevalent cancers and accounts for the largest portion of cancer deaths, with an estimated 2.2 million new cases in 2020~\cite{sung2021global}. Treatment is most successful when breast cancer is at its early stage. Biennial screening can reduce breast cancer mortality rate by 30\%~\cite{mandelblatt2016collaborative}. Among standard imaging examinations for breast cancer screening, namely mammography, ultrasound, digital breast tomosynthesis, and magnetic resonance, mammography is the recommended modality for cancer screening~\cite{siu2016screening}. Interpreting mammography for breast cancer screening is a challenging task. The recall rate of mammogram screening is around 11\% with a sensitivity of 86.9\%, while the cancer detection rate is 5.1 per 1,000 screens ~\cite{lehman2017national}. It means that 95\% of called-back cases are false-positive.
With recent advancements of learning-based algorithms for image analysis~\cite{krizhevsky2012imagenet,lecun2015deep}, several works have adapted deep learning networks for mammography interpretation and showed potential to use in clinical practices~\cite{mckinney2020international,dembrower2020effect,rodriguez2019stand,rodriguez2019detection}. In retrospective settings, the CAD tool as an independent reader can achieve a performance comparable to an average mammographer~\cite{rodriguez2019stand}. It can be leveraged as a decision support tool that helps enhance radiologists' cancer detection with the reading time being unchanged~\cite{rodriguez2019detection}. Furthermore, there was evidence that shows a machine learning model developed by training on data from a specific population (UK) can generalize and perform well on another population (US)~\cite{mckinney2020international}.
While mammography interpretation has drawn much attention, only a few datasets are publicly available to the research community. Some of the most widely used datasets are Digital Database for Screening Mammography (DDSM)~\cite{bowyer1996digital}, Mammographic Image Analysis Society (MIAS) dataset~\cite{suckling1994mammographic}, and INbreast~\cite{moreira2012inbreast}. Although these datasets were created with precise annotations of breast abnormalities, their sample sizes are rather limited, which might not fully leverage the potential of recent deep learning networks~\cite{sun2017revisiting}. DDSM is prevalently used for learning-based approaches due to its sizable number of exams, with 10,480 images (2,620 exams). However, DDSM was released as a digitalized scan of screen-film mammography. At the same time, the image acquisition mode used for CAD tools in clinical practice is usually full-field digital mammography. A summary of the key characteristics of these datasets is given in Table \ref{tab:existing-data}.
\begin{table}[ht]
\footnotesize
\caption{\textsf{Commonly used datasets of mammography.}}
\setlength{\tabcolsep}{5pt}
\renewcommand{\arraystretch}{1.5}
\begin{tabular}{P{90pt}P{90pt}P{90pt}P{90pt}P{90pt}}
\hline
\textbf{Dataset} & \textbf{MIAS~\cite{suckling1994mammographic}}
& \textbf{INBreast~\cite{moreira2012inbreast}}
& \textbf{DDSM~\cite{bowyer1996digital}}
& \textbf{VinDr-Mammo (ours)} \\ \hline
\textbf{Origin} & United Kingdom& Portugal & United States & Vietnam \\
\textbf{Release year} & 1994 & 2012 & 1996 & 2022 \\
\textbf{Number of studies} & 161 & 115 & 2,620 & 5,000 \\
\textbf{Number of images} & 322 & 410 & 10,480 & 20,000 \\
\textbf{Finding types} & Mass, calcification, asymmetry, and distortion
& Mass, calcification, asymmetry, and distortion & Mass and calcification
& Mass, calcification, asymmetry, distortion, and other associated features\\
\textbf{Annotation} & Circle around the finding, specified by center and radius
& Contour enclosing the finding
& Contour enclosing the finding
& Rectangle bounding box around the finding \\
\textbf{BI-RADS assessment} & No & Yes & Yes & Yes \\
\textbf{Breast density} & Yes & Yes & Yes & Yes \\
\textbf{Mode of image acquisition} & SFM & FFDM & SFM & FFDM \\ \hline
\end{tabular}
\label{tab:existing-data}
\end{table}
To overcome these challenges, we introduce and release the VinDr-Mammo dataset, a large-scale benchmark dataset of full-field digital mammography consisting of 5,000 four-view exams with breast-level assessment and findings annotations. Mammographies were acquired retrospectively from two primary hospitals in Vietnam, namely Hospital 108 (H108) and Hanoi Medical University Hospital (HMUH). Breast cancer assessment and density are reported following Breast Imaging Reporting and Data System~\cite{birads_2013}. Breast abnormalities that need short-term follow-up or are suspicious for malignancy are marked by bounding rectangles. Following European guideline~\cite{amendoeira2013european}, mammography exams were independently double read. Any discordance between the two radiologists would be resolved by arbitration with the involvement of a third radiologist. To the best of our knowledge, VinDr-Mammo is currently the largest public dataset (20,000 scans) of full-field digital mammography that provides breast-level BI-RADS assessment category along with suspicious or probably benign findings that need follow-up examination. By introducing the dataset, we contribute a benchmarking imaging dataset to evaluate and compare algorithmic support systems for breast cancer screening based on FFDM.
\section*{Methods}
This study was approved by the Institutional Review Board of the HMUH and H108. All the personally identifiable information and protected health information of patients were removed. Additionally, this project did not affect clinical care at these two hospitals; hence patient consent was waived. The creation of the VinDr-Mammo dataset involves three stages: data acquisition, mammography reading, and data stratification. An overview of the data creation process is illustrated in Figure \ref{fig:overview}.
\begin{figure}[h]
\centering
\includegraphics[width=0.9\linewidth]{images/mammo_dataset.png}
\caption{Overview of the data creation process. First, raw mammograms in DICOM format were collected retrospectively from the
hospital’s PACS. These scans then got de-identified to protect patients’ privacy. Next, a web-based labeling
tool called VinDr Lab was developed to store, manage, and remotely annotate DICOM data. Finally, the annotated exams were split to a training set of 4,000 exams and a test set of 1,000 exams.}
\label{fig:overview}
\end{figure}
\subsection*{Data acquisition}
In this step, 20,000 mammography images in DICOM format from 5,000 mammography examinations were sampled and collected from the pool of all mammography examinations taken between 2018 and 2020 via the Picture Archiving and Communication System (PACS) of HMUH and H108. To ensure patient privacy is protected, identifiable patient information in DICOM tags is fully removed via a Python script. Only necessary information used for loading and processing DICOM images and patient demographic information, i.e., age, is retained. Besides DICOM meta-data, associated information might appear in image pixel data, such as laterality and view position of the image and sometimes patient's name. As this textual information usually appears in the corners of the image, we remove them by setting to black all pixels in a rectangle at each corner. The size of the rectangle is determined by visually inspecting a subset of the collected dataset. To validate the de-identification stage, both DICOM metadata and pixel data are manually reviewed by human readers.
\subsection*{Mammography reading}
This dataset aims to provide both the overall assessment of the breast and information of abnormal findings, which are essential to developing CADx and CADe systems for breast cancer screening. To this end, the 5,000 sampled exams containing 20,000 images were re-read, as the associated radiology reports do not indicate the exact locations of the findings.
The reading results follow the schema and lexicon of the Breast Imaging Reporting and Data System~\cite{birads_2013}. At the breast level, the overall BI-RADS assessment categories and breast density level (also termed breast composition) are provided. There are seven BI-RADS assessment categories, namely BI-RADS 0 (need additional imaging or prior examinations), BI-RADS 1 (negative), BI-RADS 2 (benign), BI-RADS 3 (probably benign), BI-RADS 4 (Suspicious), BI-RADS 5 (highly suggestive of malignancy) and BI-RADS 6 (known biopsy-proven). Since the tissue diagnosis results are not available, there is no presence of BI-RADS 6 in the re-reading process. Regarding the breast density level, its four categories are A (almost entirely fatty), B (scattered areas of fibroglandular), C (heterogeneously dense), and D (extremely dense). For the mammography findings, the list of findings provided in this dataset includes the mass, calcification, asymmetries, architectural distortion, and other associated features, namely suspicious lymph node, skin thickening, skin retraction, nipple retraction. Each finding is marked by a bounding box to localize the abnormal finding. In the given finding list, BI-RADS assessment is provided for mass, calcification, asymmetries, architectural distortion. Since the purpose of this dataset is for breast cancer screening, benign findings, i.e., findings of BI-RADS 2, are not reported to reduce the annotating time. Only findings of BI-RADS categories greater than 2, which are not confident of benign or likely to be malignant, are marked. More details of the reading reports are provided in supplementary materials. Figure \ref{fig:sample_exam} illustrates a sample mammography exam with both finding annotations and breast-level assessments reported by radiologists.
\begin{figure}[h]
\centering
\begin{subfigure}[b]{0.22\textwidth}
\centering
\includegraphics[width=\linewidth]{images/RMLO.png}
\caption{Right MLO}
\end{subfigure} %
\begin{subfigure}[b]{0.22\textwidth}
\centering
\includegraphics[width=\linewidth]{images/RCC.png}
\caption{Right CC}
\end{subfigure} %
\begin{subfigure}[b]{0.22\textwidth}
\centering
\includegraphics[width=\linewidth]{images/LCC.png}
\caption{Left CC}
\end{subfigure} %
\begin{subfigure}[b]{0.22\textwidth}
\centering
\includegraphics[width=\linewidth]{images/LMLO.png}
\caption{Left MLO}
\end{subfigure} %
\caption{A sample mammography exam with the right breast assessed with BI-RADS 5, density B and the left breast with BI-RADS 1, density B. CC denotes craniocaudal and MLO denotes mediolateral oblique.}
\label{fig:sample_exam}
\end{figure}
The mammography reading process was facilitated by a web-based annotation tool, called VinDr Lab~\cite{VinDrLab}, which was specifically designed for viewing and annotating medical images. The three participating radiologists can remotely access the data for reading and annotating. All three radiologists have received healthcare profession certificates provided by the Vietnamese Ministry of Health and have more than ten years of experience. Each mammography exam was assigned to two mammographers and read independently. In cases of discordance, the exam would be assigned to the third radiologist at a higher senior experience level, to make the final decision taking into account annotations of previous readers. After the reading process had been completed, the breast level categories and finding annotations were exported in JavaScript Object Notation (JSON) format. Subsequently, we parsed the exported file to discard unnecessary information, namely annotation timestamp, radiologist's identifier, then simplified the file's structure and transformed it to a comma-separated values (CSV) file so that it could be easily parsed.
\subsection*{Data stratification}
Recent CADx and CADe solutions are mostly learning-based approaches that require separating the dataset into disjoint subsets for training and evaluation. A pre-defined training/test split would help guarantee that different research works will use the same exams for training and testing. Otherwise, inconsistent or unstated splits in different research works might hinder the reproducibility and comparison of these works. For an appropriate stratification, both the training and test sets should reflect the assessment, composition, and distribution of findings of the whole dataset. However, stratifying that dataset while preserving the correlation between various data characteristics is a challenging task as the number of combinations of different attributes grows exponentially with the number of attributes (in this case, BI-RADS, breast composition, and findings categories). Hence, we split the dataset by an algorithm called iterative stratification~\cite{sechidis2011stratification}, which is based on a relaxed target that only retains the fraction of appearance of each attribute while ignoring their co-occurrence. One-fifth of the dataset, equivalent to 1,000 exams, is for testing and the rest for training. The attributes that are taken into account for splitting include breast-level BI-RADS categories, tissue composition, findings categories, and the attached BI-RADS categories (if any). The distribution of breast-level BI-RADS categories, breast composition, and findings for each subset are provided in Table~\ref{tab:birads_dis}, Table~\ref{tab:density_dis}, and Table~\ref{tab:finding_dis}, respectively. The BI-RADS assessment of finding and patient age distribution are also depicted in Figure~\ref{fig:finding_birads} and Figure~\ref{fig:patient_age}.
\begin{table}[ht]
\centering
\setlength{\tabcolsep}{5pt}
\renewcommand{\arraystretch}{1.5}
\caption{\label{tab:birads_dis} Statistics of breast-level BI-RADS assessment.}
\begin{tabular}{rrrrrrr}
\hline
& \multicolumn{5}{c}{\textbf{Breast BI-RADS}} & \multicolumn{1}{l}{} \\ \cline{2-6}
& \multicolumn{1}{c}{\textbf{1}} & \multicolumn{1}{c}{\textbf{2}} & \multicolumn{1}{c}{\textbf{3}} & \multicolumn{1}{c}{\textbf{4}} & \multicolumn{1}{c}{\textbf{5}} & \multicolumn{1}{c}{\textbf{Total}} \\ \hline
\textbf{Training} & 5,362 (67.03\%) & 1,871 (23.39\%) & 372 (04.65\%) & 305 (03.81\%) & 90 (01.12\%) & 8,000 \\
\textbf{Test} & 1,341 (67.05\%) & 467 (23.35\%) & 93 (04.65\%) & 76 (03.80\%) & 23 (01.15\%) & 2,000 \\ \hline
\textbf{Overall} & 6,703 (67.03\%) & 2,338 (23.38\%) & 465 (04.65\%) & 381 (03.81\%) & 113 (01.13\%) & 10,000 \\ \hline
\end{tabular}
\end{table}
\begin{table}[ht]
\centering
\setlength{\tabcolsep}{5pt}
\renewcommand{\arraystretch}{1.5}
\caption{\label{tab:density_dis} Statistics of breast density.}
\begin{tabular}{rrrrrr}
\hline
& \multicolumn{4}{c}{\textbf{Breast Density}} & \\ \cline{2-5}
& \multicolumn{1}{c}{\textbf{A}} & \multicolumn{1}{c}{\textbf{B}} & \multicolumn{1}{c}{\textbf{C}} & \multicolumn{1}{c}{\textbf{D}} & \multicolumn{1}{c}{\textbf{Total}} \\ \hline
\textbf{Training} & 40 (00.50\%) & 764 (09.55\%) & 6,116 (76.45\%) & 1,080 (13.50\%) & 8,000 \\
\textbf{Test} & 10 (00.50\%) & 190 (09.50\%) & 1,530 (76.50\%) & 270 (13.50\%) & 2,000 \\ \hline
\textbf{Overall} & 50 (00.50\%) & 954 (09.54\%) & 7,646 (76.46\%) & 1,350 (13.50\%) & 10,000 \\ \hline
\end{tabular}
\end{table}
\begin{table}[ht]
\centering
\setlength{\tabcolsep}{3pt}
\renewcommand{\arraystretch}{1.5}
\caption{\label{tab:finding_dis}Findings statistics on the VinDr-Mammo dataset. The number of findings and the rate of findings per 100 images are provided for the training set, test set, and the whole dataset.}
\begin{tabular}{rrrr}
\hline
\multicolumn{1}{c}{\textbf{}} & \multicolumn{2}{c}{\textbf{Split}} & \\ \cline{2-3}
\multicolumn{1}{c}{\textbf{Finding}} & \multicolumn{1}{c}{\textbf{Training}} & \multicolumn{1}{c}{\textbf{Test}} & \multicolumn{1}{c}{\textbf{Total}} \\ \hline
Mass & 989 (6.181) & 237 (5.925) & 1,226 (6.130) \\
Suspicious Calcification & 428 (2.675) & 115 (2.875) & 543 (2.715) \\
Asymmetry & 77 (0.481) & 20 (0.500) & 97 (0.485) \\
Focal Asymmetry & 216 (1.350) & 53 (1.325) & 269 (1.345) \\
Global Asymmetry & 20 (0.125) & 6 (0.150) & 26 (0.130) \\
Architectural Distortion & 95 (0.594) & 24 (0.600) & 119 (0.595) \\
Skin Thickening & 45 (0.281) & 12 (0.300) & 57 (0.285) \\
Skin Retraction & 15 (0.094) & 3 (0.075) & 18 (0.090) \\
Nipple Retraction & 30 (0.188) & 7 (0.175) & 37 (0.185) \\
Suspicious Lymph Node & 46 (0.288) & 11 (0.275) & 57 (0.285) \\ \hline
\end{tabular}
\end{table}
\begin{figure}[h]
\centering
\begin{subfigure}[b]{0.3\textwidth}
\centering
\includegraphics[width=\linewidth]{images/mass.png}
\caption{Mass}
\end{subfigure} %
\begin{subfigure}[b]{0.3\textwidth}
\centering
\includegraphics[width=\linewidth]{images/suspicious_calcification.png}
\caption{Suspicious Calcification}
\end{subfigure} %
\begin{subfigure}[b]{0.3\textwidth}
\centering
\includegraphics[width=\linewidth]{images/architectural_distortion.png}
\caption{Architectural Distortion}
\end{subfigure} %
\begin{subfigure}[b]{0.3\textwidth}
\centering
\includegraphics[width=\linewidth]{images/asymmetry.png}
\caption{Asymmetry}
\end{subfigure} %
\begin{subfigure}[b]{0.3\textwidth}
\centering
\includegraphics[width=\linewidth]{images/focal_asymmetry.png}
\caption{Focal Asymmetry}
\end{subfigure} %
\begin{subfigure}[b]{0.3\textwidth}
\centering
\includegraphics[width=\linewidth]{images/global_asymmetry.png}
\caption{Global Asymmetry}
\end{subfigure} %
\caption{Statistics of BI-RADS assessment of findings.}
\label{fig:finding_birads}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[width=0.6\linewidth]{images/patient_age.png}
\caption{Distribution of patient age. This statistic is calculated over all exams in which the patient's age is available.}
\label{fig:patient_age}
\end{figure}
\newpage
\section*{Data Records}
\label{sec:data_records}
Both DICOM images and radiologists' annotations of the dataset have been submitted to PhysioNet\footnote{\url{https://physionet.org/}} for public access. Breast-level and finding annotations of the whole dataset are stored in CSV files \verb|breast-level_annotations.csv| and \verb|finding_annotations.csv|, respectively. The images are structured into subfolders according to the encoded study identifiers, each of which contains four images corresponding to four views of the exam. The subfolder name and image file name are named following the study identifier and image identifier. The information of the breast-level annotations is provided for each image even though there is redundancy since each breast is associated with two images of different view positions, i.e., MLO and CC. We find this representation more convenient because other metadata of the image, namely laterality and view position, can also be included, eliminating the need of parsing this information from the DICOM tags. Metadata for each image in the \verb|breast-level_annotations.csv| file includes:
\begin{itemize}
\item \verb|study_id|: The encoded study identifier.
\item \verb|series_id|: The encoded series identifier.
\item \verb|image_id|: The encoded image identifier.
\item \verb|laterality|: Laterality of the breast depicted in the image. Either \verb|L| or \verb|R|.
\item \verb|view_position|: Orientation with respect to the breast of the image. Standard views are CC and MLO.
\item \verb|height|: Height of the image.
\item \verb|width|: Width of the image.
\item \verb|breast_birads|: BI-RADS assessment of the breast that the image depicts.
\item \verb|breast_density|: Density category of the breast that the image depicts.
\item \verb|split|: Indicating the split to which the image belongs. Either \verb|training| or \verb|test|.
\end{itemize}
Regarding breast findings, each annotation represents the occurrence of breast abnormality at a region, represented by a bounding box, in a specific image. This means that a single finding may associate with annotations from different views, yet this linking information is not acquired in the annotation process. Metadata for each finding annotation in the \verb|finding_annotations.csv| file contains:
\begin{itemize}
\item \verb|image_id|: The encoded identifier of the image in which the finding appears.
\item \verb|study_id|: The encoded identifier of the associated study.
\item \verb|series_id|: The encoded identifier of the associated series.
\item \verb|laterality|: Laterality of the breast in which the finding appears.
\item \verb|view_position|: Orientation with respect to the breast of the image.
\item \verb|height|: Height of the image.
\item \verb|width|: Width of the image.
\item \verb|breast_birads|: BI-RADS assessment of the breast that the image depicts.
\item \verb|breast_density|: Density category of the breast that the image depicts.
\item \verb|finding_categories|: List of finding categories attached to the region, e.g., mass with skin retraction.
\item \verb|finding_birads|: BI-RADS assessment of the marked finding.
\item \verb|xmin|: Left boundary of the box.
\item \verb|ymin|: Top boundary of the box.
\item \verb|xmax|: Right boundary of the box.
\item \verb|ymax|: Bottom boundary of the box.
\item \verb|split|: Indicating the split to which the image belongs. Either \verb|training| or \verb|test|.
\end{itemize}
\section*{Technical Validation}
The data de-identification and the quality of the labeling process were strictly controlled. First, all meta-data was manually reviewed to ensure that all individually identifiable health information or PHI~\cite{isola2019protected} of the patients has been fully removed to meet data privacy regulations such as the U.S. HIPAA~\cite{assistance2003summary} and the European GDPR~\cite{gdpr}. In addition, pixel values of all mammograms were manually reviewed case-by-case by human readers. We developed a set of rules underlying our labeling tool to reduce mislabeling. These rules allowed to verify the radiologist-generated labels automatically. Specifically, they prevent annotators from mechanical mistakes like forgetting to choose global labels or marking lesions on the image while choosing ``\verb|BI-RADS 1|'' as the breast-level assessment.
\section*{Usage Notes}
The VinDr-Mammo dataset was created for the purpose of developing and evaluating computer-aided detection and diagnosis algorithms based on full-field digital mammography. In addition, it can also be used for general tasks in computer vision, such as object detection and multiple label image classification. To download and explore this dataset, users are required to accept a Data Usage Agreement (DUA) called PhysioNet Credentialed Health Data License 1.5.0 (\href{https://www.physionet.org/about/licenses/physionet-credentialed-health-data-license-150/}{https://www.physionet.org/about/licenses/physionet-credentialed-health-data-license-150/}). By accepting this DUA, users agree that the dataset can be used for scientific research and educational purposes only and will not attempt to re-identify any patients, institutions or hospitals. Additionally, the authors must cite this original paper for any publication that explores this dataset.
One limitation of the VinDr-Mammo dataset is that some abnormalities, namely skin retraction and nipple retraction, have fewer than 40 samples, so studies of these abnormalities on this dataset might not be reliable.
\section*{Code Availability}
\label{sec:code}
The code used in this study was made publicly available. The scripts used for loading and processing DICOM images are based on the following open-source repositories: Python 3.8.0 (\href{https://www.python.org/}{https://www.python.org/}); Pydicom 1.2.0 (\href{https://pydicom.github.io/}{https://pydicom.github.io/}); and Python hashlib (\href{https://docs.python.org/3/library/hashlib.html}{https://docs.python.org/3/library/hashlib.html}). The code for data de-identification and stratification was made publicly available at \href{https://github.com/vinbigdata-medical/vindr-mammo}{https://github.com/vinbigdata-medical/vindr-mammo}.
|
1,314,259,994,990 | arxiv | \section{Introduction}\label{sec:1}
A compound option is a standard option whose underlying asset is itself
another standard option. Compound options have been extensively used in
corporate finance. When the total value of a firm's assets is
regarded as the risky underlying asset, the various
corporate securities can be valued as claims contingent on the underlying asset, and an option on such a security is termed a compound option.
The compound
option models were first used by Geske \cite{geske1979valuation} to value
option on a share of common stock. Roll \cite{roll1977analytic} extended
Geske's work and obtained a closed-form solution for the
price of an American call. Selby and Hodges \cite{selby1987evaluation} studied
the valuation of compound options.
Extendible options are a generalized form of compound options whose maturities can be
extended on the maturity date, at the choice of the option holder, and this extension may
require the payment of an additional premium. They are widely applied in financial fields such
as real-estate, junk bonds, warrants with exercise price changes, and shared-equity mortgages,
so many researchers carry out the theoretical models for pricing the options.
Prior valuation of extendible bonds was presented by Brennan et al \cite{brennan}
and Ananthanarayanan et al \cite{ananthanarayanan}. Longstaff \cite{longstaff} extended their work to develop a set of
pricing models for a wide variety of extendible options. Since these models assume the asset
price follows geometric Brownian motion, they are unlikely to capture the abnormal
fluctuations in asset prices that occur when important new information arrives. Merton \cite{merton}
considered the impact of a sudden event on the asset price in the financial market and proposed
a geometric Brownian motion with jumps to match the abnormal fluctuation of financial asset
price, which was introduced into derivation of the option pricing model. Based on this theory,
Dias and Rocha \cite{dias} considered the problem of pricing extendible options under petroleum
concessions in the presence of jumps. Kou
\cite{kou2002jump} and Cont and Tankov \cite{cont} also considered the problem of
pricing options under a jump diffusion environment in a larger setting.
Moreover, Gukhal \cite{gukhal} derived a pricing model for extendible
options when the asset dynamics were driven by jump diffusion process. Hence, the analysis of compound and extendible
options by applying jump process is a significant issue
and provides the motivation for this paper.
All this research above assumes that the logarithmic returns of the exchange rate are independent identically distributed
normal random variables.
However, the empirical studies
demonstrated that the distributions of the logarithmic
returns in the asset market generally reveal excess kurtosis
with more probability mass around the origin and in the tails
and less in the flanks than what would occur for normally
distributed data \cite{cont}. It can be said that the properties of
financial return series are nonnormal, nonindependent, and
nonlinear, self-similar, with heavy tails, in both autocorrelations
and cross-correlations, and volatility clustering \cite{huang,cajueiro,kang1,kang2,ding}. Since fractional Brownian motion $(FBM)$ has two substantial
features such as self-similarity and long-range dependence,
thus using it is more applicable to capture behavior from
financial asset \cite{podobnik,carbone,wang1,wang2,xiao3}. Unfortunately, since $FBM$ is neither a Markov process nor a semimartingale,
we are unable to apply the classical stochastic calculus to analyze it \cite{bjork}. To get around this problem and to take into account the long
memory property, it is reasonable to use the mixed fractional Brownian motion $(MFBM)$ to capture fluctuations of
the financial asset \cite{cheridito1,el}. The $MFBM$ is a linear combination of Brownian motion and $FBM$ processes. Cheridito \cite{cheridito1} proved that, for $H \in(3/4,1)$, the mixed model with dependent Brownian motion and $FBM$ was equivalent to one with Brownian motion, and hence it is arbitrage-free. For $H\in(\frac{1}{2},1)$, Mishura and Valkeila \cite{mishura2002absence} proved that, the mixed model is arbitrage-free.
In this paper, to capture the long range property, to exclude the arbitrage in
the environment of $FBM$ and to get the jump or discontinuous component of asset prices, we consider the problem of compound option in a jump mixed fractional Brownian motion $(JMFBM)$ environment. We then exert the result to value
extendible options. We also provide representative numerical results. The $JMFBM$ is based on the
assumption that the underlying asset price is generated by a two-part
stochastic process: (1) small, continuous price movements are
generated by a $MFBM$ process, and (2) large, infrequent
price jumps are generated by a Poisson process. This two-part process
is intuitively appealing, as it is consistent with an efficient market in
which major information arrives infrequently and randomly. The rest of this paper is as follows.
In Section \ref{sec:2}, we briefly state some definitions related to $MFBM$ that will be used in forthcoming sections. In Section \ref{sec:2-1}, we analyze the problem of pricing compound option whose values follow a $JMFBM$ process and present an explicit pricing formula for compound options. In Section \ref{sec:3}, we derive an analytical valuation formula for pricing extendible option by compound option approach with only one extendible maturity under risk neutral measure, then extend this result to the valuation of an option with $N$
extendible maturity. Section \ref{sec:4} deals with the simulation studies for our pricing formula. Moreover, the comparison of our $JMFBM$ model and traditional models is undertaken in this Section. Section \ref{sec:5} is assigned to conclusion.
\section{Auxiliary facts}\label{sec:2}
In this section we recall some definitions and results which we need for the rest of the paper \cite{mishura2002absence,el,xiao3}.
\textbf{Definition 2.1:} A $MFBM$ of
parameters $\epsilon, \alpha$ and $H$ is a linear combination of
$FBM$ and Brownian motion, under probability space $(\Omega ,F,P)$ for any $t\in
R^+$ by:
\begin{eqnarray}
M_t^H=\epsilon B_t+\alpha B_t^H,
\end{eqnarray}
where $B_t$ is a Brownian motion, $B_t^H$ is an independent
$FBM$ with Hurst parameter $H\in(0,1)$,
$\epsilon$ and $\alpha$ are two real constants such that
$(\epsilon,\alpha)\neq (0,0)$.\\
Consider a frictionless continuous time economy where information arrives both
continuously and discontinuously. This is modeled as a continuous component and as
a discontinuous component in the price process. Assume that the asset does not pay
any dividends. The price process can hence be specified as a superposition of these two components and can be represented as follows:
\begin{eqnarray}
dS_t&=&S_t(\mu-\lambda k) dt+\sigma S_tdB_t\nonumber\\
&+&\sigma
S_tdB_t^H+(J-1)S_tdN_t,\,0<t\leq T,\,S_{T_0}=S_0,
\label{eq:1}
\end{eqnarray}
where $\mu,\sigma, \lambda$ are constants, $B_t$ is a standard Brownian motion, $B_t^H$ is an independent $FBM$ with Hurst parameter $H$, $N_t$ is a Poisson process with rate $\lambda$, $J-1$ is the proportional change due to the jump, and $\ln(J)\sim N(\mu_J, \sigma_J^2)$ with $\mu_ J=\ln(1+k)-\frac{1}{2}\sigma_J^2$, where $k=E[J]-1$ is the expected proportional jump size. The Brownian motion $B_t$, the $FBM$ $B_t^H$, the Poisson process $N_t$ and the jump amplitude $J$ are independent.
Using Ito Lemma \cite{li}, the solution for stochastic differential equation (\ref{eq:1}) is
\begin{eqnarray}
S_t=S_0\exp\Big[(r-\lambda k)t+\sigma B_t+\sigma B_t^H-\frac{1}{2}\sigma^2t-\frac{1}{2}\sigma^2t^{2H}\Big]J(n)^{N(t)}.
\label{eq:2}
\end{eqnarray}
where $J(n)=\prod_{i=1}^nJ_i$ for $n\geq 1$, $J_t$ is independently and identically distributed and $J_0=1$; $n$ is the Poisson distributed with parameter $\lambda t$.
Let $x_t=\ln\frac{S_t}{S_0}$. From Eq. (\ref{eq:2}) we easily get
\begin{eqnarray}
dx_t=\big(r-\lambda k-\frac{1}{2}\sigma^2-H\sigma^2t^{2H-1}\big)dt+\sigma dB
_t+\sigma dB_t^H+\ln(J)dN_t.
\label{eq:3}
\end{eqnarray}
Consider a European call option with maturity $T$ and the strike price $K$ written on the
stock whose price process evolves as in Eq. (\ref{eq:1}). The value of this call option is known from \cite{shokrollahi1} and is given by
\begin{eqnarray}
&&C(S_0,K,T-T_0)\nonumber\\
&&=\sum_{n=0}^\infty\frac{e^{-\lambda'(T-T_0)}(\lambda'(T-T_0))^n}{n!} S_0\Phi(d_1)-Ke^{r(T-T_0)}\Phi(d_2),
\label{eq:4}
\end{eqnarray}
where
\begin{eqnarray}
d_1&=&\frac{\ln\frac{S_0}{K}+r_n(T-T_0)+\frac{1}{2}[\sigma^2(T-T_0)+\sigma^2(T^{2H}-T_0^{2H})+n\sigma_J^2]}{\sqrt{\sigma^2(T-T_0)+\sigma^2(T^{2H}-T_0^{2H})+n\sigma_J^2}},\nonumber\\
d_2&=&d_1-\sqrt{\sigma^2(T-T_0)+\sigma^2(T^{2H}-T_0^{2H})+n\sigma_J^2}\nonumber,
\label{eq:5}
\end{eqnarray}
$\lambda'=\lambda (1+k)$, $r_n=r-\lambda k+\frac{n\ln(1+k)}{T-T_0}$ and $\Phi(.)$ is the cumulative normal distribution.
\section{Compound options}\label{sec:2-1}
In order to derive a compound option pricing formula in a jump mixed fractional market, we
make the following assumptions.
\begin{enumerate}
\item[(i)] There are no transaction costs or taxes and all securities are perfectly divisible;
\item[(ii)] security trading is continuous;
\item[(iii)] there are no riskless arbitrage opportunities;
\item[(iv)] the short-term interest rate $r$ is known and constant through time;
\item[(v)] the underlying asset price $S_t$ is governed by the stochastic differential equation (\ref{eq:1}).
\end{enumerate}
Consider a compound call option written on the European call $C(K,T_2)$ with expiration date $T_1$ and exercise price $K_1$, where $T_1<T_2$. Assume $CC\left[C(K,T_2), K_1, T_1\right]$ denotes this compound option. This compound option is exercised at time $T_1$ when the
value of the underlying asset, $C(S_1, K, T_1, T_2)$, exceeds the strike price $K_1$. When
$C(S_1, K, T_1, T_2)<K_1$, it is not optimal to exercise the compound option and hence
expires worthless. The asset price at which one is indifferent between exercising and
not exercising is specified by the following relation:
\begin{eqnarray}
C(S_1, K, T_1, T_2)=K_1.
\label{eq:6}
\end{eqnarray}
Let $S_1^*$ denote the price of indifference, which can be obtained as the numerical solution of Eq. (\ref{eq:6}). When it is optimal to exercise the compound option at time $T_1$, the option holder
pays $K_1$ and receives the European call $C(K, T_1, T_2)$. This European call can in turn be
exercised at time $T_2$ when $S_T$ exceeds $K$ and expires worthless otherwise. Hence, the
cashflows to the compound option are an outflow of $K_1$ at time $T_1$ when $S_1>S_1^*$, a net
cashflow at time $T_2$ of $S_T -K$ when $S_1>S_1^*$ and $S_T>K$, and none in the other states. The value of the compound option is the expected present value of these cashflows as follows:
\begin{eqnarray}
&&CC\left[C(K,T_2), K_1, T_0, T_1\right]\nonumber\\
&=&E_{T_0}\left[e^{-r(T_2-T_0)}(S_T-K)\textbf{1}_{S_T>K}\right]+E_{T_0}\left[e^{-r(T_1-T_0)}(-K_1)\textbf{1}_{S_1>S_1^*}\right]\nonumber\\
&=&E_{T_0}\left[e^{-r(T_1-T_0)}E_{T_1}\left[e^{-r(T_2-T_1)}(S_T-K)\textbf{1}_{S_T>K}\right]\textbf{1}_{S_1>S_1^*}\right]\nonumber\\
&&-E_{T_0}\left[e^{-r(T_1-T_0)}K_1\textbf{1}_{S_1>S_1^*}\right]\nonumber\\
&=&E_{T_0}\left[e^{-r(T_2-T_0)}C(S_1, K, T_1, T_2)\textbf{1}_{S_1>S_1^*}\right]-E_{T_0}\left[e^{-r(T_1-T_0)}K_1\textbf{1}_{S_1>S_1^*}\right]
\label{eq:7}
\end{eqnarray}
where $C(S_1, K, T_1, T_2)$ is given in Eq. (\ref{eq:4}).
Let the number of jumps in the intervals $[T_0,T_1)$ and $[T_1,T_2]$ be denoted by $n_1$ and $n_2$, respectively, and let $m=n_1+n_2$ denote the number of jumps in the interval $[T_0,T_2]$. Then, using the Poisson probabilities, we have
\begin{eqnarray}
&&E_{T_0}\left[e^{-r(T_2-T_0)}C(S_1, K, T_1, T_2)\textbf{1}_{S_1>S_1^*}\right]\nonumber\\
&=&E_{T_0}\left[e^{-r(T_1-T_0)}E_{T_1}\left[e^{-r(T_2-T_1)}(S_T-K)\textbf{1}_{S_T>K}\right]\textbf{1}_{S_1>S_1^*}\right]\nonumber\\
&=&\sum_{n_1=0}^\infty\sum_{n_2=0}^\infty\frac{e^{-\lambda'(T_1-T_0)}(\lambda'(T_1-T_0))^{n_1}}{n_1!}\frac{e^{-\lambda'(T_2-T_1)}(\lambda'(T_2-T_1))^{n_2}}{n_2!}\nonumber\\
&&\times E_{T_0}\left[e^{-r(T_1-T_0)}E_{T_1}\left[e^{-r(T_2-T_1)}(S_T-K)\textbf{1}_{S_T>K}\right]\textbf{1}_{S_1>S_1^*}|n_1,n_2\right]\nonumber\\
&=&\sum_{n_1=0}^\infty\sum_{n_2=0}^\infty\frac{e^{-\lambda'(T_1-T_0)}(\lambda'(T_1-T_0))^{n_1}}{n_1!}\frac{e^{-\lambda'(T_2-T_1)}(\lambda'(T_2-T_1))^{n_2}}{n_2!}\nonumber\\
&&\times E_{T_0}\left[e^{-r(T_2-T_0)}(S_T-K)\textbf{1}_{S_T>K}\textbf{1}_{S_1>S_1^*}|n_1,n_2\right]\nonumber
\label{eq:8}
\end{eqnarray}
The evaluation of this expectation requires the joint density of two Poisson weighted sums
of correlated normals. From this point on, we work with the logarithmic return, $x_t=\ln\frac{S_t}{S_0}$,
rather than the stock price. It is important to note that the correlation between the logarithmic returns $x_{T_1}$ and $x_{T_2}$ depends on the number of jumps in the intervals $[T_0,T_1)$ and $[T_1,T_2]$. Conditioning on the number of jumps $n_1$ and $n_2$, $x_{T_1}$ has a normal distribution with mean
\begin{eqnarray}
\mu_{J_{T_1-T_0}}&=&(r-\lambda k)(T_1-T_0)-\frac{1}{2}\sigma^2(T_1-T_0)\nonumber\\
&-&\frac{1}{2}\sigma^2(T_1^{2H}-T_0^{2H})+n_1[\ln(1+k)-\frac{1}{2}\sigma_J^2]\nonumber\\
\sigma_{J_{T_1-T_0}}^2&=&\sigma^2(T_1-T_0)+\sigma^2(T_1^{2H}-T_0^{2H})+n_1\sigma_J^2,\nonumber
\label{eq:9}
\end{eqnarray}
and $x_{T_2}\sim N(\mu_{J_{T_2-T_0}},\sigma_{J_{T_2-T_0}}^2)$
where
\begin{eqnarray}
\mu_{J_{T_2-T_0}}&=&(r-\lambda k)(T_2-T_0)-\frac{1}{2}\sigma^2(T_2-T_0)\nonumber\\
&-&\frac{1}{2}\sigma^2(T_2^{2H}-T_0^{2H})+m[\ln(1+k)-\frac{1}{2}\sigma_J^2]\nonumber\\
\sigma_{J_{T_2-T_0}}^2&=&\sigma^2(T_2-T_0)+\sigma^2(T_2^{2H}-T_0^{2H})+m\sigma_J^2\nonumber.
\label{eq:10}
\end{eqnarray}
The correlation coefficient between $x_{T_2}$ and $x_{T_1}$ is as follows
\begin{eqnarray}
\rho=\frac{cov(x_{T_1},x_{T_2})}{\sqrt{var(x_{T_1})\times var(x_{T_2})}}\nonumber.
\label{eq:11}
\end{eqnarray}
Evaluating the first expectation in Eq. (\ref{eq:7}) gives
\begin{eqnarray}
&&E_{T_0}\left[e^{-r(T_2-T_0)}C(S_1, K, T_1, T_2)\textbf{1}_{S_1>S_1^*}\right]\nonumber\\
&&=\sum_{n_1=0}^\infty\sum_{n_2=0}^\infty\frac{e^{-\lambda'(T_1-T_0)}(\lambda'(T_1-T_0))^{n_1}}{n_1!}\frac{e^{-\lambda'(T_2-T_1)}(\lambda'(T_2-T_1))^{n_2}}{n_2!}\nonumber\\
&&\times\Big[S_0\Phi_2(a_1,b_1,\rho)-Ke^{-r(T_2-T_0)}\Phi_2(a_2,b_2,\rho)\Big]
\label{eq:12}
\end{eqnarray}
where
\begin{eqnarray}
a_1&=&\frac{\ln\frac{S_0}{S_1^\ast}+\mu_{J_{T_1-T_0}}+\sigma_{J_{T_1-T_0}}^2}{\sqrt{\sigma_{J_{T_1-T_0}}^2}},\quad a_2=a_1-\sqrt{\sigma_{J_{T_1-T_0}}^2}\nonumber\\
b_1&=&\frac{\ln\frac{S_0}{K}+\mu_{J_{T_2-T_0}}+\sigma_{J_{T_2-T_0}}^2}{\sqrt{\sigma_{J_{T_2-T_0}}^2}},\quad b_2=b_1-\sqrt{\sigma_{J_{T_2-T_0}}^2}\nonumber
\label{eq:13}
\end{eqnarray}
$\Phi(x)$ is the standard univariate cumulative normal
distribution function and $\Phi_2(x,y,\rho)$ is the standard bivariate cumulative normal
distribution function with correlation coefficient $\rho$.
The second expectation in Eq. (\ref{eq:7}) can be evaluated to give
\begin{eqnarray}
&&E_{T_0}\left[e^{-r(T_1-T_0)}K_1\textbf{1}_{S_1>S_1^*}\right]\nonumber\\
&&=\sum_{n_1=0}^\infty\frac{e^{-\lambda'(T_1-T_0)}(\lambda'(T_1-T_0))^{n_1}}{n_1!}E_{T_0}\left[e^{-r(T_1-T_0)}K_1\textbf{1}_{S_1>S_1^*}|n_1\right]\nonumber\\
&&=\sum_{n_1=0}^\infty\frac{e^{-\lambda'(T_1-T_0)}(\lambda'(T_1-T_0))^{n_1}}{n_1!}K_1e^{-r(T_1-T_0)}\Phi(a_2),
\label{eq:14}
\end{eqnarray}
where $a_2$ is defined above. Then, the following result for a compound call option is obtained.
\begin{thm}
The value of a compound call option with maturity $T_1$ and strike price
$K_1$ written on a call option, with maturity $T_2$, strike $K$, and whose underlying asset follows the process in Eq. (\ref{eq:1}), is given by
\begin{eqnarray}
&&CC\left[C(K,T_2), K_1, T_0, T_1\right]\nonumber\\
&&=\Big\{\sum_{n_1=0}^\infty\sum_{n_2=0}^\infty\frac{e^{-\lambda'(T_1-T_0)}(\lambda'(T_1-T_0))^{n_1}}{n_1!}\frac{e^{-\lambda'(T_2-T_1)}(\lambda'(T_2-T_1))^{n_2}}{n_2!}\nonumber\\
&&\times\Big[S_0\Phi_2(a_1,b_1,\rho)-Ke^{-r(T_2-T_0)}\Phi_2(a_2,b_2,\rho)\Big]\Big\}\nonumber\\
&&-\sum_{n_1=0}^\infty\frac{e^{-\lambda'(T_1-T_0)}(\lambda'(T_1-T_0))^{n_1}}{n_1!}K_1e^{-r(T_1-T_0)}\Phi(a_2)\nonumber
\label{eq:15}
\end{eqnarray}
where $a_1, a_2, b_1, b_2,$ and $\rho$ are as defined previously.
\label{th:1}
\end{thm}
For a compound option with dividend payment rate $q$, the result is similar to Theorem \ref{th:1}, with $r$ replaced by $r-q$.
\section{Extendible option pricing formulae}\label{sec:3}
Based on the assumptions in the last Section, let $EC$ be the value of an extendible call option with time
to expiration of $T_1$. At the time to expiration $T_1$, the holder of
the extendible call can
\begin{enumerate}
\item[(1)] let the call expire worthless if $S_{T_1}<L$, or
\item[(2)] exercise the call and get $S_{T_1}-K_1$ if $S_{T_1}> M$, or
\item[(3)] make a payment of an additional premium $A$ to
extend the call to $T_2$ with a new strike of $K_2$ if $ L\leq S_{T_1}\leq M$,
\end{enumerate}
where $S_{T_1}$ is the underlying asset price at time $T_1$,
$K_1$ is the strike price at time $T_1$, and Longstaff \cite{longstaff} refers to
$L$ and $M$ as critical values, where $L<M$.
If at expiration time $T_1$ the option is worth more than
the extendible value with a new strike price of $K_2$ for a fee
of $A$ for extending the expiration time $T_1$ to $T_2$, then it is
best to exercise; that is, $S_{T_1}-K_1\geq C(S_{T_1},K_2,T_2-T_1)-A$.
Otherwise, it is best to extend the expiration time of the
option to $T_2$ and exercise when it is worth more than zero;
that is, $ C(S_{T_1},K_2,T_2-T_1)-A> 0$. Moreover, the holder of
the option should be impartial between extending and not
exercising at value $L$ and impartial between exercising and
extending at value $M$. Therefore, the critical values $L$ and $M$
are unique solutions of $M-K_1= C(M,K_2,T_2-T_1)-A$ and $C(L,K_2,T_2-T_1)-A=0$. See Longstaff \cite{longstaff} and Gukhal \cite{gukhal}
for an analysis of the conditions.
The value of a call option, $C$ at time $T_1$
with a time to expiration extended to $T_2$, as the discounted
conditional expected payoff is given by
\begin{eqnarray}
EC(S_0,K_1,T_1,K_2,T_2,A)&=& E_{T_0}\Big[e^{-r(T_1-T_0)}(S_{T_1}-K_1)\textbf{1}_{S_{T_1}>M}\Big]\nonumber\\
&+&E_{T_0}\Big[e^{-r(T_1-T_0)}\Big(C(S_{T_1},K_2,T_2-T_1)-A\Big)\textbf{1}_{L \leq S_{T_1}\leq M}\Big]\nonumber\\
&=&E_{T_0}\Big[e^{-r(T_1-T_0)}(S_{T_1}-K_1)\textbf{1}_{S_{T_1}>M}\Big]\nonumber\\
&+&E_{T_0}\Big[e^{-r(T_1-T_0)}\Big(C(S_{T_1},K_2,T_2-T_1)-A\Big)\nonumber\\
&\times&\Big(\textbf{1}_{S_{T_1}\geq L}-\textbf{1}_{S_{T_1}\geq M}\Big)\Big].
\label{eq:16}
\end{eqnarray}
Then, in the same way as for the compound call option, we have
\begin{eqnarray}
&&E_{T_0}\Big[e^{-r(T_1-T_0)}(S_{T_1}-K_1)\textbf{1}_{S_{T_1}>M}\Big]\nonumber\\
&=&\sum_{n_1=0}^\infty \frac{e^{-\lambda'(T_1-T_0)}(\lambda'(T_1-T_0))^{n_1}}{n_1!} E_{T_0}\Big[e^{-r(T_1-T_0)}(S_{T_1}-K_1)\textbf{1}_{S_{T_1}>M}|n_1\Big],
\label{eq:17}
\end{eqnarray}
\begin{eqnarray}
&&E_{T_0}\Big[e^{-r(T_1-T_0)}\Big(C(S_{T_1},K_2,T_2-T_1)-A\Big)\Big(\textbf{1}_{S_{T_1}\geq L}-\textbf{1}_{S_{T_1}\geq M}\Big)\Big]\nonumber\\
&=&E_{T_0}\Big[e^{-r(T_1-T_0)}E_{T_1}\Big(e^{-r(T_2-T_1)}(S_{T_2}-K_2)\textbf{1}_{S_{T_2}>K_2}\Big)\Big(\textbf{1}_{S_{T_1}\geq L}-\textbf{1}_{S_{T_1}\geq M}\Big)\Big]\nonumber\\
&-&E_{T_0}\Big[e^{-r(T_1-T_0)}A\Big(\textbf{1}_{S_{T_1}\geq L}-\textbf{1}_{S_{T_1}\geq M}\Big)\Big]\nonumber\\
&=&\Big\{\sum_{n_1=0}^\infty\sum_{n_2=0}^\infty\frac{e^{-\lambda'(T_1-T_0)}(\lambda'(T_1-T_0))^{n_1}}{n_1!}\frac{e^{-\lambda'(T_2-T_1)}(\lambda'(T_2-T_1))^{n_2}}{n_2!}\nonumber\\
&\times&E_{T_0}\big[e^{-r(T_2-T_0)}(S_{T_2}-K_2)\textbf{1}_{S_{T_2}>K_2}\textbf{1}_{S_{T_1}>L}|n_1,n_2\big]\Big\}\nonumber\\
&-&\Big\{\sum_{n_1=0}^\infty\sum_{n_2=0}^\infty\frac{e^{-\lambda'(T_1-T_0)}(\lambda'(T_1-T_0))^{n_1}}{n_1!}\frac{e^{-\lambda'(T_2-T_1)}(\lambda'(T_2-T_1))^{n_2}}{n_2!}\nonumber\\
&\times&E_{T_0}\big[e^{-r(T_2-T_0)}(S_{T_2}-K_2)\textbf{1}_{S_{T_2}>K_2}\textbf{1}_{S_{T_1}>M}|n_1,n_2\big]\Big\}\nonumber\\
&-&\Big\{\sum_{n_1=0}^\infty\frac{e^{-\lambda'(T_1-T_0)}(\lambda'(T_1-T_0))^{n_1}}{n_1!}E_{T_0}\big[e^{-r(T_1-T_0)}A(\textbf{1}_{S_{T_1}>L}|n_1-\textbf{1}_{S_{T_1}>M}|n_1)\big]\Big\}.
\label{eq:18}
\end{eqnarray}
Now, we assume that the asset price satisfies Eq. (\ref{eq:1}). Then, by calculating the expectations in Eqs. (\ref{eq:17}) and (\ref{eq:18}), the following result is derived.
\begin{thm} The price of an extendible call option with time to
expiration $T_1$ and strike price $K_1$, whose expiration time can be extended to $T_2$ with a new strike price $K_2$ by the payment of an additional premium
$A$, is given by
\begin{eqnarray}
&&EC(S_t,K_1,T_1,K_2,T_2,A)\nonumber\\
&=&\sum_{n_1=0}^\infty \frac{e^{-\lambda'(T_1-T_0)}(\lambda'(T_1-T_0))^{n_1}}{n_1!}\Big[S_0\Phi(a_1)-K_1e^{-r(T_1-T_0)}\Phi(a_2)\Big]\nonumber\\
&&+\Big\{\sum_{n_1=0}^\infty\sum_{n_2=0}^\infty\frac{e^{-\lambda'(T_1-T_0)}(\lambda'(T_1-T_0))^{n_1}}{n_1!}\frac{e^{-\lambda'(T_2-T_1)}(\lambda'(T_2-T_1))^{n_2}}{n_2!}\nonumber\\
&&\times\Big[S_0\Phi_2(b_1,c_1,\rho)-K_2e^{-r(T_2-T_0)}\Phi_2(b_2,c_2,\rho)\Big]\Big\}\nonumber\\
&&-\Big\{\sum_{n_1=0}^\infty\sum_{n_2=0}^\infty\frac{e^{-\lambda'(T_1-T_0)}(\lambda'(T_1-T_0))^{n_1}}{n_1!}\frac{e^{-\lambda'(T_2-T_1)}(\lambda'(T_2-T_1))^{n_2}}{n_2!}\nonumber\\
&&\times\Big[S_0\Phi_2(a_1,c_1,\rho)-K_2e^{-r(T_2-T_0)}\Phi_2(a_2,c_2,\rho)\Big]\Big\}\nonumber\\
&&-\Big\{\sum_{n_1=0}^\infty\frac{e^{-\lambda'(T_1-T_0)}(\lambda'(T_1-T_0))^{n_1}}{n_1!}Ae^{-r(T_1-T_0)}\nonumber\\
&&\times\Big[\Phi(b_2)- \Phi(a_2)\Big]\Big\},
\label{eq:19}
\end{eqnarray}
where
\begin{eqnarray}
a_1&=&\frac{\ln\frac{S_0}{M}+\mu_{J_{T_1-T_0}}+\sigma_{J_{T_1-T_0}}^2}{\sqrt{\sigma_{J_{T_1-T_0}}^2}},\quad a_2=a_1-\sqrt{\sigma_{J_{T_1-T_0}}^2}\nonumber\\
b_1&=&\frac{\ln\frac{S_0}{L}+\mu_{J_{T_1-T_0}}+\sigma_{J_{T_1-T_0}}^2}{\sqrt{\sigma_{J_{T_1-T_0}}^2}},\quad b_2=b_1-\sqrt{\sigma_{J_{T_1-T_0}}^2}\nonumber\\
c_1&=&\frac{\ln\frac{S_0}{K_2}+\mu_{J_{T_2-T_0}}+\sigma_{J_{T_2-T_0}}^2}{\sqrt{\sigma_{J_{T_2-T_0}}^2}},\quad c_2=c_1-\sqrt{\sigma_{J_{T_2-T_0}}^2}\nonumber
\label{eq:20}
\end{eqnarray}
$\Phi(x)$ is the standard univariate cumulative normal
distribution function and $\Phi_2(x,y,\rho)$ is the standard bivariate cumulative normal
distribution function with correlation coefficient $\rho$.
\label{th:1}
\end{thm}
\begin{cor}
If $H=\frac{1}{2}$, the asset price satisfies the Merton jump diffusion equation
\begin{eqnarray}
dS_t&=&S_t(\mu-\lambda\kappa) dt+\sigma S_tdB_t+(J-1)S_tdN_t,\,0<t\leq T,\,S_{T_0}=S_0,
\label{eq:20-2}
\end{eqnarray}
then, our results are consistent with the findings in \cite{gukhal}.
\end{cor}
When $\lambda=0$, the asset price follows the $MFBM$ model shown below
\begin{eqnarray}
dS_t=S_tr dt+\sigma S_tdB_t+\sigma
S_tdB_t^H.
\label{eq:21}
\end{eqnarray}
and the formula (\ref{eq:8}) reduces to the diffusion case. The result is given in the following corollary.
\begin{cor}
The price of an extendible call option with time to
expiration $T_1$ and strike price $K_1$, whose expiration time can be extended to $T_2$ with a new strike price $K_2$ by the payment of an additional premium
$A$ and written on an asset following Eq. (\ref{eq:21}) is
\begin{eqnarray}
&&EC(S_t,K_1,T_1,K_2,T_2,A)\nonumber\\
&=&S_0\Phi(a_1)-K_1e^{-r(T_1-T_0)}\Phi(a_2)\nonumber\\
&&+S_0\Phi_2(b_1,c_1,\rho)-K_2e^{-r(T_2-T_0)}\Phi_2(b_2,c_2,\rho)\nonumber\\
&&-\Big[S_0\Phi_2(a_1,c_1,\rho)-K_2e^{-r(T_2-T_0)}\Phi_2(a_2,c_2,\rho)\Big]\nonumber\\
&&-Ae^{-r(T_1-T_0)}\Big[\Phi(b_2)- \Phi(a_2)\Big],
\label{eq:22}
\end{eqnarray}
where
\begin{eqnarray*}
a_1&=&\frac{\ln\frac{S_0}{M}+r(T_1-T_0)+\frac{\sigma^2}{2}(T_1-T_0)+\frac{\sigma^2}{2}(T_1^{2H}-T_0^{2H})}{\sqrt{\sigma^2(T_1-T_0)+\sigma^2(T_1^{2H}-T_0^{2H})}},\nonumber\\
a_2&=&a_1-\sigma\sqrt{T_1^{2H}-T_0^{2H}+T_1-T_0}\nonumber\\
b_1&=&\frac{\ln\frac{S_0}{L}+r(T_1-T_0)+\frac{\sigma^2}{2}(T_1-T_0)+\frac{\sigma^2}{2}(T_1^{2H}-T_0^{2H})}{\sqrt{\sigma^2(T_1-T_0)+\sigma^2(T_1^{2H}-T_0^{2H})}},\nonumber\\
b_2&=&b_1-\sigma\sqrt{T_1^{2H}-T_0^{2H}+T_1-T_0}\nonumber\\
c_1&=&\frac{\ln\frac{S_0}{K_2}+r(T_2-T_0)+\frac{\sigma^2}{2}(T_2 -T_0)+\frac{\sigma^2}{2}(T_2^{2H}-T_0^{2H})}{\sqrt{\sigma^2(T_2-T_0)+\sigma^2(T_2^{2H}-T_0^{2H})}}.\nonumber\\
c_2&=&c_1-\sigma\sqrt{T_2^{2H}-T_0^{2H}+T_2-T_0}.
\label{eq:23}
\end{eqnarray*}
\end{cor}
Let us consider an extendible option with $N$ extended maturity times; the result is presented in the following corollary.
\begin{cor}
The value of the extendible call expiring at time $T_1$, written on an asset
whose price is governed by equation (\ref{eq:1}) and whose maturity can be extended to $T_2 < T_3 < \cdots <
T_{N+1}$ with new strikes of $K_2,K_3,\ldots,K_{N+1}$ by the payment of corresponding premiums
$A_1,A_2,\ldots,A_{N+1}$, is given by
\begin{eqnarray}
EC_N(S_0,K_1,T_0,T_1)&=&\sum_{j=1}^{N+1}\Big\{\Big[S_0\Phi_j(a_{1j}^*,R_j^*)-K_je^{-r(T_j-t)}\Phi_j(a_{2j}^*,R_j^*) \Big]\nonumber\\
&-&\Big[S_0\Phi_j(c_{1j}^*,R_j^*)-K_je^{-r(T_j-t)}\Phi_j(c_{2j}^*,R_j^*)\Big]\nonumber\\
&-&A_je^{-r(T_j-t)}\Big[\Phi(b_{2j}^*,R_{-1j}^*)-\Phi(a_{2j}^*,R_{-1j}^*)\Big]
\Big\}
\label{eq:27}
\end{eqnarray}
where $A_0=0$, $\Phi_j(a_{1j}^*,R_j^*)$ is the $j$-dimensional multivariate normal integral with upper limits of integration given by the $j$-dimensional
vector $a_{1j}^*$ and correlation matrix $R_j^*$, and we define $a_{1j}^*= \big[a_1(M_1,T_1-t),-a_1(M_2,T_2-t),...,-a_1(M_j,T_j-t)\big]$. The same applies to $\Phi_j(c_{1j}^*,R_j^*)$ and $\Phi_j(b_{2j}^*,R_j^*)$, and we define
\begin{eqnarray}
c_{1j}^*&=& \big[b_1(L_1,T_1-t),a_1(M_2,T_2-t),...,b_1(L_{j-1},T_{j-1}-t),a_1(M_j,T_j-t)\big]\nonumber\\
b_{2j}^*&=& \big[b_2(L_1,T_1-t),b_2(M_2,T_2-t),...,b_2(L_j,T_j-t)\big]\nonumber
\label{eq:28}
\end{eqnarray}
and $\Phi_1(c_{1j}^*,R_j^*)$. $R_j^*$
is a $j \times j$ diagonal matrix with correlation coefficient $\rho_{p-1,p}$ as the $p$th
diagonal element, $0$ and negative correlation coefficient $\rho_{j-1,j}$, respectively, as the first and the
last diagonal element, and correlation coefficient $\rho_{p-1,s}(s = p + 1,..., j)$. As to the rest of
the elements, we note that $\rho_{p-1,s}$ is equal to the negative correlation coefficient $\rho_{pj}$ when $s=j$
and $\rho_{p-1,s}$ is equal to zero when $p=1,s = 0,..., p-1$; the terms $T_j$ and $M_j, L_j$ respectively
represent the $j$th ``time instant'' and the critical price as defined previously.
\label{cor:2}
\end{cor}
As $N$ increases to infinity the exercise opportunities become continuous and hence the
value of the approximate option will converge in the limit to the value of the extendible option. Thus, the values $EC_1, EC_2, EC_3,...$ form a converging sequence and the limit of this
sequence is the value of the extendible, i.e. $\lim_{N\rightarrow \infty}EC_N(S_0,K_1,T_0,T_1)=EC(S_0,K_1,T_0,T_1)$. To minimize the impact of this computational complexity, we use the Richardson extrapolation method \cite{geske1984american} with two points. This technique uses the first two
values of a sequence to obtain the limit of the sequence and leads to the following
equation,
\begin{eqnarray}
EC_2=2EC_1-EC_0,
\label{eq:29}
\end{eqnarray}
where $EC_2$ stands for the extrapolated limit using $EC_1$ and $EC_0$.
\section{Numerical studies}\label{sec:4}
Table \ref{table:1} provides numerical results for extendible call options when the underlying asset
pays no dividends. Column (3) displays the value obtained using the Merton model and column (4) shows the results using the Gukhal \cite{gukhal} method.
Column (5) indicates the results by the $JMFBM$ model and values using the Richardson extrapolation
technique for $EC_1$ and $EC_0$ are shown in column (6). By comparing columns Merton, Gukhal, $JMFBM$ and Richardson in Table \ref{table:1} for the low- and
high-maturity cases, we conclude that the call option prices
obtained by these valuation methods are close to each other.
\begin{table}[H]
\centering
\caption{Results by different pricing models. Here, $r=0.1, \sigma=0.1, L=5, M=15, A=0.05, H=0.8,S=12, \sigma_J=0.3, k=-0.004 $.}
\begin{tabular}{|c c c c c c |}
\hline
$T_1$ & $K$ & Merton & Gukhal &$JMFBM$ & Richardson \\[0.5ex]
\hline
1 & 10& 0.1127& 0.11143& 0.1228 & 0.1330 \\
1 & 11& 0.0960& 0.0997& 0.1075 & 0.1190 \\
1 &12& 0.0812& 0.0852& 0.0922 & 0.1031 \\
1 &13& 0.0687& 0.0707& 0.0768 & 0.0850 \\
1 &14& 0.0587& 0.0561& 0.0615 & 0.0566 \\
0.5 &10& 1.0347& 0.7521& 0.7799 & 0.5250 \\
0.5 &11& 0.8387& 0.6541& 0.6783 & 0.5180 \\
0.5 &12& 0.6662& 0.5560& 0.5768 & 0.4875 \\
0.5 &13& 0.5412& 0.4579& 0.4753 & 0.4094 \\
0.5 &14& 0.4598& 0.3598& 0.3738 & 0.2871 \\[1ex]
\hline
\end{tabular}
\label{table:1}
\vspace{-2mm}
\end{table}
Fig.~\ref{fig:4} displays
the difference in the price of the extendible call option between the Merton, Gukhal and $JMFBM$ models, according to the primary exercise date $T_1$ and strike price $K_1$.
\begin{figure}[H]
\centering
\includegraphics[width=1\textwidth]{M.eps}
\caption{The relative difference between our $JMFBM$, Gukhal and Merton models. Parameters fixed are $r=0.3, \sigma=0.4, L=.1, M=1.5, A=0.02, H=0.8,S=1.2, \sigma_J=0.05, k=0.4 $ and
$t=0.1.$ }
\label{fig:4}
\end{figure}
\section{Conclusions}\label{sec:5}
Mixed fractional Brownian motion is a strongly correlated stochastic process and jump is a significant component in financial markets. The combination of them provides better fit to evident
observations because it can fully describe high frequency financial returns display, potential
jumps, long memory, volatility clustering, skewness, and excess kurtosis. In this paper, we use a
jump mixed fractional Brownian motion to capture the behavior of the underlying asset price dynamics and deduce the pricing formula for compound options. We then apply this result to the valuation of extendible options under a jump mixed fractional Brownian motion environment. Numerical results and some special cases are provided for extendible call options.
\bibliographystyle{siam}
\section{Introduction}
The elusive nature of high-temperature superconductivity continues to attract significant attention from the scientific community. At the heart of most of these fascinating materials lies the copper-oxygen building block. To understand the electronic correlations originating from such plaquettes, closely related compounds, broadly referred to as cuprates, have received much attention. These materials are layered, possessing a square arrangement of Cu coordinated by O ions with various atoms separating the layers. Doping holes into the CuO$_2$ planes has been shown to drive many of these systems into a complex phase space with regions in which the superconducting state is stabilized.
Inelastic neutron scattering (INS) allows the study of excitations out of the magnetic groundstate thereby giving insight into the fundamental interactions at play. Using INS to study the magnetic correlations in cuprates has revealed some unusual phenomena that are not captured by spin-wave theory. Along the antiferromagnetic zone-boundary there exists: (i) an anomalous dispersion of spin-waves and (ii) a wavevector-dependent continuum that results in a redistribution of the spectral weight.
The former has been attributed to quantum corrections to linear spin-wave theory, second neighbor exchange interactions, or four-spin interactions \cite{singh-prb-1995, syljuasen-jpcm-2000, sandvik-prl-2001, zheng-prb-2005, coldea-prl-2001, huberman-prb-2005, delannoy-prb-2009, guarise-prl-2010, headings-prl-2010, babkevich-prb-2010, dallapiazza-prb-2012, moser-prb-2015}.
The latter phenomenon has recently been proposed to originate from spinon deconfinement \cite{dallapiazza-nature-2015} or strong attractive magnon-magnon interaction, leading to two-magnon-bound states as well as enhanced multi-magnon continua \cite{powalski-prl-2015}. In both cases, the appearance of the continuum is closely related to the only weakly broken SU(2) symmetry.
(Ba,Sr)$_2$Cu$_3$O$_4$Cl$_2$ is an interesting variation of the CuO motif. The crystal structure consists of Cu$_3$O$_4$ layers that are separated by (Ba,Sr) and Cl ions \cite{pitschke-powdiff-1995}, very much the same as \SCOC. However, in the middle of every second Cu square there is an additional Cu ion. These additional intermediate Cu ions form another, larger, penetrating square lattice with exchange couplings that are an order of magnitude lower than the Cu ions in the \SCOC-like framework \cite{kim-prb-2001}.
\SrCu\ has been extensively studied both experimentally and theoretically, but the magnetic spectrum has only been measured up to around 25\,meV thus far \cite{kim-prl-1999, kim-prb-2001, kim-prl-2001, harris-prb-2001, harris-jmmm-2001}.
Herein we present a thorough neutron scattering investigation of \BaCu\ using the latest generation of neutron diffractometers and spectrometers. Thus armed we determine the magnetic structure of \BaCu. The first inelastic neutron scattering measurements to trace out the high-energy dispersion in this compound reveal that the fluctuations are remarkably similar to \SCOC\ and other simpler square lattice Cu antiferromagnets (AFMs). We also perform a detailed study of the low-energy excitations -- mapping out their dispersion in zero and applied magnetic field. In order to quantify our results we consider the extended single-band Hubbard model. Our model, whose \CuA\ subsystem is described purely using \SCOC\ parameters, is able to give a good quantitative account of the magnetic spectrum. Moreover, our magnetic field-dependent studies reveal anomalous mode sharpening at the magnetic zone-boundary of the weakly-coupled \CuB\ spins that cannot be readily explained by conventional spin-wave theory. We argue that this could be evidence for spinon reconfinement in an applied magnetic field.
\section{Experimental details}
\BaCu\ melts incongruently at 975$^\circ$C \cite{ruck-jssc-1998}, and all the samples were grown by a laser floating-zone method in an oxygen atmosphere \cite{ito-jcg-2013}. First, polycrystalline \BaCu\ was synthesized from high purity CuO (99.99\%), BaCO$_3$ (99.995\%) and anhydrous BaCl$_2$ (99.999\%) as starting materials. A stoichiometric mixture of these materials was calcinated at 765$^\circ$C in air for 8h. After regrinding, it was sintered at 900$^\circ$C in air for 20h. Then, the polycrystalline rods for the floating-zone process were sintered at 900$^\circ$C in air for 20 h \cite{yamada-physicab-2007}. In the floating-zone process, the molten zone was self-adjusted soon after the start of crystal growth, and the feeding speed of the rods was 6.0\,mm/h. Powder X-ray diffraction patterns of the pulverized single crystals confirmed that the samples were of a single phase.
The magnetic and crystal structure determination were carried out at ISIS using the WISH time-of-flight diffractometer \cite{chapon-wish-2011}. A 3.6\,g polycrystalline sample from a crushed single-crystal for phase purity was mounted inside a CCR. Data were collected between 5 and 450\,K.
The triple-axis spectrometer (TAS) IN20 was used for magnetic field studies with both horizontal and vertical focusing of the Si(111) monochromator and PG(002) analyzer \cite{kulda-apa-2002}. A PG(002) filter was placed before the analyzer. A sample of coaligned crystals totalling 8\,g was mounted inside a 10\,T vertical field magnet. The crystals were mounted to access the $(h,k,0)$ scattering plane. A fixed final neutron energy of 14.7\,meV was used for the measurements.
Inelastic time-of-flight (TOF) neutron scattering measurements were performed using spectrometers: HRC at J-PARC \cite{itoh-nucinst-2011}, MAPS and MERLIN at ISIS \cite{perring-maps-2004, bewley-physicab-2006}. In the HRC measurements 5 crystals with a total mass of 4\,g were coaligned. A Fermi chopper operating at 300\,Hz was employed for the data presented herein. The MAPS and MERLIN measurements were carried out on 10 pieces of \BaCu\ with a combined mass of around 8\,g. For both spectrometers, multi-rep mode was employed to obtain data at additional incident neutron energies $E_i$. The Fermi chopper was set to 550\,Hz and 500\,Hz for the MERLIN and MAPS measurements, respectively. The crystallographic $c$-axis was aligned along $\mathbf{k}_i$ in all TOF measurements. Data analysis of the TOF measurements was carried out using Horace \cite{ewings-horace} and diagonalization of the spin-only Hamiltonian to determine the spin-wave dispersion was performed using the SpinW package \cite{toth-spinw}.
\section{Neutron diffraction measurements}
\label{sec:neutron_diffraction}
\squeezetable
\begin{table}
\caption{Nuclear and magnetic structure parameters determined from powder neutron diffraction for \BaCu. The refinement was performed in the tetragonal space group $I4/mmm$, where the positions of the ions were: Ba $(0,0,z_{\rm Ba})$, \CuA\ $(0,0.5,0)$, \CuB\ $(0,0,0)$, O $(0.25,0.25,0)$, Cl $(0,0.5,0.25)$. The numbers in parentheses are statistical uncertainties in the last digit of the refined parameters.
}
\begin{tabular}{cc|C{1.9cm}C{1.9cm}C{1.9cm}}
\hline
\hline
& & 5\,K & 100\,K & 450\,K \\
\hline
$a'$ & (\AA) & 5.5141(2) & 5.5146(2) & 5.5208(3)\\
$c'$ & (\AA) & 13.7319(5) & 13.7473(4) & 13.8906(5)\\
$a'b'c'$ & (\AA$^3$) & 417.52(3) & 418.06(3) & 423.37(3)\\
Ba & $B_{\rm iso}$ (\AA$^2$) & 0.78(13) & 0.88(13) & 1.32(15)\\
& $z_{\rm Ba}$ & 0.3611(3) & 0.3611(3) & 0.3614(3)\\
\CuA & $B_{\rm iso}$ (\AA$^2$) & 1.11(10) & 1.16(12) & 2.02(11)\\
& $\mu$ ($\mu_B$) & 0.61(4) & 0.68(4) & 0\\
\CuB & $B_{\rm iso}$ (\AA$^2$) & 1.04(12) & 1.16(10) & 1.77(14)\\
& $\mu$ ($\mu_B$) & 0.58(11) & 0 & 0\\
O & $B_{\rm iso}$ (\AA$^2$) & 1.11(8) & 1.20(8) & 1.95(10)\\
Cl & $B_{\rm iso}$ (\AA$^2$) & 0.82(8) & 1.01(8) & 2.47(10)\\
\hline
\hline
\end{tabular}
\label{tab:crystal}
\end{table}
\begin{figure}
\includegraphics[clip= ,width=0.65\columnwidth]{fig0.eps}
\caption{Coordination of atoms in the crystallographic unit cell described in Table~\ref{tab:crystal}.}
\label{fig:0}
\end{figure}
\BaCu\ crystallizes in a tetragonal structure ($I4/mmm$) where the lattice constants are $a' = 5.51$\,\AA\ and $c' = 13.73$\,\AA\ at 5\,K.
The primed basis denotes the crystallographic unit cell in Fig.~\ref{fig:0}.
The atoms are arranged in a layered structure composed of Cu$_3$O$_4$, Ba, and Cl planes. What makes \BaCu\ special compared to \LCO\ and \SCOC\ is that additional \CuB\ atoms occupy the centers of every second \CuA-O plaquette forming an additional interpenetrating square-lattice. The \CuA\ are coordinated octahedrally by four O ions at a distance of 1.95\,\AA\ in the basal plane and two Cl ions 3.43\,\AA\ away at the apices. The planar coordination of \CuA\ ions in \BaCu\ is similar to that of \LCO\ and \SCOC. The \CuB\ ions share the O ions in the plane with two Ba ions above and below at a distance of 4.94\,\AA. The exchange interaction between \CuA\ ions is through a 180$^\circ$ \CuA-O-\CuA\ bond, while the \CuB\ ions are connected through a 90$^\circ$ \CuA-O-\CuB\ interaction. Out-of-plane the \CuA\ ions are spaced by $c'/2$. Conversely, the \CuB\ ions are separated by $c'$ along the tetragonal axis. The symmetry of the lattice does not change over the temperature range studied. Small traces of unidentified impurities are observed in our diffraction patterns but these do not affect the results of the analysis. Table~\ref{tab:crystal} shows the refined crystal structure of \BaCu\ at 5, 100, and 450\,K. The lowest temperature corresponds to a state where \CuA\ and \CuB\ spins are ordered. At 100\,K, only \CuA\ spins are long-range ordered and at 450\,K the system is in the paramagnetic state.
\begin{figure*}[t]
\includegraphics[bb=30 0 1640 570, clip= ,width=0.9\textwidth]{fig1.eps}
\caption{(a) Depiction of the magnetic structure in \BaCu. The circles filled by blue and red colors represent \CuA\ and \CuB\ sites, respectively. Empty circles denote intermediate O atoms. The shaded blue and red outlines represent the magnetic unit cells when \CuA\ and \CuA-\CuB\ are magnetically ordered, respectively. The hopping terms connected by colored lines are discussed in the text. (b) Reciprocal space of \BaCu\ projected onto the $(h,k)$ plane. The shaded blue and red outlines represent the magnetic Brillouin zones of the two sublattices: \CuA\ and \CuB\ respectively. Bragg scattering from each sublattice is shown at the magnetic zone center.}
\label{fig:1}
\end{figure*}
In order to have an easier comparison with cuprate square-lattice AFMs, we shall from here on in consider a coordinate system with axes along the \CuA-O-\CuA\ bonds where $a=b\approx3.9$\,\AA\ and $c\approx13.7$\,\AA, as shown in Fig.~\ref{fig:1}.
Neutron diffraction allows us to detect the onset of long-range magnetic order on the two sublattices composed of \CuA\ and \CuB\ ions. Figures~\ref{fig:23}(a) and \ref{fig:23}(c) show the temperature dependence of $(0.5,0.5,1)$ and $(0.5,0,0)$ Bragg peaks and reflect the magnetic ordering temperatures of the \CuA\ and \CuB\ ions, respectively. The \CuA\ sites are found to order in an antiferromagnetic arrangement at $\TNA = 324(4)$\,K -- lower than 386(2)\,K reported previously in \SrCu\ \cite{kim-prb-2001}. We show $\beta=0.3$ in Fig.~\ref{fig:23}(a) in accordance with \SrCu\ results \cite{kim-prb-2001}. At 100\,K, the \CuA\ moment is 0.68(4)\,\muB.
At 5\,K, our measurements reveal the emergence of additional magnetic reflections from the \CuB\ magnetic structure which can be indexed as $(h,k,l)\pm(0.5,0,0)$ and equivalently $(h,k,l)\pm(0,0.5,0)$ from the twin domain. Single-crystal measurements on \SrCu\ showed that below \TNB, the moments are collinear along $[1,0,0]$ (or equivalently along $[0,1,0]$), as shown in Fig.~\ref{fig:1}. Our refinement at 5\,K is consistent with this magnetic structure and we find an ordered moment of around 0.6\,$\mu_B$ on both \CuA\ and \CuB. This is indicative of the presence of quantum fluctuations, which have been demonstrated in the ideal $S=1/2$ square-lattice Heisenberg antiferromagnet to reduce the staggered magnetization to 60\% of the saturation value \cite{reger-prb-1988}.
\begin{figure}
\includegraphics[width=0.9\columnwidth]{fig23.eps}
\caption{(a) Temperature dependence of the $(0.5,0.5,1)$ reflection obtained from neutron powder diffraction measurements to show ordering on the \CuA\ sublattice. The dashed line is a guide to the eye. Data collected using WISH. (b) Temperature dependence of the spin wave gap of the \CuA\ excitation. On warming above \TNB, the gap softens. {The spin-gap was fitted using a phenomenological function: a Heaviside step function convolved with a Gaussian.} (c) Measurements of the $(0.5,0,0)$ Bragg peak integrated intensity and softening of the \CuA\ spin gap on warming through \TNB. The dashed line shows a power-law fit. Data in panels (b) and (c) were collected using IN20.}
\label{fig:23}
\end{figure}
It was previously argued that in \SrCu\ quantum fluctuations cause two-dimensional ordering of \CuB\ ions by lifting the otherwise frustrated interaction between \CuA\ and \CuB\ sublattices \cite{kim-prl-1999}. The ordered \CuA\ ions create an Ising-like anisotropy which then causes the \CuB\ spins to order. This appears to be valid also for \BaCu. By tracking the staggered magnetization of the \CuB\ spins, we find a critical exponent $\beta = 0.10(5)$ at \TNB, shown in Fig.~\ref{fig:23}(c). This is in good agreement with the value of $\beta = 0.13(1)$ reported for \SrCu\ and $\beta=1/8$ expected for the 2D Ising universality class \cite{kim-prb-2001}. We leave discussion of the concomitant change of the \CuA\ spin gap until later in this article.
\section{Inelastic neutron scattering}
\subsection{Description of spin dynamics}
In order to describe the experimentally observed spin dynamics in \BaCu\ we use the approach developed previously for a number of cuprate systems by starting from the one-band Hubbard model in order to establish the connection between magnetism and electronic correlations \cite{delannoy-prb-2009, dallapiazza-prb-2012}. Following MacDonald \textit{et~al.} \cite{macdonald-prb-1988}, we can project the Hubbard Hamiltonian into a spin Hamiltonian, which contains a series of spin terms with couplings proportional to $t_{ij}^n/U^{n-1}$ and for $t_{ij}/U \ll 1$, the higher order terms can be ignored. In our present work we consider the Hamiltonian up to fourth order in $t_{ij}$, which is given by,
\begin{widetext}
\begin{eqnarray}
\hat{\mathcal{H}}^{(4)}=\sum_{
1 \leftrightarrows 2}\left(\frac{4t_{12}^2}{U}-\frac{16t_{12}^4}{U^3}\right) \left(\mathbf{S}_1 \cdot \mathbf{S}_{2}-\frac{1}{4}\right)+\sum_{
1 \leftrightarrows 2 \leftrightarrows 3} \frac{4t_{12}^2t_{23}^2}{U^3}\left(\mathbf{S}_1 \cdot \mathbf{S}_3-\frac{1}{4}\right)
\nonumber \\
-\sum_{1 \rightarrow 2 \rightarrow 3 \rightarrow 4 \rightarrow 1
}\frac{4t_{12}t_{23}t_{34}t_{41}}{U^3}\left\lbrace\ \sum_{i,j=1 ,i\neq j}^4\mathbf{S}_i \cdot \mathbf{S}_j-20\left[(\mathbf{S}_1 \cdot \mathbf{S}_2)(\mathbf{S}_3 \cdot \mathbf{S}_4)+(\mathbf{S}_1\cdot \mathbf{S}_4)(\mathbf{S}_2\cdot \mathbf{S}_3)
-(\mathbf{S}_1\cdot \mathbf{S}_3)(\mathbf{S}_2\cdot \mathbf{S}_4)\right]\right\rbrace.
\label{eq:HubHeis}
\end{eqnarray}
\end{widetext}
The summations are taken as ensembles of {all possible} two, three, and four site loops. $t_{ij}$ represents the hopping parameter {connecting site $i$ to site $j$}, $U$ represents the on-site Coulomb repulsion energy. {We emphasize that Eq.~(\ref{eq:HubHeis}) is general in the case of the strong-coupling Hubbard model at half-filling for any lattice and any ensemble of hopping parameters $t_{ij}$.} In the above Hamiltonian we observe that, apart from the Heisenberg-like terms we also have quartic spin terms. {We make an approximation using linear spin-wave theory by expanding the quartic terms into Heisenberg-like terms with an effective coupling.} Hence, given a set of hopping parameters, one can calculate the effective coupling between two sites by identifying all the hopping paths containing these sites and adding their contributions.
In order to describe the magnetic excitations in \BaCu, we need to consider first ($t$), second ($t'$) and third ($t''$) nearest-neighbor hopping terms in the Hubbard model. This is the minimal set that was found to account for the magnetic excitations in a number of different cuprate systems while at the same time having a realistic on-site interaction term $U$ \cite{kim-prl-1998, dallapiazza-prb-2012}. Figure~\ref{fig:1} depicts the hopping terms for the present case of \BaCu. It is immediately evident that due to the symmetry of the crystal structure, we have in general three different $t'$ and two different $t''$ terms that we must consider. To simplify matters, we set $t'_1 = t'_2$ that both connect \CuA\ ions. When $t'_1 \neq t'_2$, the \CuA\ modes are predicted to split which is not observed in our measurements. As will be discussed later, we expect that $t''_2$ is small and so set it to zero.
\begin{table}
\caption{
Single-band Hubbard model hopping terms $t$ and on-site Coulomb interaction $U$ given in units of eV. The hopping parameters for \BaCu\ were fixed to the values of \SCOC, except for $t'_{3}$, which was refined using our measurements. The table includes comparative values for La$_2$CuO$_4$ and tetragonal-CuO. The model includes: out-of-plane exchange interaction $J_\perp = 0.025(1)$\,meV, coupling between \CuA\ and \CuB\ sublattices $J_{\rm AB} = -10.3(1)$\,meV, anisotropic exchange interaction between \CuA\ spins $\epsilon_{\rm A} = 2.0(2)\times 10^{-4}$ and
$\epsilon_{\rm B} = 0.026(2)$ for \CuB\ spins.
\label{tab:tU}}
\begin{tabular}{lccccccccr}
\hline
\hline
& $U$ & $t$ & $t'_{1}$ & $t'_{2}$ & $t'_{3}$ & $t''_{1}$ & $t''_{2}$ & Ref.\\
\hline
La$_2$CuO$_4$
& 3.5 & 0.492 & -0.207 & - & - & 0.045 & -
& \cite{dallapiazza-prb-2012}\\
& 3.34 & 0.422 & -0.138 & - & - & 0.066 & -
& \cite{delannoy-prb-2009}\\
\SCOC
& 3.5 & 0.48 & -0.2 & - & - & 0.075 & -
& \cite{dallapiazza-prb-2012}\\
T-CuO
& 3.5 & 0.49 & -0.2 & - & - & 0.075 & -
& \cite{moser-prb-2015}\\
\BaCu
& 3.5 & 0.48 & -0.2 & -0.2 & -0.086(1) & 0.075 & 0
& $\ast$ \\
\hline
\hline
\end{tabular}
\end{table}
\begin{table}
\caption{The effective spin-spin exchange coupling parameters used in Eq.~(\ref{eq:Ham}) derived from the single-band Hubbard model parameters given in Table~\ref{tab:tU}. The superscript of $J$ refers to the sublattice of \CuA\ or \CuB\ spins and the subscript to the order of the neighbor within the given sublattice. Values are in meV.
\label{tab:effJ}}
\begin{tabular}{lrlr}
\hline
\hline
$J^{\rm A}_1$ & 169.0 & \qquad $J^{\rm B}_1$ & 8.4\\
$J^{\rm A}_2$ & 26.7 & \qquad $J^{\rm B}_2$ & 0.0\\
$J^{\rm A}_3$ & 30.8 & \\
$J^{\rm A}_4$ & 7.2 & \\
$J^{\rm A}_5$ & 0.0 & \\
\hline
\hline
\end{tabular}
\end{table}
To a first approximation, we expect that the parameter set $t$-$t'$-$t''$-$U$ of the one-band Hubbard model does not change significantly from one two-dimensional copper oxide system to another. In Table~\ref{tab:tU} we present the values of one-band Hubbard model parameters obtained from independent measurements on closely related cuprates. We expect that the coupling on the \CuA\ sublattice is similar in \SCOC\ (SCOC) and \BaCu. However, the coupling interactions between \CuB\ ions and \CuA-\CuB\ remain to be determined from experiments.
The smallest in-plane real-space unit cell that tiles the whole \BaCu\ lattice contains 2\CuA\ and 1\CuB, as shown by the blue outline in Fig.~\ref{fig:1}(a). For temperatures in the range of $\TNB < T < \TNA$, the magnetic unit cell is the same as the smallest unit cell, containing 2\CuA\ and results in a single doubly degenerate mode (assuming no anisotropy) dispersing up to around 300\,meV. Once \CuB\ spins order at $T<\TNB$, the magnetic unit cell is doubled and shown by the red outline in Fig.~\ref{fig:1}(a). This magnetic unit cell contains 4\CuA\ and 2\CuB. Therefore, at the lowest temperature 4\CuA\ and 2\CuB\ branches are expected.
From the single-band Hubbard model, we can derive the effective spin Hamiltonian that only involves bilinear spin-spin exchange interactions taking care to include all the hopping processes of $t$-$t'$-$t''$ up to $1/U^3$ order.
The quartic spin terms in Eq.~(\ref{eq:HubHeis}) are expanded using linear spin-wave theory and truncated up to second order in boson operators to provide effective Heisenberg-like terms. In the case of nearest-neighbor hopping only, the effective nearest- and next-nearest-neighbor spin-spin exchange interactions are $J_1 = 4t^2/U - 64t^4/U^3$ and $J_2 = -16t^4/U^3$, respectively. More generally, we consider an effective Hamiltonian of the form,
\begin{align}
\mathcal{H} =& \sum_{i,j \in {\rm A}}J^{\rm A}_{n} \mathbf{S}^{\rm A}_i \cdot \mathbf{S}^{\rm A}_j
+ \sum_{i,j \in {\rm B}}J^{\rm B}_{n} \mathbf{S}^{\rm B}_i \cdot \mathbf{S}^{\rm B}_j\nonumber\\
& + \sum_{i,j \in {\rm A}} J_\perp \mathbf{S}^{\rm A}_i \cdot \mathbf{S}^{\rm A}_j
+ \sum_{i,j \in {\rm A,B}}J_{\rm AB} \mathbf{S}^{\rm A}_i \cdot \mathbf{S}^{\rm B}_j,
\label{eq:Ham}
\end{align}
where $i \in A$ denotes the summation over site $i$ of the \CuA\ lattice sites. The effective coupling terms $J^{\rm A}_{n}$ and $J^{\rm B}_{n}$ are derived from the single-band Hubbard model described above. The values of these are given in Table~\ref{tab:effJ}. We consider up to 4th order neighbor exchange interactions between \CuA\ ions and up to 2nd order exchange interactions between \CuB.
The advantage of this method is that it goes beyond the first pairwise Heisenberg exchange interaction and the so-called ring exchange { -- which is a special case of a four-site interaction}. The latter corresponds to electron hopping around the perimeter of the Cu-O square motif that leads to the dispersion of spin-waves along the magnetic zone boundary and a larger spin-wave velocity at the zone center. By considering more fundamental electronic correlations, we include not only the leading order ring exchange but other four-spin (and three-spin) exchange interactions.
We include a small coupling between \CuA\ layers along the $c$-axis in our calculation as $J_\perp$. Since \CuB\ ions are spaced even further apart and connected by frustrated exchange paths, we do not consider out-of-plane \CuB-\CuB\ interactions.
We next consider the \CuA-\CuB\ interaction. Within linear spin-wave or mean-field theory, the interaction between \CuA\ and \CuB\ ions is completely frustrated. However, as we shall discuss later, there is clear evidence that there is a finite coupling between \CuA\ and \CuB\ spins, which cannot be understood without fluctuations. As has been shown by Shender, despite frustration leading to a degenerate ground-state, fluctuations (either thermal or quantum) may partially or completely lift the degeneracy \cite{shender-jetp-1982}. As demonstrated in previous work on \SrCu, this effect leads to collinear ordering of the \CuA\ and \CuB\ sublattices \cite{kim-prl-1999, harris-jmmm-2001, harris-prb-2001}. To account for this effect, we include a $J_{\rm AB}$ coupling term in our model.
On cooling below \TNB, both \CuA\ and \CuB\ excitations are found to have a gap at the magnetic zone center. To account for this, we introduce a small Ising anisotropy in the 1st order effective exchange interactions $J_1^{\rm A}$ and $J_1^{\rm B}$, such that $(J_x,J_y,J_z) = J(1+\epsilon,1,1)$. This accounts for the spin alignment in the magnetic structure and has the effect of opening a gap in the spectrum without leading to extra branches.
In order to calculate the magnetic spectrum, we use linear-spin wave theory which assumes that (i) the magnetic groundstate is long-range ordered and (ii) quantum fluctuations are small. Whether these approximations hold for spin-1/2 and two-dimensional systems has not been proven. Nevertheless, linear spin-wave theory has been shown to work surprisingly well when compared with numerical works using exact diagonalization, series expansion, quantum Monte Carlo, etc. for a Heisenberg model \cite{manousakis-rmp-1991}. In the case of nearest-neighbor coupled spin-1/2 Heisenberg AFM on a square lattice, magnon dispersion calculated from linear spin-wave theory requires corrections to account for the magnetic spectrum.
The diagonalization of the Hamiltonian in Eq.~(\ref{eq:Ham}) is performed using the SpinW package which truncates the Holstein-Primakoff operators at the quadratic order \cite{toth-spinw}. Within first-order perturbation theory to the quadratic spin-wave Hamiltonian, quartic order magnon operators renormalize the magnon energy by a factor $Z_c(\Qb)$ \cite{delannoy-prb-2009}. Typically, $Z_c$ depends weakly on \Qb\ and $t$-$t'$-$t''$ \cite{delannoy-prb-2009, dallapiazza-prb-2012} and for simplicity in this work we use $Z_c = 1.18$ \cite{singh-prb-1989, igarashi-prb-1992, syljuasen-jpcm-2000}.
A renormalization of the spectral weight due to charge fluctuations of the Hubbard model must also be considered. The magnetic signal is expected to be weakened by these as empty and doubly occupied sites do not couple to a magnetic probe such as in INS. The dynamical structure factor obtained from the Heisenberg model must therefore be renormalized by a factor of $1/|R_{\rm eff}(\mathbf{q})|^2$ approximated as,
\begin{equation}
R_{\rm eff}(\mathbf{q}) \approx 1 + \sum_{\boldsymbol{\tau}} \left(\frac{t_{\boldsymbol{\tau}}}{U}\right)^2 (1-e^{\rm i \mathbf{q}\cdot \boldsymbol{\tau}})
+ \mathcal{O}\left(\frac{t_{\boldsymbol{\tau}}}{U}\right)^4,
\label{eq:renorm}
\end{equation}
where ${\boldsymbol{\tau}}$ are the real-space hopping paths. At the magnetic zone-center, where this effect contributes most strongly, $1/|R_{\rm eff}(\mathbf{q})|^2 = 0.76$ for the \CuA\ sublattice with $t = 0.48$\,eV and $U = 3.5$\,eV. Contributions to the dynamic structure factor from $t'_1$ and $t'_2$ cancel at the magnetic zone center. Away from the magnetic zone center, the renormalization tends to 1. Treating \CuB\ as a completely independent lattice yields $1/|R_{\rm eff}(\mathbf{q})|^2 = 1$ as $(t'_3/U)^2$ is much smaller than $(t/U)^2$.
\subsection{Excitations of the strongly-coupled \CuA\ sublattice}
\label{subsub:strong}
\begin{figure}
\includegraphics[width=0.9\columnwidth]{fig8.eps}
\caption{Spin gap in the \CuA\ dispersion at the magnetic zone center. (a) Data shows energy cuts collected at different $E_i$ using TOF spectrometers: MERLIN, MAPS, and HRC. Solid lines show a convolution of a Heaviside function and a Gaussian to account for the instrumental energy broadening that has been used to extract the out-of-plane dispersion. (b) Spin-wave dispersion along $(0.5,0.5,l)$ extracted from TOF measurements using incident neutron energies $E_i$ between 25 and 54\,meV. We show the trajectories of the $(0.5,0.5,l)$ dependence with $E$ as colored lines through the data points. The solid and dashed line show simulations for the spin-wave model using parameters discussed in the text. The modes plotted as dashed or dotted lines carry no spectral weight. The simulations are plotted as black and red lines to show calculations using $\epsilon_{\rm A} = 2\times10^{-4}$ and 0, respectively. We plot TAS measurements at 2\,K (black circle) and 50\,K (red square) obtained at $(0.5,0.5,0)$ using the IN20 spectrometer. All TOF measurements were recorded at 6\,K.}
\label{fig:8}
\end{figure}
{In order to examine the strength of coupling between \CuA\ layers, we consider what happens above and below \TNB. At $T > \TNB$, the \CuB\ spins are disordered and the spin fluctuations can be treated as arising purely from a long-range ordered \CuA\ system. Our measurements at $(0.5,0.5,0)$ and 50\,K($>\TNB$) find excitations above 7.1(1)\,meV. This can be captured in our model by fitting $J_\perp = 0.025(1)$\,meV. Figure~\ref{fig:8}(b) shows the calculated out-of-plane spin-waves of \CuA\ centered on $(0.5,0.5,1)$.}
As the \CuB\ sublattice orders below \TNB, the \CuA\ excitations at $(0.5,0.5,0)$ are shifted up in energy by about 2\,meV, see Fig.~\ref{fig:8}. In such case our model must also account for \CuA-\CuB\ coupling originating from quantum fluctuations.
To examine the out-of-plane dispersion in the \CuB\ ordered state, we have collected TOF data shown in Fig.~\ref{fig:8}. At a given $(h,k)$ point the value of $l$ depends on both energy transfer $E$ and incident energy $E_i$. Therefore, collecting data with several $E_i$ allows the out-of-plane dispersion to be quantified. Figure~\ref{fig:8}(a) shows how the measured spin-gap at $(0.5,0.5)$ changes with $E_i$. We perform a fit to the data using a Heaviside function convoluted with a Gaussian whose width is fixed by incoherent scattering at the elastic line to approximate the energy-dependent resolution. In doing so, for $E_i$ between 25 and 54\,meV, we obtain the dispersion along $(0.5,0.5,l)$ shown in Fig.~\ref{fig:8}(b).
Using the spin-wave model discussed in the preceding section, we can obtain a good agreement to our results by fixing $J_\perp = 0.025$\,meV and fitting the nearest-neighbor \CuA\ exchange anisotropy which gives $\epsilon_{\rm A} = 2.0(2)\times 10^{-4}$. The value of $J_\perp$ does not include the $1/S$ renormalization due to spin-wave interactions that lead to $J_\perp \rightarrow \tilde{Z}_\perp \tilde{J}_\perp$ \cite{harris-prb-2001}. Taking this into account, where $\tilde{Z}_\perp\approx 0.6$, gives $\tilde{J}_\perp\approx 0.042(2)$\,meV -- somewhat smaller than the reported value $\tilde{J}_\perp=0.14(2)$\,meV for \SrCu. However, more careful measurements of the dispersion along $l$ and above \TNB\ would be necessary to confirm this.
The exchange pathway between the nearest-neighbor \CuA\ and \CuB\ ions involves two O orbitals connected by perpendicular Cu-O bonds. According to the Goodenough-Kanamori-Anderson rules, exchange coupling is weakly ferromagnetic when it passes through a 90$^\circ$ bond between two magnetic ions. The extended single-band Hubbard model can no longer be applied in this circumstance as this gives an effective nearest \CuA-\CuB\ coupling that is antiferromagnetic, in contradiction to experimental findings \cite{kim-prb-2001}. Based on the theoretical work developed for \SrCu, we can use the spin-gap as an estimate of the coupling strength between the two sublattices $J_{\rm AB}$ \cite{harris-prb-2001, harris-jmmm-2001}. From Fig.~\ref{fig:8}(b), we estimate that the energy gap at the zone center of $(0.5,0.5,1)$ is approximately 8.3(1)\,meV. For an anisotropic Heisenberg model, the zero wavevector energy is given by $w^2 = 2H_{\rm E} H_{\rm A}$, where $H_{\rm E}$ and $H_{\rm A}$ are the exchange and anisotropy fields, respectively. Using the results in Ref.~\onlinecite{harris-prb-2001} for the present mode, $H_{\rm E} = 2J$ and $H_{\rm A} = CJ_{\rm AB}^2/J$, where the constant $C\approx 0.16$. This yields a value of $|J_{\rm AB}|\approx 10.3(1)$\,meV, which is close to the values obtained for \SrCu\ of approximately $10$\,meV \cite{chou-prl-1997, kastner-prb-1999, kim-prl-1999}. Considering the strong resemblance of \BaCu\ to \SrCu, we adopt $J_{\rm AB}$ to be ferromagnetic.
\begin{figure*}
\centering
\includegraphics[width=0.7\textwidth]{fig6.eps}
\caption{Dispersion along high-symmetry directions in the 2D Brillouin zone obtained at 6\,K. Extracted dispersion was obtained from TOF measurements using neutron incident energies in the 28--400\,meV range. The simulated spin-wave spectrum is shown for different parameters of $t'_3$ and $J_{\rm AB}$ in units of meV. Other parameters were fixed to those shown in Table~\ref{tab:tU} and $J_\perp = 0.025$\,meV. Data collected using MERLIN, MAPS, HRC, and IN20 spectrometers.}
\label{fig:6}
\end{figure*}
In order to determine the high-energy magnetic excitations of the \CuA\ spins, we have performed inelastic TOF neutron scattering measurements on \BaCu\ at 6\,K. A range of incident energies were employed to map out the spectrum up to 300\,meV with sufficiently high resolution, typically on the order of 5\% of $E_i$ at the elastic line.
Since $J_\perp$ is small, in analyzing the TOF data we average over the out-of-plane component $l$ and use the $(h,k)$ coordinate system to simplify the notation.
To improve the statistics of the \CuA\ excitations, the data were folded in the $(h,k)$ plane. Constant energy cuts were fitted to a Gaussian lineshape above 100\,meV. Below 100\,meV, we employed a ring-like spectral lineshape in the $(h,k)$ plane in order to accurately determine the steeply rising zone center dispersion.
Figure~\ref{fig:6} shows the extracted magnon dispersion in \BaCu. Our measurements suggest that the interaction between \CuA\ and \CuB\ must be rather small as we do not observe any magnetic zone folding or \CuA\ branch splitting that would be otherwise expected. Strongly dispersive spin-waves emerge from the $(0.5,0.5)$ point due to coupling between \CuA\ spins as would be expected in the absence of \CuB\ sublattice. Varying $t'_3$, as shown in Fig.~\ref{fig:6}, has negligible effect on the \CuA\ dispersion.
Tracking this dispersion in energy transfer shows a maximum of around 250\,meV at $(0.75,0.25)$ and close to 300\,meV at $(0.5,0)$. The statistics of the data are poor above 300\,meV and additional measurements using RIXS are in progress to complement this neutron scattering study \cite{fatale-rixs}. A magnetic zone-boundary dispersion of at least 50\,meV between $(0.5,0)$ and $(0.75,0.25)$ is found from our measurements. This effect has also been observed in closely related \LCO\ and \SCOC\ compounds and explained in terms of multi-spin exchange \cite{coldea-prl-2001, guarise-prl-2010}. We find that our model accounts well for the \CuA\ dispersion over the entire magnetic Brillouin zone, without any adjustable parameters. Small differences, such as lower calculated spin-wave velocity at the zone center may be accounted for by (i) taking higher-order hopping parameters or (ii) inclusion of \Qb-dependence of the renormalization $Z_c$.
\subsection{Excitations of the weakly-coupled \CuB\ sublattice}
\label{subsub:weak}
\begin{figure}
\centering
\includegraphics[bb=0 0 660 381, clip=,width=0.9\columnwidth]{fig9.eps}
\caption{(a) Measurements of the \CuB\ excitations close to $(0.5,0)$ obtained at 6\,K using $E_i = 14$\,meV. (b) An energy cut at $(0.5,0)$ to show the spin gap of 3.8(2)\,meV at the zone center of \CuB\ excitations. The solid line is a guide to the eye. Data collected using MERLIN.}
\label{fig:9}
\end{figure}
\begin{figure*}
\centering
\includegraphics[bb = 9 98 627 791,clip=, width=0.8\textwidth]{fig5.eps}
\caption{Magnetic excitation of the weakly-coupled \CuB\ sublattice recorded at 6\,K using $E_i = 28$\,meV. Panels (a)--(c) show constant energy slices through the dispersion at energy transfers of 4.5, 12.5 and 19.5 meV, respectively. (d) and (e) show slices as a function of energy transfer along high-symmetry directions. (f) strong dispersion along the \CuB\ magnetic zone boundary between 18 and 20 meV.
Comparative slices from model magnetic spectrum are shown in panels (g)-(l). Calculated magnon-dispersion was convoluted with the instrumental resolution. A renormalization of the \CuA\ modes, discussed in the text, was included in the calculation. Data collected using MERLIN.}
\label{fig:5}
\end{figure*}
Now we turn to the low-energy dynamics of the weakly-coupled \CuB\ sublattice at 6\,K.
Figure~\ref{fig:9} shows high-resolution measurements (FWHM at elastic line of 0.5\,meV) close to the magnetic zone center of the \CuB\ excitations that are able to resolve the spin-gap of 3.8(2)\,meV in the \CuB\ excitations.
The \CuB\ spin-waves emerge from $(0.5,0)$, and equivalent, positions in reciprocal space up to around 19\,meV, as shown in Figs.~\ref{fig:5}(a)-\ref{fig:5}(f). In Figs.~\ref{fig:5}(b) and \ref{fig:5}(d) we observe spin-waves from \CuB\ as well as steeply rising \CuA\ excitations at $(0.5,0.5)$. A strong magnetic zone boundary dispersion is found along $(h,0.25)$ which is shown in Fig.~\ref{fig:5}(f). The experimental results of the low-energy fluctuations are similar to the previously reported inelastic neutron scattering measurements on \SrCu\ but with a bandwidth which is lower than \SrCu\ where excitations extend up to a maximum of 25\,meV \cite{kim-prb-2001}. We do not find evidence of a continuum -- broad scattering above the single-magnon dispersion. However, polarised neutron spectroscopy would be necessary to confirm this.
Further discussions of the magnetic zone boundary are found in Section~\ref{subsec:magn_field}.
Scattering from phonons is observed between 15 and 20 meV, see Section~\ref{sub:temperature}.
To account for the \CuB\ magnetic spectra recorded, we include an additional hopping parameter $t'_3$ in our projected Hubbard model with other parameters fixed to those of \SCOC\ and given in Table~\ref{tab:tU}. It is clear that in the first approximation, setting $t'_1 = t'_2 = t'_3=-0.2$\,eV produces modes that are far too high in energy (see dot-dash line in Fig.~\ref{fig:6}). Instead, we find tuning $t'_3 = -0.086(1)$\,eV is able to reproduce the bandwidth of the low-energy \CuB\ modes. A small exchange anisotropy $\epsilon_{\rm B} = 0.026(2)$ is necessary to account for the spin-gap at $(0.5,0)$.
The simulated slices, equivalent to Figs.~\ref{fig:5}(a)--\ref{fig:5}(f), are shown in Figs.~\ref{fig:5}(g)--\ref{fig:5}(l). The model parameters used in the calculation are found in Table~\ref{tab:tU} and the effective exchange coupling parameters given in Table~\ref{tab:effJ}. We present one of the first simulations that combine SpinW and Tobyfit programs to produce a convolution of the instrumental resolution calculated with the magnon dispersion. An isotropic free Cu$^{2+}$ magnetic form factor is taken for \CuA\ and \CuB\ spins. A renormalization of the calculated dynamic structure factor by charge fluctuations, given by Eq.~(\ref{eq:renorm}), is included in the calculations. Our model is able to account for the salient features of the low-energy magnetic spectrum. However, two discrepancies remain.
First, comparing Figs.~\ref{fig:5}(d) and \ref{fig:5}(j), we find that the calculated \CuA\ modes at $(\pm0.5,0.5)$ are predicted to be more intense than observed experimentally, particularly when comparing the scattering just above the \CuA\ and \CuB\ spin gaps. It is unclear what the origin of this is. For a plate-like sample of \BaCu, we would expect the beam attenuation over the energy transfer range studied to be uniform to within 10\%.
A possible origin of this could be related to the nature of the \CuA\ and \CuB\ electronic orbitals. The isotropic magnetic form factor is only a good approximation at small $|\Qb|$ as the $3d_{x^2 - y^2}$ orbital is anisotropic. Furthermore, since different ions are situated above and below \CuA\ and \CuB, the magnetic form factor need not be the same for the two Cu sites. {In addition, strong Cu-O covalent bonding has been shown to modify the magnetic form factor in such a way that could result in large discrepancies in the intensity \cite{walters-natphys-2009}. In the present case this would affect the \CuA\ but not the \CuB\ sublattice.}
A second discrepancy between measurements and model is along the magnetic zone boundary, shown in Figs.~\ref{fig:5}(f) and \ref{fig:5}(l). Introducing an exchange coupling between \CuA\ and \CuB\ sublattice, $J_{\rm AB} = -10$\,meV gives a small dispersion (see Fig.~\ref{fig:6}) but is clearly insufficient to account for the measured spectrum. The magnetic zone boundary dispersion in \SrCu\ and other realizations of nearest-neighbor $S=1/2$ square-lattice AFMs is now well established as a quantum effect that is not included in our model.
\subsection{Temperature dependence}
\label{sub:temperature}
\begin{figure}
\includegraphics[bb= 9 12 336 670, clip=, width=0.8\columnwidth]{fig7.eps}
\caption{(a)--(c) Temperature dependence of the magnetic spectrum from 6 to 120\,K measured along $(h,0.5)$ wavevector. (d) Constant-energy cuts between 4 and 5\,meV through the magnetic scattering. The solid lines show a Gaussian fit to the lineshapes for each temperature. For clarity, the scans have been displaced vertically. Data collected using MERLIN.}
\label{fig:7}
\end{figure}
Figure~\ref{fig:7} shows the change of the magnetic spectrum between 6 and 120\,K along the $(h,0.5)$ direction. At 6\,K we observe gapped \CuA\ excitations at $(\pm0.5,0.5)$, as expected.
Within the resolution of our TOF measurements (1.5\,meV FWHM at the elastic line) the spin-gap is closed upon warming above the \CuB-sublattice ordering temperature, as shown in Figs.~\ref{fig:7}(b) and \ref{fig:7}(c). Comparing these results to the TAS measurements at $(0.5,0.5,0)$, we find that the \CuA\ spin-gap is 7.1(1)\,meV at 50\,K [Fig.~\ref{fig:23}(b)].
The seemingly contradictory observations of the \CuA\ excitations come from the difference between TAS and TOF measurement techniques. The TOF data presented in Fig.~\ref{fig:7} has an $l$ component which varies with energy transfer, as a result we pick up scattering from the mode close to $(0.5,0.5,1)$.
Therefore, our observations are consistent with the scenario where the out-of-plane modes become either gapless or nearly gapless at $(0.5,0.5,1)$ above \TNB. Increase of intensity below 10\,meV at $4\TNB\approx120$\,K of the \CuA\ modes is consistent with thermally activated scattering of magnons.
We now turn to the temperature dependence of the \CuB\ excitations. At 6\,K we observe clear spin-wave dispersion arising from the \CuB\ sublattice. Above \TNB, we observe \CuB\ correlations but whose spectrum is heavily damped and appears to soften to lower energies. Our results are qualitatively similar to Cu(DCOO)$_2\cdot$4D$_2$O (CFTD), which is a good realization of $S=1/2$ square-lattice AFM. In CFTD, a clear broadening of the excitation spectrum was found with increasing temperature \cite{ronnow-prl-2001}. This could then be related to a scaling theory \cite{tyc-prl-1989}. Whilst the current data does not allow for a quantitative comparison with the theory, it could be a potential avenue for further investigation.
Our temperature dependence measurements of the inelastic spectrum also reveal that additional modes between 15 and 20\,meV are most likely to be phononic in origin with no noticeable change in dispersion between 6 and 120\,K. We do not observe any signs of hybridization between spin and lattice degrees of freedom.
\subsection{Magnetic field dependence}
\label{subsec:magn_field}
\begin{figure}
\includegraphics[width=0.8\columnwidth]{fig4.eps}
\caption{Magnetic field dependence of \CuB\ sublattice. Constant-wavevector measurements at high-symmetry points on the magnetic zone boundary, at $(1.25,0,0)$ and $(1.25,0.25,0)$ are shown in panels (a) and (b), respectively. {Error bars on points for base temperature in 0 and 10\,T are smaller than the point size.} A Gaussian lineshape is fitted to the magnetic excitations in panel (a). (c) subtraction of measurements at 0 and 10\,T recorded at base temperature. The solid line characterizes the broadening of the lineshape as discussed in the text. Data collected using IN20.}
\label{fig:4}
\end{figure}
The magnetic zone-boundary in $S=1/2$ square-lattice AFMs displays a number of intriguing quantum effects \cite{kim-prl-1999, kim-prb-2001, ronnow-prl-2001, christensen-pnas-2007, tsyrulin-prl-2009, headings-prl-2010, wang-prb-2012, dallapiazza-nature-2015, babkevich-prl-2016b}. This is reflected on the spectrum in the following ways: (i) the zone-boundary at $(\pi,0)$ is around 8\% lower than at $(\pi/2,\pi/2)$; (ii) half of the single-magnon intensity at $(\pi/2,\pi/2)$ is missing; and (iii) a continuum of intensity is found at $(\pi/2,\pi/2)$.
\footnote{To ease the comparison with theoretical studies concerning the quantum effects along the magnetic zone boundary, we employ the convention where the magnetic Brillouin zone of the \CuB\ sublattice is in units of $2\pi$. In this case $(1.25, 0.25, 0)$ and $(1.25,0,0)$ are equivalent to $(\pi,0)$ and $(\pi/2,\pi/2)$, respectively.}
The first is reproduced by inclusion of quantum fluctuations using series expansion and quantum Monte Carlo methods for $S=1/2$ square lattice AFMs \cite{singh-prb-1995, syljuasen-jpcm-2000, sandvik-prl-2001, zheng-prb-2005, babkevich-prl-2016b}. The zone-boundary dispersion can also be modified by further neighbor interactions, as found for the \CuA\ sublattice in \BaCu\ and other related materials \cite{coldea-prl-2001,guarise-prl-2010, babkevich-prb-2010, dallapiazza-prb-2012, moser-prb-2015}. However, the latter (ii) and (iii) seem to be robust for all realizations of $S=1/2$ square-lattice AFMs studied in sufficient detail thus far. One possible origin of this effect is spinon deconfinement \cite{dallapiazza-nature-2015}, though it may also be a spin-wave interaction effect \cite{powalski-prl-2015}.
In this framework, for sufficiently large magnetic fields, it may be possible to observe the confinement of $\Delta S = 1/2$ spinons into $\Delta S = 1$ spin waves. This is well out of reach for systems such as \LCO\ and \SCOC\ that would require magnetic fields far in excess of what is experimentally possible. In Cu(pz)$_2$(ClO$_4$)$_2$ it was observed that a magnetic field of 14.9\,T, corresponding to $H\approx J$ restores the intensity at $(\pi,0)$ and seems to suppress the continuum \cite{tsyrulin-prl-2009}, which in the spinon scenario would correspond to reconfinement. However, the effects on the magnetic zone boundary are found at fields less than $H\approx J$. To address this effect in \BaCu, we have performed TAS measurements using a 10\,T magnet with field along the crystallographic $c$-axis.
In Figs.~\ref{fig:4}(a) and \ref{fig:4}(b) we show that magnetic scattering along the zone boundary is strongly dispersive -- ranging from 20.12(2)\,meV to 18.52(3)\,meV between $(1.25,0,0)$ and $(1.25,0.25,0)$ in zero applied field at 1.5\,K. We see the expected reduction of intensity at $(1.25,0.25,0)$ compared to $(1.25,0,0)$, which are equivalent to $(\pi,0)$ and $(\pi/2,\pi/2)$, respectively. Hence manifestations of (i) and (ii) of the zone boundary effects are present. However, our data does not reveal a continuum. A broad peak around 21\,meV at $(1.25,0.25,0)$ comes from the phonons, see Section~\ref{sub:temperature}.
On applying a magnetic field of 10\,T, we see very small effects. At the $(1.25,0,0)$ position, the spin-wave mode is slightly softened from 20.12(2) to 19.94(2)\,meV. In addition, there appears to be a 5\% sharpening of the mode -- from a FWHM of 1.24(7) in zero field to 1.18(5)\,meV at 10\,T.
There is no discernible shift in energy of the peak at $(1.25,0.25,0)$. The difference plot in Fig.~\ref{fig:4}(c) does reveal a change, which can be modelled as a 12\% sharpening, but which could also reflect a tiny hardening combined with a small decrease in a higher-energy tail.
In interpreting our results, we first reflect on the seemingly missing continuum. Due to the phonon contribution, we would require polarized neutrons to conclusively exclude a continuum.
The following considerations apply if indeed the continuum is missing.
If the continuum is already suppressed in zero field, this would then explain why we also see no significant change upon applying a magnetic field.
The reduction in quantum fluctuations would also impact the size of the ordered moment. Treating the \CuB\ sublattice as a purely nearest-neighbor coupled Heisenberg AFM with an Ising anisotropy of $\epsilon_{\rm B}=0.026$ would result in an ordered moment of 0.73\,\muB, compared to 0.6\,\muB for isotropic exchange coupling. However, our diffraction results that find \CuB\ moment of 0.58(11)\,\muB are not able to reliably distinguish between the two scenarios.
The potential absence of a continuum could provide a promising direction for theoretical studies aiming to uncover the nature of quantum effects and we suggest both spinon and interacting-spin-wave based theories should investigate the effect of adding anisotropy.
\section{Conclusion}
Using neutron diffraction and spectroscopy we have characterized the static and dynamic magnetic properties of \BaCu. Magnetic excitations emerge from interpenetrating laminar sublattices of \CuA\ and \CuB\ spins each of which is arranged on a square-lattice. Low-energy excitations between 3 and 20\,meV originate from the weakly coupled \CuB\ spins and closely resemble the \SrCu\ spectra \cite{kim-prl-1999,kim-prb-2001}. In addition, we track the \CuA-like excitations up to 300\,meV, which have not been previously studied in this family of materials. To characterize the spin dynamics we employ a single-band Hubbard model from which we derive an effective spin Hamiltonian. A suitable parametrization of the magnetic spectrum is found using linear spin-wave theory. Careful analysis of the \CuA\ and \CuB\ spin-gaps provides us with the out-of-plane coupling, the strength of the \CuA\ and \CuB\ coupling as well as the exchange anisotropies. The interpenetrating \CuB\ sublattice is found to be only weakly coupled to the \CuA\ spins. Taking advantage of the recent developments in software, namely SpinW and Tobyfit, we convolute calculated magnon spectra with the instrumental resolution function to obtain an accurate comparison between measurements and theory. Along the magnetic Brillouin zone boundary of weakly-coupled \CuB\ spins we find a significant dispersion that we argue is a quantum effect that is beyond linear spin wave theory. On applying a magnetic field of 10\,T we see a tiny magnon energy shift and sharpening. However, the effects are smaller than expected, which hints that anisotropy could be a useful parameter to tune and better understand this quantum effect.
\begin{acknowledgments}
We wish to thank S. Fatale, M. Grioni, and C. G. Fatuzzo for helpful discussions. We also are grateful to S. Toth for his help with the SpinW calculations. Experiments at the ISIS Pulsed Neutron and Muon Source were supported by a beamtime allocation from the Science and Technology Facilities Council. The study was supported by the Swiss National Science Foundation (SNSF) and its Synergia network Mott Physics Beyond the Heisenberg Model (MPBH).
\end{acknowledgments}
%
\section{Background and Related Work}~\label{sec:bg}
In contrast to robots used by manufacturers or specialists,
service robots are close
to people and easy to operate,
providing a variety of services,
such as housekeeping and entertainment~\cite{robot:service:RAS-2013, robot:service:RAS-2017}.
According to ISO standard~\cite{robot:ISO-standard:2012},
a service robot is a class
of robots that ``perform useful tasks for humans or equipment
excluding industrial automation applications''.
A movable autonomous service robot, such as the typical cleaning robot,
has the following components: 1) a digital controller
such as Raspberry Pi or Arduino Mega where a control program runs,
2) numerous sensors to sense surroundings, 3) wheels
to move the robot around, and 4) cyber accessories for network connection.
An autonomous service robot relies on its
control program to decide its next move in accordance
with sensor values obtained from time to time.
Regarding
sensors installed in a service robot,
they quantitatively measure and report the environmental parameters
the robot is encountering. For example, a distance sensor
tells whether the robot is too close to any obstacle.
Sensors may work in different modes. A sensor working in the proactive
mode alerts the robot control program periodically or in case of emergency,
while a sensor working in the passive mode waits for the robot control program
to ask for its value.
\begin{figure}[t]
\centering
\includegraphics[width=\columnwidth]{./Figures/arch.pdf}
\caption{An Illustration of Autonomous Service Robot and Adversaries}~\label{fig:arch}
\end{figure}
Robots fall into the broad category of
CPS.
One outstanding characteristic of CPS is the vast heterogeneity
of building blocks in different CPS for different usages~\cite{security:CPS:DATE-2017, security:CPS-survey:IEEE-2017, CPS:R2U2:Spinger-2017, CPS:survey:Springer-2018}.
An autonomous service robot is significantly distinct from
typical CPS such as power grids, handheld smartphones or 3D printer~\cite{security:smart-home:ICCAD-2014, security:CPS-fuzzing:DAC-2014, security:smart-grid:ICCAD-2015}.
First, an autonomous service robot is generally a simple system
with an economical micro-controller and a few hardware components including
sensors, actuators, and network modules.~\autoref{fig:arch} shows a classic architecture of
autonomous service robot.
Secondly, autonomous service robots gain worldwide popularity in our daily life.
For example, iRobot\ has sold more than 20 million cleaning robots
since its foundation~\cite{robot:irobot-sale:2018}
while the sales volume of Xiaomi Mi robots has reached
one million in 18 months since its release date~\cite{robot:Mi-sale:2018}.
Assuming that a critical flaw of cleaning robot
is uncovered, a large population of users would
be affected.
Thirdly, unlike CPS that undergo frequent
maintenance services
in subways, hospitals, and power stations~\cite{robot:surgical-robot:2015, security:smart-grid:ICCAD-2015, security:fuzzing-transportation:DAC-2015},
many
service robots are unlikely to be promptly
upgraded with security patches.
To update a large number of robots or do remote attestation for each
of them is also challenging
and costly for a manufacturer.
Finally,
a movable service robot is not stationary like 3D printer or handheld smartphone.
Once compromised, it might be manipulated
to incur physical damages to surrounding people.
\vspace{2ex}
In summary, the demand to study security-related issues
for autonomous service robots is actual and critical.
Recently, researchers have looked into the cyber security of
robots~\cite{robot:service:RHIC-2010, robot:security:IntechOpen-2017, SABALIAUSKAITE2017174, robot:hazards:CoRR-2018}.
The security issues should be
considered in the design phase of a service robot due to the ever-increasing
popularity of service robots and the ever-growing strengths of adversaries.
In this paper, we first proceed at the standpoint of
developers to explore how to reveal as many flaws as possible
for an autonomous service robot.
Then we continue to contemplate cost-efficient methods for detection
and mitigation while retaining the robot's work efficacy.
\section{Threats to Validity}~\label{sec:threats}
In this paper, we focus on protecting movable autonomous service robots. We use \textsc{Robo\-Fuzz}\xspace to
fuzz sensor values that would impact the physical movement of robot.
We leverage the historical records of obstacles to detect fuzzed sensor values
and navigate the robot to retain work efficacy. The limits of our proposals are
twofold. First, they are not directly applicable
to non-movable autonomous robots.
Second, \textsc{Robo\-Fuzz}\xspace fuzzes sensor values which are related to the movement of a robot; therefore,
\textsc{Robo\-Fuzz}\xspace does not cover how
to fuzz values for other types of sensors, e.g., the detectors for dust and water.
The two attack models considered in this paper, i.e., suspension attack and fabrication
attack, are comprehensive and representative. Adversaries have managed to conduct such attacks
to CPS~\cite{fingerprint:CIDS:Security-2016}.
These two attack models target compromising the sensor values and subsequently
misguide the robot control program. However, there exist
other attack models that are not discussed in this paper.
For example, a strong attacker may inject a malware in the control program; consequently,
the attacker does not rely on forging or suspending sensor values to manipulate the robot.
The Shade and Remit schemes which detect and mitigate attacks launched by \textsc{Robo\-Fuzz}\xspace
demand the support of historical records of the environment.
Thus,
if a movable robot is placed in a fresh environment,
or new furniture is installed in the original environment,
Shade and Remit might not function
effectively as the records of such changed environments have not been fully obtained
yet.
\section{Conclusion}~\label{sec:conclusion}
In this paper, we have considered security threats
for autonomous service robots in order to protect them.
At the standpoint of developers,
we propose \textsc{Robo\-Fuzz}\xspace that automatically
performs directed fuzzing in line
with the normal state transitions of robot and the
environment where the robot works.
By fuzzing sensor values at appropriate occasions,
\textsc{Robo\-Fuzz}\xspace misleads the robot
to a rational but dangerous state so as to compromise it.
Moving even further, we develop Shade and Remit to detect and mitigate attacks
initiated through \textsc{Robo\-Fuzz}\xspace, respectively. Shade and Remit
take advantage of historical records
of obstacles to detect inconsistent obstacle appearances regarding untrustworthy sensor values
and navigate the movable service robot to
continue working in motion.
As a result, we are able to
efficiently detect and mitigate attacks
but also retain the robot's work efficacy, which in turn enhances the security
and stability of autonomous service robot.
Experiments with a real-world cleaning robot show
that, 1) \textsc{Robo\-Fuzz}\xspace dramatically outperforms state-of-the-art fuzzing tools at fuzzing the robot control program, with much higher
success rates of compromising the robot,
and 2) Shade and Remit maintain a high work efficacy at the mitigation mode with an insignificant loss.
\section{Attack Detection with Shade}\label{sec:detection}
\textsc{Robo\-Fuzz}\xspace provides a way to initiate successful attacks to an autonomous service robot.
In this section we will consider
how to efficiently detect attacks.
We first investigate possible attack models which, once integrated with \textsc{Robo\-Fuzz}\xspace,
would carry the robot into misbehaving states. Accordingly
we look into three classic detection methods,
and develop a hybrid one with wider coverage, higher accuracy and less overhead.
\subsection{Classic Detection Methods}
\paragraph*{\textbf{Fingerprinting}}
Hardware devices
have their unique physical characteristics~\cite{fingerprint:CIDS:Security-2016},
i.e., {\em fingerprints}, such as
the sensor latency (i.e., response time).
Assuming that attackers
fabricate and send fake sensor values via Wi-Fi,
the sensor latency observed by the control process should
be extraordinary as network latencies are generally
one or two orders of magnitude longer than typical sensor latencies.
Take the ultrasonic distance sensor (HC-SR04) for example.
Its sensor latency mostly falls in a range of 2ms to 12ms.
By contrast, the network latency under TCP and UDP protocols varies between
200ms and 250ms. If the robot control process has learned a sensor's normal latency,
it is able to detect an attack
that delivers sensor values through the network.
Fingerprinting is advantageous with its simplicity and low overhead.
But it has limited usages.
Given a sensor
working in a periodical or proactive mode,
the robot control process cannot measure
its sensor latency for validation.
\paragraph*{\textbf{Cross-reference Validation (CRV)}}
CRV leverages
information from two or more sources to cross-check for verification.
The challenge in using CRV for an autonomous service robot
is that every sensor might be compromised
and
using different sensors for cross-checking is unreliable. Also,
not many sensors are installed in a small service robot
for similar purposes.
CRV must use some information that attackers are unaware of.
Let us still use the distance sensor for example.
A cleaning robot
can make historical records of the positions of stationary obstacles
in a normal working routine.
In fact, some
iRobot Roomba robots draw a map of the space
they have cleaned~\cite{robot:roomba-map:2018}.
Such historical records
can be secured and used as the norm to validate
distance sensor values.
If the distance sensor gives a value that badly
violates historical records,
CRV can indicate the occurrence of an attack.
Compared to fingerprinting, CRV can
detect attacks that compromise sensors working in the proactive mode
since CRV cross-checks by exploiting extra historical
records.
However, CRV requires continually
tracking the robot's motion so as to refer to
the correct record.
Also,
the accuracy of CRV is not very high
because records have been approximately made~\cite{robot:localization-detection:RAS-2018}.
\begin{figure}[t]
\centering
\scalebox{1.00}{\includegraphics[width=\columnwidth]{./Figures/shade2.pdf}}
\caption{An Illustration of Shade with Robot Controller}~\label{fig:shade}
\end{figure}
\paragraph*{\textbf{Network intrusion detection (NID)}}
NID performs an analysis over
the behavior, payload and contents of inbound and outbound network packets~\cite{security:network:SP-2010}.
As attackers remotely attack the robot via network,
NID should be practical.
Given a service robot working in a normal routine,
packets exchanged between it and a legitimate user must
follow a regular pattern and the network
payload should not change largely.
But when attackers undertake to obtain and alter sensor values,
they would bring about unusual network packets,
either in a large
quantity or with abnormal contents.
An independent process monitoring the network traffic
should detect such breaches.
A major drawback of NID is its high cost in computation
and energy. Therefore, in an autonomous service robot powered
by a battery, NID should
be periodically called
for energy-efficiency~\cite{CPS:battery:TDAES-2017}.
Also, NID cannot capture all attacks although they go through
the network interface.
Consider suspension attacks. If
attackers manage to compromise a sensor just at
the first try with few packets, NID
might neglect such an attack.
\subsection{The Design of Shade}
Each of the aforementioned methods has its strengths and limitations.
We have developed a hybrid method called the {\em \underline{sha}dow
\underline{de}tector} (Shade).
Shade acts as a shadow process of the robot control process and closely communicates with
the latter to avoid missing any attack imprint.
~\autoref{fig:shade} illustrates how the Shade process collaborates
with robot control process through inter-process communication.
The control process provides runtime information to Shade,
such as the motion trace, sensor values, sensor latencies, etc.
On the other side, Shade swiftly informs the control process in case of attacks.
\begin{algorithm}[t]
\caption{The Shade Process ({\tt Shade()})}\label{algo:shade}
\begin{algorithmic}[1]
\Require A request for attack detection $p$ with runtime information;\Comment{ $p$ may contain the sensor mode $\mu$, the sensor latency $\lambda$, the current location of robot $\zeta$, etc.}
\Ensure An attack alert $\gamma$ \Comment{ $\gamma$ will be either {\bf True} or {\bf False} }
\If {($p$ is with sensor information)}
\If{($\mu$ is \textit{PASSIVE})} \Comment{Robot actively demands sensor value}
\State \Comment{ Shade calls fingerprinting method with sensor latency}
\State{$\gamma :=$ {\tt fingerprinting\_check($\lambda$)}};
\Else \Comment{ The sensor reports to robot periodically or in emergency}
\State \Comment{ Shade calls CRV first with robot location, then NID}
\State{$\gamma :=$ {\tt CRV\_check($\zeta$)} {\bf Or} {\tt NID\_check()}};
\EndIf
\Else \Comment{ Robot controller queries without sensor information}
\State \Comment{ Shade calls NID method}
\State{$\gamma :=$ {\tt NID\_check()}};
\EndIf
\State \textbf{Return} $\gamma$ to the robot controller process;
\end{algorithmic}
\end{algorithm}
\setlength{\textfloatsep}{0.3cm}
\setlength{\floatsep}{0.3cm}
Shade is a hybrid mechanism of fingerprinting, CRV and NID so as to
achieve wide coverage, high accuracy and low overhead.
Algorithm~\ref{algo:shade} describes the main procedure of Shade.
The robot control process sends a request for attack detection either
in an on-demand or periodical way and the Shade process
returns whether an attack is happening or not.
If Shade receives a request with sensor information
(Lines~1 to 8 in Algorithm~\ref{algo:shade}),
it first determines the working mode
of the sensor. Given a sensor working at a passive mode
with a measurable latency,
Shade prefers the fingerprinting method that
comes with low cost but high accuracy (Lines 2 to 4).
However, as to a sensor working
in a proactive or periodical mode,
Shade calls CRV to validate the sensor value
against historical records (Lines 5 to 8); nevertheless,
due to the accuracy of CRV, Shade may use NID
for double check with a short-circuiting logical \textbf{Or} operator (Line 7).
Moreover, the robot control process may ask Shade without any sensor
information. For example, the control process can consult Shade every five
seconds. In this case, Shade needs to execute NID that finds out abnormal
network traffics (Lines 9 to 11).
In the end, Shade timely notifies the robot control process with a detection
result (Line 13).
Shade can detect various attacks and it
is beyond just integrating three methods in one process. First, Shade
explores the {\em context} provided by the robot control process for attack detection.
Generic NID can also detect most attacks
but with high cost for self-learning and frequent computations. Shade, however,
gains legitimate network behaviors shared by the robot control process,
which surely entails higher accuracy and less overhead. Second, Shade considers
the pros and cons of the three methods and complements them for wider coverage. Like
at Line~7 of Algorithm~\ref{algo:shade}, Shade makes NID recheck
if CRV generates a false result because of the latter's accuracy.
\subsection{Detection Results of Shade}
We compared Shade to fingerprinting, CRV and NID methods.
We used \textsc{Robo\-Fuzz}\xspace to initiate attacks in line with
two aforementioned attack models, i.e., suspension and fabrication attacks.
For each attack model,
a detection method
underwent ten trials of attacks. So in all we performed $2 \times 4 \times 10 = 80$ trials
regarding the composition of detection methods and attack models.
Every trial was triggered at the startup of the robot, which means
the robot is at the top-right corner as shown in \autoref{fig:case}.
We did so because an attack at the very beginning may
incur the most challenges for a detection method, especially when the
sensor works at a proactive mode reporting boolean values.
We use two metrics to evaluate the effect of detection.
One is the number of trials that a detection method successfully detected
under an attack model. The other one is the average reaction time of ten trials
for a detection method under each attack model.
~\autoref{tab:detect} summarizes the results collected in 80 trials.
Shade has successfully detected all trials
while
the limitations of other three methods are evident. For example,
fingerprinting is competent only when the sensor works in the
passive mode because the sensor latency is measurable.
NID is not suitable for a suspension attack
as such an attack model manages to suspend the sensor at the first attempt,
which hardly leaves any hint for NID to take effect.
Comparatively, Shade, as a hybrid detection method that
closely collaborates with the robot control
program, is not hindered by the working mode of sensors or attack models.
A notable observation revealed by~\autoref{tab:detect}
is that the average reaction time of Shade
is much shorter than, or comparable to, that of the other detection
methods. For suspension attacks,
CRV could detect them as well.
Given a suspension attack initiated at the startup of robot,
only when the robot reached the safe distance (20cm)
would CRV find that the sensor did not raise a `True' warning.
This is why the average reaction time for CRV and Shade
is about 27s.
For fabrication attacks, fingerprinting could instantly detect
them. Meanwhile, the reaction
time of CRV is much shorter for fabrication
attack model than two preceding attack models. It is
because of
the passive sensor mode with fabrication attacks.
Once the robot control program obtains a
sensor value,
it asks CRV to check the numeric distance, which facilitates CRV compared to
boolean values used in the preceding two attack models.
\begin{figure}[t]
\centering
\subfigure[Cleaned Distance]{\includegraphics[width=0.48\columnwidth]{./Figures/mdist3.pdf}~\label{fig:mdist}}
\hfill
\subfigure[Running time]{\includegraphics[width=0.48\columnwidth]{./Figures/mtime3.pdf}~\label{fig:mtime}}
\caption{A Comparison between Remit with Attacks and Normal Routine}
\label{fig:mit}
\end{figure}
The default position to initiate an attack is when the robot starts up, so the attack is issued at a distance of 200cm to the obstacle. To further verify the efficiency of Shade, we did more tests when
\textsc{Robo\-Fuzz}\xspace triggered attacks at eight different distances to the left wall.
~\autoref{fig:range} captures four curves of reaction time for Shade.
In particular, given an attack occurring at a very short distance to the wall,
say 25cm in~\autoref{fig:range}, Shade manages to detect it
at 0.6s to 3s, which efficiently protects the robot from security threats.
\subsection{Mitigation Results of Remit}
We have also done experiments to evaluate Remit.
The measurement of its effectiveness is the distance cleaned by the robot while its
efficiency is measured in terms of running time to clean the use case in \autoref{fig:case}.
We first made the robot clean the use case in a normal routine
without any attack and recorded the cleaned distance as well as running time.
Then, we
ran Remit with the robot under attacks.
Figure~\ref{fig:mdist} and Figure~\ref{fig:mtime} present the
results of cleaned distance and running time, respectively, for the normal routine and Remit.
Since Remit leverages the historical records maintained by Shade for cross-checking,
it can navigate the robot although the distance sensor is no longer reliable.
Owing to the accuracy limitation of records in navigation, Remit made losses of
3.3\% and 4.9\%, respectively, with two attack models. The overall loss
is 4.1\%. Such insignificant losses confirm the effectiveness of Remit.
On the other hand, after the robot entered the mitigation mode, Remit reduced the
velocity of robot by 10\%. Though, as the robot cleaned 4.1\% less distances
under attacks, the total running time at the mitigation mode is eventually
5.0\% more than that of the normal routine.
To sum up, Remit not only accomplishes scheduled tasks but also
restricts extra time cost to an acceptable extent.
\section{Evaluation}~\label{sec:evaluation}
In this section, we evaluate \textsc{Robo\-Fuzz}\xspace, Shade, and Remit to
answer the following questions.
\begin{enumerate}[label={\arabic*})]
\item Does \textsc{Robo\-Fuzz}\xspace manage to compromise an autonomous service robot? Compared to other fuzzing approaches,
does \textsc{Robo\-Fuzz}\xspace embrace a higher success rate?
\item Is Shade able to detect most of the attacks initiated through \textsc{Robo\-Fuzz}\xspace?
\item Can Remit retain the work efficacy of service robot when mitigating the attacks detected by Shade?
\end{enumerate}
In brief,
we first present experimental setup and evaluation results regarding
the competence of \textsc{Robo\-Fuzz}\xspace in compromising a real-world
cleaning robot with two attack targets.
Then we test Shade and Remit to show their effectiveness in detecting two attack models
and retaining the work efficacy of robot.
\subsection{Evaluation Setup}
\paragraph*{Platform}
We use the aforementioned iRobot Create 2~\cite{robot:irobot-create2:2015}
as the platform for evaluation.
We have prepared a control program in Python 3 that runs in the Raspberry Pi 3 Model B+.
The default velocity of the robot is set to be 50mm/s.
The path planning of the robot follows the classic zigzag fashion.
The main sensor used for the path planning is
an ultrasonic distance sensor (HC-SR04) installed
in front of the robot. The sensor
can detect an obstacle from 2cm to 400cm.
As mentioned in Section~\ref{sec:bg},
in the robot control program, we configure
the sensor to work in different modes to suit different attack models.
In the passive sensor mode, the control program asks for the sensor value.
In the proactive sensor mode, the sensor warns the control process
if an obstacle is nearby or periodically.
\begin{figure}[t]
\centering
\includegraphics[width=\columnwidth]{./Figures/impl6.pdf}
\caption{An Illustration of Initiating an Attack}~\label{fig:impl}
\end{figure}
As to the attacker side,
we have implemented \textsc{Robo\-Fuzz}\xspace with two attack models (cf. Section~\ref{sec:model}).
In light of the analyses in Section~\ref{sec:model},
we set the sensor mode to be passive
for fabrication attack model.
For the suspension attack model, we choose
the proactive mode.
\paragraph*{Implementation}
In order to manipulate
the iRobot\ Create 2, we make attack programs in
a computer
with Ubuntu 18.04.
We first exploit the attack vector of WiFi interface of Raspberry Pi
as the attack surface to invade the robot.
Today many users still use default or simple
passwords
or their credentials are stored
in plain text~\cite{robot:industrial-security:SP-2017,robot:Mi-hack:2018}.
For a Raspberry Pi with Raspbian, its default username/password
are `pi/raspberry'. After we successfully access the robot,
we start to compromise it.
Figure~\ref{fig:impl} exemplifies the process of altering one
sensor value. As adversaries, we fetch the sensor value $v$
through network (\circled{1} in Figure~\ref{fig:impl}), and
then alter it to be $\gamma$ via the function $f$
(\circled{2} in Figure~\ref{fig:impl}).
After sending back $\gamma$ and replacing $v$ (\circled{3} and
\circled{4} in Figure~\ref{fig:impl}), the robot control program would
proceed with $\gamma$ instead of $v$.
At runtime, \textsc{Robo\-Fuzz}\xspace frequently reads $v$, but only when it
perceives an appropriate opportunity, like the robot approaching
a wall, will it call $f$ and send faked $\gamma$ back to mislead
the robot control program.
\paragraph*{Configuration}
We have made two scenarios to test \textsc{Robo\-Fuzz}\xspace, Shade and Remit, respectively.
Figure~\ref{fig:room} shows the scenario we have used for testing \textsc{Robo\-Fuzz}\xspace. It
has two rooms that are connected by an automatic sliding door.
The cleaning robot needs to clean both rooms
starting from the top-left corner. When
the robot is working, we
try to compromise it using three fuzzing methods with two attack targets:
1) to damage the robot by crashing it
to a hard obstacle (e.g., wall or cabinet), and 2) to reduce the robot's
work efficacy by preventing it from entering and cleaning the right room.
As to three fuzzing methods, the first one is Radamsa fuzzing sensor values
for the control program,
the second one is random fuzzing that
initiates an attack at a random time with a hazardous
alteration of sensor values (e.g.,
changing $v$ of 10cm to $\gamma$ of 60cm), and the third one is \textsc{Robo\-Fuzz}\xspace.
For both attack targets, we conducted 30 trials for a fuzzing method.
We define the success rate as the fraction of the number of successful attacks to
30 trials in percentage.
We note that besides Shade,
the robot control program can
rule out anomalous distance sensor values, e.g., ones that fluctuate greatly,
and subsequently reset the sensor.
\begin{figure}[t]
\centering
\subfigure[The Use Case Scenario for Testing \textsc{Robo\-Fuzz}\xspace]{\includegraphics[width=0.9\columnwidth]{./Figures/room.pdf}~\label{fig:room}}
\vfill
\centering
\subfigure[The Use Case Scenario for Testing Shade and Remit]{\scalebox{1}{\includegraphics[width=1\columnwidth]{./Figures/case.pdf}}~\label{fig:case}}
\caption{An Illustration of Use Case Scenarios for Evaluation}
\label{fig:scenarios}
\end{figure}
Figure~\ref{fig:case} captures the scenario we would use to test Shade and Remit.
The reason why we evaluate them in a scenario different from Figure~\ref{fig:room}
is that we need a quantitative presentation to measure the work efficacy of robot
in case of attacks. As mentioned,
we use the quantitative success rate to show the effectiveness of \textsc{Robo\-Fuzz}\xspace.
On the other hand,
the width of the room in Figure~\ref{fig:case} is 200cm that falls into the range of
HC-SR04 ($\leq$400cm);
the cleaning robot would cruise in the room, so we can record the exact distance
a robot cleans with and without attacks.
To avoid physically crashing the robot
into walls or obstacles due to attacks,
we set a safe distance to be 20cm.
In other words,
without any attack,
when the distance to a wall or obstacle drops below 20cm,
the robot should stop moving and turn left or right; however,
on a successful attack, the robot spins itself in front of
an obstacle to indicate that it is being attacked
instead of really colliding with the obstacle.
Concerning the safe distance and the diameter of robot,
the robot would clean an estimate of 570cm overall in the room of Figure~\ref{fig:case}.
In addition, for the use of fingerprinting and CRV, we
have run the robot without any attack to collect
sensor latencies and historical records of obstacles.
\clearpage
\subsection{Target 1 for \textsc{Robo\-Fuzz}\xspace: Damaging the Robot}~\label{sec:scenario1}
In order to damage the robot, fuzzing methods must let the robot crash into a fixed
obstacle in Figure~\ref{fig:room}.
Note that the robot was not really damaged in trials
but would play a special sound
to indicate
it was enforced to be
within 5cm to an obstacle.
~\autoref{tab:t1} shows the number of successful attacks out of overall 30 trials for
three fuzzing methods when they tried to achieve the target of damaging the robot.
Radamsa failed in all trials
because the robot control process certainly
refused sensor values fuzzed by it
as they evidently deviate from normal sensor values expected
in the environment shown in Figure~\ref{fig:room}.
As to random fuzzing, with regard to the multiple stationary walls and pieces of furniture
in Figure~\ref{fig:room}, if it launched an attack at a moment when, though being randomly picked,
the robot was approaching closely to any wall or furniture,
the fuzzed sensor values might make the robot hit the obstacle and in turn
attain the attack target. Whereas, since random fuzzing acts based on randomization,
the success rate is low as confirmed by the experimental results (5 out of 30 trials).
\begin{table}[b]
\centering
\caption{The number of successful trials and success rates of three fuzzing methods to achieve the 1st target}\label{tab:t1}
\resizebox{\linewidth}{!}{
\begin{tabular} {|c|c|c|c|}
\hline
\rule{0pt}{5pt} Fuzzing Method & Radamsa & Random fuzzing & \textsc{Robo\-Fuzz}\xspace \\
\hline\hline
The number of successful trials & \multirow{1}{*}{ 0} & \multirow{1}{*}{ 5} & \multirow{1}{*}{ 30} \\ \hline
\multirow{1}{*}{ Success rate} & \multirow{1}{*}{ 0\%} & \multirow{1}{*}{ 16.7\%} &
\multirow{1}{*}{ 100\%} \\ \hline
\end{tabular}
}
\end{table}
\normalsize
\begin{figure}[t]
\centering
\subfigure[Robot moving towards a wall without any attack]{\includegraphics[width=0.475\columnwidth]{./Figures/fuzz/normalwall.pdf}~\label{fig:nwall}}
\hfill
\subfigure[Robot deceived with the appearance of dynamic obstacle]{\includegraphics[width=0.475\columnwidth]{./Figures/fuzz/fakewall.pdf}~\label{fig:fwall}}
\caption{A Comparison between Distance Sensor Values with and without \textsc{Robo\-Fuzz}\xspace when damaging the robot}\label{fig:rt-wall}
\end{figure}
\begin{figure}[t]
\centering
\subfigure[Robot moving towards automatic sliding door]{\includegraphics[width=0.475\columnwidth]{./Figures/fuzz/normaldoor.pdf}~\label{fig:ndoor}}
\hfill
\subfigure[Robot stopped with auto sliding door changed to be an immovable wall]{\includegraphics[width=0.475\columnwidth]{./Figures/fuzz/fakedoor.pdf}~\label{fig:fdoor}}
\caption{A Comparison between Distance Sensor Values with and without \textsc{Robo\-Fuzz}\xspace when prematurely stopping the robot}\label{fig:rt-door}
\end{figure}
On the other hand,
\textsc{Robo\-Fuzz}\xspace successfully damaged the robot in all 30 trials. Because
\textsc{Robo\-Fuzz}\xspace continued to observe the environment and monitor the state of robot,
at a proper occasion, it would generate sensor values that
brought the robot from the curve in Figure~\ref{fig:wall} to
the one in Figure~\ref{fig:away}.
For a thorough comparison,
we have collected distance sensor values in a normal
routine without any attack and
when \textsc{Robo\-Fuzz}\xspace took effect in one trial.
Figure~\ref{fig:nwall} indicates that the sensor values from the normal routine
well fit in a decreasing linear curve.
On the other hand,
in Figure~\ref{fig:fwall}, the solid linear curve links genuine sensor values
before the attack initiated by \textsc{Robo\-Fuzz}\xspace
and the dashed line fits sensor values that impaired the robot.
The two diagrams in Figure~\ref{fig:rt-wall}
clearly verify the capability of \textsc{Robo\-Fuzz}\xspace.
\subsection{Target 2 for \textsc{Robo\-Fuzz}\xspace: Reducing the Work Efficacy of Robot}~\label{sec:scenario2}
To reduce the work efficacy of the cleaning robot,
we called three fuzzing methods to
hinder the robot from tidying the right room.
In other words,
after the robot finished cleaning up the left room, the robot
should not cross the automatic sliding door due to attacks.
~\autoref{tab:t2} shows that \textsc{Robo\-Fuzz}\xspace achieves a success
rate of 93.3\% while the rates for Radamsa and random fuzzing are still low.
Note that the success rates for both random fuzzing and \textsc{Robo\-Fuzz}\xspace drop compared to those with
the first target. The reason is that, when damaging the robot,
both fuzzing methods could find a number of static obstacles to leverage,
but there is only one automatic sliding door connecting the two rooms.
Even so, \textsc{Robo\-Fuzz}\xspace managed to sense the existence of the automatic sliding door, and
successfully changed sensor values in most trials (28 out of 30)
to be decreasing ones that emulated the door as an immovable wall.
We again tracked sensor values when the robot was going through
the sliding door without attack (cf. Figure~\ref{fig:ndoor}). Also in one successful
trial, we recorded sensor values the
control process received
before and after \textsc{Robo\-Fuzz}\xspace launched the attack (cf. Figure~\ref{fig:fdoor}).
As observed in Figure~\ref{fig:rt-door}, after 12s,
\textsc{Robo\-Fuzz}\xspace effectively deceived the robot which subsequently
stopped in front of the automatic sliding door.
\begin{table}[b]
\centering
\caption{The number of successful trials and success rates of three fuzzing methods to achieve the 2nd target}\label{tab:t2}
\resizebox{\linewidth}{!}{
\begin{tabular} {|c|c|c|c|}
\hline
\rule{0pt}{5pt} Fuzzing Method & Radamsa & Random fuzzing & \textsc{Robo\-Fuzz}\xspace \\
\hline\hline
The number of successful trials & 0 & 3 & 28 \\ \hline
Success rate & 0\% & 10.0\% & 93.3\% \\ \hline
\end{tabular}
}
\end{table}
\normalsize
\section{\textsc{Robo\-Fuzz}\xspace for Autonomous Service Robot}~\label{sec:fuzzing}
In this section, we first
model the state transitions of autonomous service robot and
explain the feasibility and procedure of \textsc{Robo\-Fuzz}\xspace through state composition
(cf. Section~\ref{sec:composition}).
Then
we model \textsc{Robo\-Fuzz}\xspace, a systematic scheme that effectively
damages autonomous service robot by fuzzing sensor values
(cf. Section~\ref{sec:robofuzz}).
\subsection{State Compositions of \textsc{Robo\-Fuzz}\xspace}~\label{sec:composition}
An autonomous service robot can be
modeled as a finite state machine (FSM). The upper-left
part of Figure~\ref{fig:composition} captures a segment of a simplified FSM
for a cleaning robot. This segment applies to all four
scenarios mentioned in Section~\ref{sec:mot} as it
shows how the cleaning robot proceeds on meeting
an obstacle that can be either fixed or movable.
Meanwhile, as developers of the robot,
we maintain the FSM (cf. Figure~\ref{fig:composition}) and
continuously observe the environmental parameters at runtime.
The outcome of \textsc{Robo\-Fuzz}\xspace hence can be viewed as a {\em composition} of two FSMs
(\circled{1} in Figure~\ref{fig:composition}). In particular,
once \textsc{Robo\-Fuzz}\xspace notices a significant change of an
environmental parameter that is to incur a state transition,
like the distance to an obstacle decreasing to be very small,
\textsc{Robo\-Fuzz}\xspace will fabricate a series of rational sensor values
and feed them to the robot control program to make an illusion (\circled{2}
in Figure~\ref{fig:composition}), e.g., the obstacle moving away.
By doing so, \textsc{Robo\-Fuzz}\xspace misleads the robot into
the FSM intended by \textsc{Robo\-Fuzz}\xspace, which, however, the robot control program will not
be aware of. Eventually the robot is supposed to be
wrecked because of hitting the obstacle (\circled{3} in Figure~\ref{fig:composition}).
We note that the main purpose of \textsc{Robo\-Fuzz}\xspace
is to unveil the vulnerability of robot control program
and in turn compromise the robot
through fuzzing sensor values.
\textsc{Robo\-Fuzz}\xspace is an automated procedure.
It keeps monitoring the states of robot and environmental parameters. At a proper occasion, it activates
the state composition with faked but rational sensor values
to deceive the robot control program.
\begin{figure}[t]
\centering
\scalebox{1.00}{\includegraphics[width=\columnwidth]{./Figures/newcomp2.pdf}}
\caption{An Illustration of State Composition of \textsc{Robo\-Fuzz}\xspace}~\label{fig:composition}
\end{figure}
\subsection{Fuzzing Autonomous Service Robots with \textsc{Robo\-Fuzz}\xspace}~\label{sec:robofuzz}
How \textsc{Robo\-Fuzz}\xspace compromises an autonomous service robot is modeled as follows.
Because \textsc{Robo\-Fuzz}\xspace works in line with
the state of an autonomous service robot and the environment,
it falls into the category of {\em directed fuzzing}.
Directed fuzzing starts off with a given target, such as damaging the robot or
reducing the robot's work efficacy.
Let these targets form a set,
\setlength{\belowdisplayskip}{3pt}\setlength{\belowdisplayshortskip}{3pt}\setlength{\abovedisplayskip}{3pt}\setlength{\abovedisplayshortskip}{3pt}
\begin{equation*}
\boldsymbol {T} = \{\tau_0, \tau_1, ..., \tau_i, ..., \tau_{n-1}\},
\end{equation*}
where $\tau_i$ ($0\leq i < n$) is one independent target, e.g.,
to damage the robot, and the value of $n$ depends on the intention of adversaries.
Before fuzzing, we, at the standpoint of adversaries,
assume that
the physical states of the robot monitored at runtime form a set, i.e.,
\setlength{\belowdisplayskip}{3pt}\setlength{\belowdisplayshortskip}{3pt}\setlength{\abovedisplayskip}{3pt}\setlength{\abovedisplayshortskip}{3pt}
\begin{equation*}
\boldsymbol {Z} = \{\zeta_0, \zeta_1, ..., \zeta_k, ..., \zeta_{p - 1}\}.
\end{equation*}
We also assume a thorough understanding of the robot, particularly
all the components embodied
in the robot, say,
\setlength{\belowdisplayskip}{3pt}\setlength{\belowdisplayshortskip}{3pt}\setlength{\abovedisplayskip}{3pt}\setlength{\abovedisplayshortskip}{3pt}
\begin{equation*}
\boldsymbol {C} = \{s_0, s_1, ..., s_{l-1}, a_0, a_1, ..., a_{m - 1}\},
\end{equation*}
in which there exist all $l$ sensors
and $m$ actuators.
\textsc{Robo\-Fuzz}\xspace relies on the $l$ sensors to spot the environment.
In addition,
\textsc{Robo\-Fuzz}\xspace can also utilize actuators
for a target
although we use sensors for illustration
in preceding sections, e.g., by driving wheels faster than usual
towards an obstacle.
To attain a specific target,
\textsc{Robo\-Fuzz}\xspace must formulate 1) what states and environmental parameters should be
monitored,
2) which sensors and actuators in $\boldsymbol {C}$ are useful
for the target,
and 3) when (i.e., the aforementioned `appropriate' occasion)
and how to alter sensor values or actuator commands
for a detrimental state transition (e.g., transiting
between different curves shown in Figure~\ref{fig:states}).
Hence,
for a target $\tau_i$, we need
1) a subset of $\boldsymbol {Z}$, i.e., $\boldsymbol {Z}_i$, which subsumes
states that are useful for $\tau_i$,
2) a subset of $\boldsymbol {C}$, say, $\boldsymbol {C}_i$, which is a list
of essential sensors and actuators for $\tau_i$, and
3) a set $\boldsymbol {V}_i$ in which each element
includes a tuple for the $j$-th ($0 \leq j < |\boldsymbol {C}_i|$)
sensor or actuator in $\boldsymbol {C}_i$, i.e.,
\setlength{\belowdisplayskip}{3pt}\setlength{\belowdisplayshortskip}{3pt}\setlength{\abovedisplayskip}{3pt}\setlength{\abovedisplayshortskip}{3pt}
\begin{equation*}
\left \langle v^{(i)}_{j}, \gamma^{(i)}_{j}, f^{(i)}_{j} \right \rangle.
\end{equation*}
$v^{(i)}_{j}$ is a normal sensor value/actuator command
while $\gamma^{(i)}_{j}$
is a fuzzed sensor value/actuator command. For instance,
$v^{(i)}_{j}$ and $\gamma^{(i)}_{j}$ fall into the range of $[2, 400]$ (cm)
for an HC-SR04 ultrasonic distance sensor.
Note that both of them
can also be
a special value $\varnothing$ which stands for the non-existence
of sensor value/actuator command. $\varnothing$ is useful when there ought to be
no sensor value/actuator command or adversaries intentionally drop a sensor
value/actuator command.
The third element in the tuple,
i.e., $f^{(i)}_{j}$, is a function,
\setlength{\belowdisplayskip}{3pt}\setlength{\belowdisplayshortskip}{3pt}\setlength{\abovedisplayskip}{3pt}\setlength{\abovedisplayshortskip}{3pt}
\begin{equation}\label{eq:f}
f^{(i)}_{j}: \boldsymbol {Z}_i \times \mathit{Dom} \left (v^{(i)}_{j} \right ) \rightarrow Dom \left ( \gamma^{(i)}_{j} \right),
\end{equation}
where $\mathit{Dom}(x)$ means the domain of $x$.
Assuming that the robot is at a state
$\zeta \in \boldsymbol {Z}_i$ (e.g., moving forward)
and one or multiple environmental parameters
are to change,
like when the distance to obstacles, i.e., $v^{(i)}_{j}$, is going to decrease to be 6cm,
$f^{(i)}_{j}$ alters $v^{(i)}_{j}$ to $\gamma^{(i)}_{j}$,
say, from 6cm to 60cm (i.e., making a fixed obstacle `move').
$f^{(i)}_{j}$ hence converts
a normal sensor value/actuator command or $\varnothing$
to be a still rational but harmful value.
Also
it may
replace a sensor value/actuator command with $\varnothing$ to
hinder the robot control process from interacting with corresponding sensors/actuators.
$f^{(i)}_{j}$ keeps affecting the robot control process until the
achievement of target $\tau_i$.
Finally, we capture a successful fuzzing procedure for target $\tau_i$
as:
\setlength{\belowdisplayskip}{3pt}\setlength{\belowdisplayshortskip}{3pt}\setlength{\abovedisplayskip}{3pt}\setlength{\abovedisplayshortskip}{3pt}
\begin{equation}\label{eq:entail}
\begin{aligned}[b]
\boldsymbol G_i \vDash \tau_i,
\end{aligned}
\end{equation}
in which $\boldsymbol G_i$ is defined as
\begin{equation}\label{eq:G}
\begin{split}
\boldsymbol G_i = \bigcup_{\zeta \in \boldsymbol {Z}_i} \left \{\langle v^{(i)}, \gamma^{(i)}, f^{(i)} \rangle\ |\ \langle v^{(i)}, \gamma^{(i)}, f^{(i)} \rangle \in \boldsymbol {V}_i \right. \\ \left.\ \land\ \mathit{Dom} \left (f^{(i)} \right ) = \zeta \times \mathit{Dom} \left (v^{(i)} \right ) \right \}.
\end{split}
\end{equation}
$\boldsymbol G_i$ means that, for every state $\zeta \in \boldsymbol {Z}_i$,
\textsc{Robo\-Fuzz}\xspace discovers all tuples related
to $\zeta$ and calls the respective function $f^{(i)}$ to
fabricate and/or drop
one or multiple sensor values and/or actuator commands for the success of $\tau_i$.
\begin{algorithm}[t]
\caption{The $\boldsymbol G_i$ for a Distance Sensor}\label{algo:Gi}
\begin{algorithmic}[1]
\Require The target $\tau_i$ for fuzzing
\Ensure $\gamma^{(i)}_{j}$ for the distance sensor $s_i$;
\While {(the robot is working)}
\State Get the current state $\zeta_k$, and sensor value $v^{(i)}_{j}$;
\If {($\tau_i$ is to crash the robot)}
\If {($v^{(i)}_{j}$ is decreasing)} \Comment{Approaching an obstacle}
\If {($v^{(i)}_{j}$ gradually decreasing)}
\State \Comment{Figure~\ref{fig:wall} $\Rightarrow$ Figure~\ref{fig:away}}
\State When $v^{(i)}_{j}$ is small enough, e.g., $v^{(i)}_{j} < 20$cm,
\indent\indent\indent $v^{(i)}_{j} \xrightarrow{f^{(i)}} \gamma^{(i)}_{j}$
($\gamma^{(i)}_{j}$ continues to increase);
\ElsIf {($v^{(i)}_{j}$ decreasing more sharply)}
\State \Comment{Figure~\ref{fig:face} $\Rightarrow$ Figure~\ref{fig:away}}
\State $v^{(i)}_{j} \xrightarrow{f^{(i)}} \gamma^{(i)}_{j}$ ($\gamma^{(i)}_{j}$ no longer decreases but
\indent\indent\indent gradually increases);
\EndIf
\EndIf
\ElsIf{($\tau_i$ is to reduce the robot's work efficacy)}
\If {($v^{(i)}_{j}$ increases and continues increasing)}
\State \Comment{Figure~\ref{fig:away} $\Rightarrow$ Figure~\ref{fig:wall}}
\State $v^{(i)}_{j} \xrightarrow{f^{(i)}} \gamma^{(i)}_{j}$ ($\gamma^{(i)}_{j}$ continues to decrease);
\ElsIf {($v^{(i)}_{j}$ suddenly increases but then drops)}
\State \Comment{Figure~\ref{fig:slide} $\Rightarrow$ Figure~\ref{fig:wall}}
\State When $v^{(i)}_{j}$ suddenly increases,
$v^{(i)}_{j} \xrightarrow{f^{(i)}} \gamma^{(i)}_{j}$
\indent\indent\indent($\gamma^{(i)}_{j}$ continues to decrease);
\EndIf
\EndIf
\EndWhile
\State \textbf{Return} $\gamma^{(i)}_{j}$ to replace $v^{(i)}_{j}$ for $\tau_i$;
\end{algorithmic}
\end{algorithm}
\setlength{\textfloatsep}{0.3cm}
\setlength{\floatsep}{0.3cm}
\paragraph*{Implementing $G_i$}
The implementation of $\boldsymbol G_i$ is based on the rationale discussed in the preceding section (cf. Section~\ref{sec:mot}).
Algorithm~\ref{algo:Gi} shows the implementation of $G_i$ for a distance sensor
while
the target $\tau_i$ is either to crash the robot or reduce the robot's
work efficacy.
\textsc{Robo\-Fuzz}\xspace continuously tracks running states of an autonomous service robot
and waits for a proper time to fuzz the robot (Lines 1 to 2 in Algorithm~\ref{algo:Gi}).
For instance, when the sensor value $v^{(i)}_{j}$ is gradually decreasing (Line 4), \textsc{Robo\-Fuzz}\xspace realizes that there is a fixed obstacle ahead.
Therefore, to crash the robot ($\tau_i$ at Line 3),
the $G_i$ function would generate sensor values, i.e.,
$\gamma^{(i)}_{j}$, which continue increasing to resemble a leaving obstacle (Line 7).
By doing so, \textsc{Robo\-Fuzz}\xspace aims to use faked sensor values to change the scenario shown by Figure~\ref{fig:wall} to the one in Figure~\ref{fig:away}.
Algorithm~\ref{algo:Gi} also shows how to convert scenarios for other types of obstacles (
Lines 8 to 10, Lines 12 to 15, and Lines 16 to 18).
% NOTE(review): the demo text below (through the second \section{Introduction})
% appears to be leftover IEEEtran template boilerplate; consider removing it
% before submission.
\section{Introduction}\label{sec:introduction}
\IEEEPARstart{T}{his} demo file is intended to serve as a ``starter file''
for IEEE Computer Society journal papers produced under \LaTeX\ using
IEEEtran.cls version 1.8b and later.
I wish you the best of success.
\hfill mds
\hfill August 26, 2015
\subsection{Subsection Heading Here}
Subsection text here.
\subsubsection{Subsubsection Heading Here}
Subsubsection text here.
\section{Conclusion}
The conclusion goes here.
\appendices
\section{Proof of the First Zonklar Equation}
Appendix one text goes here.
\section{}
Appendix two text goes here.
\ifCLASSOPTIONcompsoc
\section*{Acknowledgments}
\else
\section*{Acknowledgment}
\fi
The authors would like to thank...
\ifCLASSOPTIONcaptionsoff
\newpage
\fi
\section{Introduction}
Autonomous service robots are widely used to
not only relieve people from dirty,
monotonous, and dull tasks, but also reduce economic costs~\cite{Fiorini2000, 1242021, robot:service:RAS-2013, robot:service:RAS-2017, 8793764}.
For example, cleaning robots gain wide popularity in
tidying private apartments and public places.
Seoul-Incheon and Singapore Changi airports have deployed
cleaning robots to replace human cleaners, and the latter is expected to
save 20\% of housekeeping manpower~\cite{robot:LG:2017, robot:changi-T4:Aug-2017}.
Since autonomous service robots are sharing social spaces with humans at home,
in the offices and even in critical infrastructures like
airports and banks, their security and safety are of paramount importance,
especially considering that they operate autonomously without human attendance.
Robots are generally categorized as
cyber-physical systems (CPS).
A robot
typically has
1) a digital controller, e.g., Raspberry Pi, to manage the system,
2) physical components, such as sensors and actuators, to sense the surrounding
environment (e.g., distance) and to manipulate physical entities (e.g., wheels and
robotic arms), respectively, and 3)
cyber components that connect the robot to
networks (e.g., for remote control via smartphones).
The robot control program running in the controller
is critical to the security and safety of a robot as it decides how to
manoeuvre actuators of the robot on reading sensor values.
A number of studies have
revealed that it is possible
to compromise a CPS through fraudulent sensor values, while
mitigating such attacks usually requires
the involvement of a cloud server for remote computation or attestation~\cite{security:power-grid:CCS-2009,security:stealthy-attacks:CCS-2016,security:CPS:DATE-2017,security:CPS-sensor:DAC-2017,security:Orpheus:ACSAC-2017}.
However, such methods are not applicable to autonomous service robot. The reason is threefold.
Firstly, the computational resource and battery capacity
are relatively limited for an economical autonomous service robot compared to large CPS, say, a power grid.
Secondly, the vast popularity of autonomous service robots imposes overwhelming difficulty
on security patches or remote attestation from time to time.
Thirdly, many autonomous service robots move themselves to
complete planned tasks, which differentiates them from stationary CPS like power grids or 3D printers and
necessitates a mitigation method that accommodates the movement of autonomous service robots.
As a result, it is preferable and practical to secure an autonomous service robot as early as at its design and implementation stage.
In this paper, we work at the standpoint of developers
to enhance the security and safety
of autonomous service robots, particularly ones that are movable
because they would be physically detrimental to human beings once compromised.
We proceed along two dimensions. On one hand,
we attempt to systematically scrutinize
the security threats to autonomous service robot through investigating
the values of critical sensors, since
these sensor values, as inputs to the robot control program, determine
the next states of robot. On the other hand, with regard to the uncovered threats,
we develop an efficient algorithm to mitigate their impacts while retaining most of
the robot's work efficacy.
Without loss of generality, we illustrate with
an autonomous service robot cruising
by means of an ultrasonic distance sensor to avoid obstacles.
Once the distance sensor indicates a close obstacle ahead,
the robot control program ought to direct the robot to turn left or right.
Otherwise, the robot would
crash into the obstacle. As a result,
altering the sensor value to be malicious for the robot control program
would inflict serious threats to the robot.
We hence employ the idea of software {\em fuzzing} to test the robot control program.
In software testing, fuzzing is used to
identify security vulnerabilities or bugs in a program
by subjecting the program to various kinds of input,
and the program may crash or yield absurd outputs~\cite{fuzzing:book-2008, fuzz:radamsa, security:fuzzing:CCS-2017}.
By fuzzing the robot control program, we aim to discover as many flaws as possible
in the robot control program and secure the robot in turn.
We use a state-of-the-art fuzzing tool,
i.e., Radamsa~\cite{fuzz:radamsa},
to fuzz the robot control program
of the aforementioned movable robot employing a distance sensor
for motion. We generate and feed a series
of distance sensor values
to the robot control program
replacing real-world distances when the robot is moving.
The robot trembles because the sensor values fuzzed by Radamsa,
as intended to maximally uncover
bugs of a program, fluctuate significantly.
We can easily patch the robot control program with a filter
to rule out such volatile sensor values, since
a moving robot working in a rational environment is
expecting sensor values that fall in a reasonable
range in line with the environment and the state of robot.
For instance, a robot moving towards a wall should
continuously receive decreasing distance sensor values.
The analysis of arbitrary and irregular sensor values,
however, inspires us to go further in fuzzing the robot control program, i.e.,
to test the control program with rational and regular sensor values
in a {\em directed fuzzing} way. The distance can be viewed as a critical
{\em environmental parameter} for a moving service robot as it
triggers state transitions for the robot.
For example,
a moving robot receiving decreasing distance sensor values
would turn left or right while the distance gradually drops
below a threshold.
Given a dynamic obstacle, say,
an automatic sliding door, it may move out of the robot's path and
the distance sensor value should
suddenly increase to a large value, after which the robot
keeps moving forward.
Note that the robot control program is unable to ascertain whether an obstacle
is truly dynamic or static solely depending on distance sensor values,
because the scenarios where
the distance either monotonically decreases or abruptly increases
are both possible in the real world.
Assume that the robot is moving towards a hard wall, but
we intentionally replace the distance sensor values with ones that resemble
the getaway of a dynamic obstacle. The robot should
collide with the wall.
The aforementioned example addresses the essence of our directed fuzzing strategy, namely \textsc{Robo\-Fuzz}\xspace.
In a nutshell, by
investigating the state transitions and environmental parameters that
influence the behaviors of an autonomous service robot,
\textsc{Robo\-Fuzz}\xspace generates rational but harmful sensor values so as to mislead the robot
for concrete threats.
Adversaries can implement
\textsc{Robo\-Fuzz}\xspace with realistic attack models,
like suspending or fabricating sensor values,
to compromise an autonomous service robot.
As developers, we move forward and
defend against the attacks entailed via \textsc{Robo\-Fuzz}\xspace
by detecting and mitigating them. There are two concerns in doing so.
First, the detection and mitigation should not be
heavyweight regarding the limited computational resources of an autonomous
service robot.
Secondly, once an attack is detected, the mitigation cannot simply shut down the
robot but should maximally retain the robot's work efficacy. Nevertheless,
as mentioned, the robot control program alone
cannot rule out rational but anomalous sensor values.
We need further information that can be used to counteract \textsc{Robo\-Fuzz}\xspace.
We note that, for a movable service robot, such as a cleaning robot,
it is supposed to repeatedly cruise in a certain and steady place.
Consequently, the robot is able to make and maintain a historical record of
obstacles for the place~\cite{robot:roomba-map:2018}. Such a
historical record is an exploitable resource for us to detect the attacks initiated through
\textsc{Robo\-Fuzz}\xspace. Concerning that
\textsc{Robo\-Fuzz}\xspace deceives the robot control program using fuzzed distance sensor values that emulate a dynamic obstacle,
a historical record can help the robot control program to cross-check
if the obstacle is really dynamic so as to avoid a collision where necessary.
The historical record is also effectual for us to mitigate the impact caused by \textsc{Robo\-Fuzz}\xspace.
A movable autonomous robot must keep moving to complete
the task planned for it even in the
presence of an untrustworthy distance sensor.
Although the robot control program cannot put reliance on distance sensor values,
it can reuse the historical record to
circumvent obstacles and
navigate the robot. Reusing such records not only retains a movable robot's work efficacy,
but also gains high cost efficiency in mitigating for an economical robot.
The main ideas of this paper are summarized as follows.
\begin{itemize}[leftmargin=5mm,nosep]
\setlength{\itemsep}{-\itemsep}
\item We propose \textsc{Robo\-Fuzz}\xspace which tests an autonomous service robot by fuzzing rational but harmful sensor values so as to mislead the robot's control program;
\item To defend against the attacks initiated by \textsc{Robo\-Fuzz}\xspace, we develop detection and mitigation methods which leverage historical records to maximally protect the robot and efficiently accomplish planned tasks.
\end{itemize}
\textsc{Robo\-Fuzz}\xspace and strategies of detection and mitigation to counteract \textsc{Robo\-Fuzz}\xspace
form a self-contained and systematic scheme that
helps to develop a
secure autonomous service robot. We have prototyped them with a real-world movable robot,
i.e., iRobot Create 2 with an HC-SR04 distance sensor.
Experimental results confirm that \textsc{Robo\-Fuzz}\xspace attains
up to 93.3\% success rate in imposing threats
onto a moving iRobot Create 2.
Our detection and mitigation methods also
efficiently detect attacks at a very high rate and make the
robot being under attack accomplish scheduled tasks with an
insignificant loss of work efficacy, i.e., 4.1\% overall.
The remainder of this paper is organized as follows.
In Section~\ref{sec:bg}, we present the background of autonomous service robot
and attack models it is prone to.
We conduct a motivational study to incur concrete threats to a movable service robot
in Section~\ref{sec:mot}. In Section~\ref{sec:fuzzing}, we detail the design
and methodology of \textsc{Robo\-Fuzz}\xspace. In Section~\ref{sec:detection} and Section~\ref{sec:migitation}, respectively,
we show our algorithms for detecting and mitigating threats incurred by \textsc{Robo\-Fuzz}\xspace.
We present experimental results with a prototype built with iRobot Create 2 in Section~\ref{sec:evaluation}.
We discuss threats to validity in Section~\ref{sec:threats} and
conclude the paper in Section~\ref{sec:conclusion}.
\section{Mitigation with Remit}\label{sec:migitation}
Once Shade detects
any attack affecting an autonomous service robot,
we must mitigate the attack's impact. A straightforward solution
is to halt the robot immediately.
However, a shutdown of the robot badly loses its work efficacy
because the robot is supposed to have a scheduled task, like tidying a room.
As a result, we need a mitigation algorithm that retains as much work
efficacy as possible for the robot being attacked. In particular,
the mitigation algorithm ought to take into account two issues.
First, an autonomous service robot significantly differs from
stationary CPS and handheld smartphones
as the robot needs to move itself to work.
Since the distance sensor is not reliable due to attacks,
how to navigate the robot to continue its motion
must be resolved. Second, because of the limited resources of a small service robot,
including the computation capability and energy supply,
the mitigation algorithm should be lightweight and cost-efficient.
Regarding the two challenges, we have
designed a mitigation algorithm, namely
{\em \underline{re}taining-oriented
\underline{mit}igation} ({\em Remit}), to achieve
the least loss of work efficacy for an autonomous service robot.
One noteworthy point of Remit is that, it reuses the
historical records used by Shade in detecting attacks with CRV, which
not only preserves the motion of robot, but also avoids any extra cost
for enabling the navigation.
Algorithm~\ref{algo:mitigation}
captures the procedure of Remit.
We define that the robot, when not being attacked,
is in the {\em normal} mode. Remit switches the robot to the {\em mitigation} mode once Shade
detects an attack. On entering the mitigation mode, the robot first decelerates
(Line 2 of Algorithm~\ref{algo:mitigation}). This helps it have more time to
respond to an emergent object, say, an obstacle.
Then,
Remit leverages the historical records to navigate the robot (Lines 3 to 4).
Since these records are not very accurate, Remit
tries to repair the compromised sensor through resetting (Line~5) and
calls the network module to
block attackers (Line~6).
If the attackers are successfully blocked,
Remit will switch the robot back to the
normal mode (Lines 7 to 8). Remit also needs to deal with a dynamic
obstacle (e.g., a pet or person)
if the robot cannot move at a time
but no obstacle was recorded (Lines 10 to 13). Remit
alerts the pet or person by playing a sound (Line 12) and continues moving (Line 14). If the robot
completes the scheduled task, Remit stops the robot
(Lines 15 to 17). Otherwise, Remit
repeats the aforementioned steps until Shade detects no
attack any more (Line 18).
Remit attempts to guarantee the work efficacy of the robot.
Since the robot needs to move at a lower velocity,
the time needed to complete a planned task might become longer.
However, with regard to the robot being under attack,
such additional time cost is insignificant and acceptable.
\section{Motivation and Overview}\label{sec:mot}
\begin{figure}[t]
\centering
\subfigure[Suspension Attack]{\includegraphics[width=0.8\columnwidth]{./Figures/sus5.pdf}~\label{fig:suspension-model}}
\subfigure[Fabrication Attack]{\includegraphics[width=0.8\columnwidth]{./Figures/fab5.pdf}~\label{fig:fabrication-model}}
\caption{An Illustration of Typical Attacks Models}\label{fig:models}
\end{figure}
\subsection{Security Threats for Autonomous Service Robot}\label{sec:model}
\begin{figure*}[t]
\centering
\subfigure[A static obstacle (e.g., a wall)]{\includegraphics[width=0.49\columnwidth]{./Figures/state11.pdf}~\label{fig:wall}}
\hfill
\subfigure[A dynamic obstacle suddenly moves in the same direction as robot]{\includegraphics[width=0.49\columnwidth]{./Figures/state21.pdf}~\label{fig:away}}
\hfill
\subfigure[A dynamic obstacle suddenly moves towards the robot]{\includegraphics[width=0.49\columnwidth]{./Figures/state31.pdf}~\label{fig:face}}
\hfill
\subfigure[A dynamic obstacle suddenly moves out of the path of robot]{\includegraphics[width=0.49\columnwidth]{./Figures/state41.pdf}~\label{fig:slide}}\\
\caption{An Illustration of State Transitions for a Cleaning Robot Regarding Four Types of Obstacles} \label{fig:states}
\end{figure*}
Recently, Bonaci et al.~\cite{robot:surgical-robot:2015} investigated the
vulnerabilities of teleoperated surgical robots and
Quarta et al.~\cite{robot:industrial-security:SP-2017} performed an empirical
analysis on the security issues of industrial robots. These works draw
the attention of research community to the security of robots found in
factories, operating rooms, and so on. Nevertheless, such awareness
should be extended to the security of autonomous service robots.
In practice, Giese and Wegemer have managed to hack a Xiaomi
Mi cleaning robot~\cite{robot:Mi-hack:2018}. Their
success should not only alert robot manufacturers, but also the users of such
service robots.
As mentioned, the robot control program maneuvers
an autonomous service robot by reading sensor values.
On the other hand, the network interface of robot widely
provides adversaries an exploitable attack surface,
because many users still use default or weak passwords
today, especially for domestic robots. As a result,
adversaries are able to manipulate the robot's sensor values
through unauthorized
remote access so as to misguide the robot control program.
In the meantime, there are multiple attack models
for adversaries to follow.
In this paper we consider two
harmful and representative ones that have been manifested recently~\cite{fingerprint:CIDS:Security-2016, SABALIAUSKAITE2017174},
i.e., {\em suspension attack} and {\em fabrication attack}.
\paragraph*{\textbf{Suspension Attack}}
As shown in Figure \ref{fig:suspension-model}, attackers
suspend sensors from sending out information.
A sensor at the passive mode, once suspended, would
leave a null response
to the robot control program, which
misleads the control program to
conclude that the sensor malfunctions.
If the sensor works at the proactive mode,
the impact of suspension attack
should be even worse.
Consider a distance sensor that alerts the robot control program
only in case of a very close obstacle.
After a successful suspension attack,
the control program would no longer receive any alert.
As a consequence,
the robot might crash into an obstacle.
\paragraph*{\textbf{Fabrication Attack}}
With a fabrication attack,
adversaries fabricate harmful sensor values and feed them to
the robot control program.
As shown by Figure~\ref{fig:fabrication-model}, when the robot
is in motion, the control program asks for a distance sensor value
to decide whether an obstacle is nearby to the robot.
Noticing such a request, adversaries replace the normal
sensor value with an anomalous one. The control
program would accordingly make a wrong decision and put
the robot into a concrete danger.
\subsection{A Motivational Study}
Without loss of generality, we choose a programmable
cleaning robot, i.e., iRobot Create 2~\cite{robot:irobot-create2:2015},
for case study.
We run a control program in
a Raspberry Pi 3 to maneuver the robot and install an ultrasonic distance sensor (HC-SR04)
to enable the robot to avoid obstacles.
As developers, we use the WiFi interface shown in~Figure~\ref{fig:arch}
as the port to communicate with the robot controller
for monitoring and debugging.
The distance to obstacles is
a crucial environmental parameter
for a cleaning robot.
The control program depends on the distance sensor
values received at runtime to decide whether the robot moves forward or turns.
As these sensor values are the input to the control program,
the first attempt we made
was to leverage the idea of software fuzzing, which
generates various kinds of input values to a program so as to inflict
disorder on the program or even crash it.
We used Radamsa~\cite{fuzz:radamsa}, a
state-of-the-art fuzzing tool,
to make a series of 1,006 values
within the distance range supported by HC-SR04 (2cm to 400cm).
A segment of the values fuzzed by Radamsa are as follows:
\setlength{\abovedisplayskip}{3pt}
\setlength{\belowdisplayskip}{3pt}
\begin{equation*}
\{..., 26, 128, 5, 16, 3, 241, 107, 6, 255, 45, 240, 4, 18, ... \}
\end{equation*}
We supposed that such distance sensor values, when fed to
the control program, should have compromised the robot.
However, after we delivered them
to satisfy the requests raised by the control program,
the control program would refuse them as
anomalies.
We then analyzed the failure of fuzzing the control program in isolation.
The reason is mainly due to the
concept of software fuzzing and the mechanism
of service robot.
Fuzzing a program is used to
reveal bugs and security vulnerabilities
of a program. Hence the fuzzed inputs, as shown in the above segment,
fluctuate significantly so as to
traverse different code paths and generate
as many corner cases as possible. Therefore,
fuzzing the control program is a good
approach to test the program alone but ignores the
mechanism of service robot. As mentioned, the control program transits a
cleaning robot among states depending on sensor values it receives.
A cleaning robot moving towards a wall will
receive decreasing sensor values and in the end it should
turn or stop, so the robot transits from a state of
moving forward to the next state of turning or stopping.
Since the sensor values fuzzed by Radamsa change strikingly
and continually, they are easily distinguished,
as they obviously deviate from what the control program expects
in an ordinary environment.
We thoroughly investigate
the states of cleaning robot
and environmental parameters that drive the robot to do
state transitions. We find that,
for a cleaning robot
moving at a stable velocity (e.g., 5cm/s),
its state transitions are affected by the distance to obstacles in
four scenarios, as ideally
illustrated by Figure~\ref{fig:states}.
In the four diagrams of Figure~\ref{fig:states},
the Y axis is the distance to obstacles measured over time (cf. X axis).
In Figure~\ref{fig:wall}, the robot is moving towards a fixed
obstacle (e.g., a wall), so the distance gradually
decreases to zero. The remaining three diagrams show
a robot meets three types of dynamic obstacles.
In Figure~\ref{fig:away}, at a time, a dynamic obstacle (e.g., a pet)
suddenly moves away at a higher velocity and in the same direction as the robot,
so the distance stops dropping but increases abruptly.
In Figure~\ref{fig:face}, after 20s, the dynamic obstacle moves towards
the robot, which makes their distance decrease faster than before.
Figure~\ref{fig:slide} represents another kind of obstacle that
has been on the path of the robot but, at one moment, moves out of
the robot's path, like the prompt open of an automatic sliding door.
The distance thus migrates to another decreasing linear curve.
The four cases capture normal scenarios where
the distance to obstacles, as a critical environmental parameter,
affects a cleaning robot in transiting its states at runtime, say,
to keep moving forward or turn/stop.
The four curves in Figure~\ref{fig:states} help the control program
rule out anomalous sensor values like ones generated by Radamsa.
More importantly, they inspire us with the
opportunities to {\em mislead} the control program.
Note that the control program
relies on the distance sensor values to learn the distance to obstacles.
Consider a cleaning robot is steadily moving to a wall.
We are monitoring the robot's
state and the real distance by reading sensor values.
When the robot is close to the wall,
we fuzz
increasing distance sensor values to emulate that
the obstacle is dynamic
and moving away. If the control program asks
for distance sensor values, we
will feed fuzzed values to it.
From the viewpoint
of control program, such increasing
sensor values are absolutely rational regarding Figure~\ref{fig:away}.
So the robot is misled from the curve in Figure~\ref{fig:wall}
to the one in Figure~\ref{fig:away}. In the end, the robot shall crash into the
wall.
\begin{figure}[t]
\centering
\scalebox{1.00}{\includegraphics[width=\columnwidth]{./Figures/system.pdf}}
\caption{An Overview of \textsc{Robo\-Fuzz}\xspace, Detection and Mitigation}\label{fig:system}
\end{figure}
\subsection{Overview}
\autoref{fig:system} illustrates an overview of the three schemes proposed in this paper.
The preceding motivating example indicates the essence of \textsc{Robo\-Fuzz}\xspace (at the leftmost corner
of~\autoref{fig:system}).
By closely monitoring the state of a robot and its environmental parameters (\circled{1} in~\autoref{fig:system}),
\textsc{Robo\-Fuzz}\xspace starts to deceive the robot's control program at an appropriate occasion with faked but
rational sensor values ($\circled{2}$ in~\autoref{fig:system}) so as to inflict concrete harm to the movable robot.
The sensor values fuzzed by \textsc{Robo\-Fuzz}\xspace should impose concrete security breaches to
autonomous service robots. Because our intention is
to enhance the security and safety of autonomous service robots at their design
and implementation stage, we need to defend against \textsc{Robo\-Fuzz}\xspace.
We subsequently develop detection and mitigation schemes, i.e., Shade and Remit at the top of~\autoref{fig:system},
to counteract \textsc{Robo\-Fuzz}\xspace.
The detection and mitigation reside within the robot control program.
As a result, they can learn the robot's states and historical records of
the environment in which the robot is working.
Using such information (\circled{3} in~\autoref{fig:system}), the detection
module would report whether the sensor values are compromised or not
(\circled{4} in~\autoref{fig:system}). Upon an alert of detected attacks, the robot control program
cannot rely on the sensor values to proceed moving. Instead,
the mitigation module would be activated to
leverage historical records (\circled{5} in~\autoref{fig:system}) of obstacles in the environment so as to navigate the robot to complete planned tasks (\circled{6} in~\autoref{fig:system}).
\section{Related Work}\label{sec:related}
CPS must be highly secure, especially for autonomous robotics systems~\cite{SABALIAUSKAITE2017174,robot:security:IntechOpen-2017,security:CPS:DATE-2017,ahmad2018analyzing}.
Researchers investigated the
cyber threats to teleoperated surgical robots~\cite{robot:surgical-robot:2015,7579758}. For the cyber-security of industrial robots,
Quarta et al.~\cite{robot:industrial-security:SP-2017} performed a thorough
analysis.
Comparatively, service robots are close to human beings, usually working together for service tasks~\cite{robot:service:RAS-2017}. Recently,
Lera et al.~\cite{robot:security:IntechOpen-2017} looked into the security threats with
a survey on the cyber-attacks associated to service robots as well as a
taxonomy that classifies the risks in using service robots.
However, not much work has been done to compromise a movable service robot with
rational but harmful sensor values as \textsc{Robo\-Fuzz}\xspace does. In particular,
Sabaliauskaite et al.~\cite{SABALIAUSKAITE2017174} comprehensively developed methods to conduct cyber-attacks to a specific mobile
robot. Whereas, their methods were significantly different from \textsc{Robo\-Fuzz}\xspace since they tried to use irrational sensor values
to crash the robot.
On the other hand,
how to detect and mitigate attacks for various CPS has been investigated~\cite{CPS:detection-survey:CSUR-2014,CPS:cross-layer:TCAD-2016,security:CPS:DATE-2017,fingerprint:CIDS:Security-2016,CPS:AM:WOOT-2017}.
For example,
Liu et al.~\cite{CSP:detection:TDSC-2016} used partially observable
Markov decision process to monitor and protect a
smart home against pricing attacks.
Dutta et al.~\cite{security:CPS-sensor:DAC-2017} utilized the
challenge response authentication to detect attacks
for active sensors and the recursive least square algorithm
to mitigate the impact of attacks.
Chhetri et al.~\cite{CPS:KCAD:ICCAD-2016} studied how to
detect an attack that could happen at various
points of the digital process chain of analog emissions in
CPS like a 3D printer.
Researchers have also looked into security issues of service robots
in other aspects. For instance,
Guerrero-Higueras et al.~\cite{robot:localization-detection:RAS-2018} addressed
attacks on real-time location systems
for autonomous mobile robots.
Li et al.~\cite{robot:heavy-duty-robot:appl-sci} proposed to upload
the analysis of attack detection and mitigation to a cloud server in
the improved deep belief networks.
Our Shade and Remit differ from aforementioned approaches
in that they detect attacks within the
computational resources of an autonomous service robot
and, furthermore, mitigate attacks without badly losing
the robot's work efficacy.
|
1,314,259,994,993 | arxiv | \section{Introduction and previous work}
In this note, we initiate the study
of Steiner tree instances
that are stable to multiplicative perturbations
to the distances in the underlying
metric.
Our analysis lies in the Bilu-Linial stability~\cite{BiluL12} setting,
which provides a way to study tractable instances of NP-hard problems.
Instances that are $\gamma$-stable in the Bilu-Linial model have the property that the structure
of the optimal solution is not only unique, but also does not change even when the underlying
distances among the input points are perturbed by a multiplicative
factor $\gamma > 1$. In their original paper, Bilu and Linial
analyzed MAX-CUT clustering, and since their seminal work, other
problems have been analyzed including center-based
clustering~\cite{AwasthiBS12,BalcanL16,Ben-DavidR14},
multi-way cut problems~\cite{MakarychevMV14}, and metric TSP~\cite{MihalakSSW11}.\footnote{Bilu-Linial stability is
one among other notions of data stability studied in the literature~\cite{AckermanB09,BalcanBG09}. This is in
contrast to notions of algorithmic stability,
which
focus on properties of algorithms as opposed to data, see e.g.~\cite{Alabdulmohsin15,Ben-DavidLP06,FanIMSSV20,LiuLNT17}.}
Here, we look at the metric Steiner tree problem
and also the more
restricted Euclidean version. For general metrics, the Steiner tree
problem is known to be APX-hard in the worst case~\cite{ChlebikC08}. For the Euclidean metric, a PTAS is known~\cite{Arora98}.
In this paper
we begin by providing strong geometric structural properties that need to be satisfied
by stable instances. These point to the existence of algorithms for non-trivial
families. We then make use of, and strengthen, these geometric properties to show
that $1.562$-stable instances of Euclidean Steiner trees are polynomial-time
solvable. Finally, we discuss the connections between certain approximation
algorithms and Bilu-Linial stability for Steiner trees.
\section{Model and definitions}
In this section, we recall the relevant definitions.
First we define the Steiner tree problem, which is
among Karp's 21 original NP-hard problems~\cite{Karp72}.
It has various applications including in network design, circuit layouts, and phylogenetic tree reconstruction.
\begin{definition}[{\bf the Steiner tree problem}]
For an undirected graph
$G = (V, E)$ with edge weights $w_{e} \in \mathbb{R}^{+}_{0}$ for every edge $e \in E$, and a set $T \subseteq V$ of terminals. A Steiner tree $S$ is a tree in the graph $G$ that spans all terminal
vertices $T$ and may contain some of the
non-terminals (also called Steiner points).
The goal is to find such a tree of lowest weight,
which we call $\mathrm{OPT},$
$$\mathrm{OPT} = \arg\min_{S}\sum_{e\in S} w_e.$$
\end{definition}
We can assume without loss of generality\footnote{For
any graph with distances specified on edges, a metric can be formed by taking the vertices to be points and considering the shortest path distances in the graph between pairs of vertices. Solving (or approximating) the Steiner tree problem on a metric formed in this manner solves (or approximates) the problem on the original graph. See Vazirani~\cite{Vazirani01} for further discussion of this issue.}
that the vertices are points in a metric space
and the weights of the edges are given by the distance function -- when the input is in the form of a metric, we call this the
\textbf{metric Steiner tree problem}. Our results use properties of metric spaces, but move freely between the metric space and graph representations of the problem. When the metric is Euclidean,
this is called the \textbf{Euclidean Steiner tree problem}.
Now we move on to defining Bilu-Linial stability for the Steiner tree
problem on metrics.
\begin{definition}[{\bf Bilu-Linial $\gamma$-stable instances}]
Let $I = (G, w)$ be an instance of a metric Steiner tree problem and $\gamma > 1$.
$I$ is $\gamma$-stable if for any function
$w': V \times V \rightarrow \mathbb{R}^{+}_{0}$ such that
$\forall u,v \in V$,
$$
w_{uv} \le w'_{uv} \le \gamma w_{uv},
$$
the optimal Steiner tree $\mathrm{OPT}'$ under $w'$ is equal to the
optimal Steiner tree $\mathrm{OPT}$ under $w$.
\end{definition}
We note that the perturbations can be
such that instances originally satisfying
the metric or Euclidean properties
no longer have to satisfy these properties
after perturbation.
We also note that due to
the triangle inequality, no instances
have stability $2$ or greater in the metric setting.
\textbf{Notation:}
For a graph $G$, $w_{ab}^{G}$ is the weight of edge $ab$ in $G$. We abbreviate $w_{ab} = w_{ab}^{G}$ and $w'_{ab} = w^{G'}_{ab}$. Let $\mathrm{OPT} \subseteq E(G)$ denote the minimum weight Steiner tree of $G$, let $w(\mathrm{OPT}) = w^{G}(\mathrm{OPT})$ denote the weight of the Steiner tree.
\section{Structural properties in general metrics}
In this section, we work in the context of a general metric space, and we develop interesting restrictions on the types of problems with $\gamma$-stable solutions, for various values of $\gamma.$
The techniques of this section \emph{do not} give, in complete generality, an efficient algorithm for finding the optimal Steiner tree for any value of $\gamma$ less than $2,$ a problem we leave open. However, when more information about the metric space is available, one can use the structural results here to give restrictions on the arrangements of Steiner points which does yield a definitive solution. In particular,
\begin{enumerate}
\item In Section~\ref{sec:Euclidean}, we use Lemma~\ref{lem: Steiner degree} to give an algorithm for the Euclidean metric when $\gamma > \sqrt{2}$.
\item More generally, in the case that no two Steiner points are adjacent in the optimal solution, Lemma \ref{funfan} together with the other results of the section can be used to give an efficient and very simple algorithm to find the minimal weight Steiner tree. Other more general situations can be efficiently handled via only slightly more elaborate arguments - e.g. if one has a bound on the length of the longest path of Steiner points in the optimal solution.
\end{enumerate}
\begin{lem} \label{lem: Steiner degree}
The degree of any Steiner point in the optimal solution is greater than $\frac{2}{2-\gamma}$.
\end{lem}
\proof Consider a Steiner node $s$ in the optimal solution that is connected to
$m$ other points, $a_1,\ldots,a_m$. Let $\overline{w} = \sum_{i = 1}^m\frac{w_{sa_i}}{m}$, and relabel the points so that $w_{sa_1} + w_{sa_m} \geq 2 \overline{w}$. Let $G'$ be obtained by perturbing each edge $sa_i$ by a factor of $\gamma$. Let
\[
\mathrm{OPT}' := (\mathrm{OPT} \setminus \{sa_1,\ldots, sa_m\}) \cup \{a_1a_2,\ldots,a_{m-1}a_m\}.
\]
Clearly, $\mathrm{OPT}'$ is also a Steiner tree. Using the fact that $w_{a_ia_{i+1}} \leq w_{sa_i} + w_{sa_{i+1}}$, we have
\begin{align*}
w'(\mathrm{OPT}') & \leq w'(\mathrm{OPT}) - \sum_{i=1}^{m}\gamma w_{sa_i} \\
&~~~~~+\sum_{i=1}^{m-1}\left(w_{sa_i} + w_{sa_{i+1}}\right) \\
& = w'(\mathrm{OPT}) - \sum_{i=1}^{m}\gamma w_{sa_i} +\sum_{i=2}^{m-1}2w_{sa_i} \\
&~~~~~+ w_{sa_1} + w_{sa_m}
\end{align*}
Using the fact that $w'(\mathrm{OPT}') > w'(\mathrm{OPT})$, we have
\[
\sum_{i=1}^{m}\gamma w_{sa_i} < \sum_{i=2}^{m-1}2w_{sa_i} + w_{sa_1} + w_{sa_m}
\]
or
\[
\gamma \cdot \overline{w} m < (2m-2)\overline{w}.
\]
Rearranging, we have
\[
\frac{2}{2-\gamma} < m
\]
\qed
Now we state some additional structural properties of optimal Steiner trees in $\gamma$-stable instances. These are not used in Section~\ref{sec:Euclidean}. Nevertheless, we hope that they are of independent interest.
\begin{lem}
If $a,b \in V(\mathrm{OPT})$ are nearest neighbors in the graph, then the edge $ab$ is in the optimal solution.
\end{lem}
\begin{lem}
\label{lem:basicprop}
Suppose $ab,bc \in \mathrm{OPT}$, then
\begin{enumerate}
\item $w_{ac} > \gamma \cdot \max \{ w_{ab} , w_{bc}\}$.
\item $\frac{2}{\gamma} \cdot w_{ac} > w_{ab} + w_{bc}$.
\item $(\gamma - 1) \cdot w_{ab} < w_{bc} $, $(\gamma - 1) \cdot w_{bc} < w_{ab}.$
\end{enumerate}
\end{lem}
\proof
\begin{enumerate}
\item Assume w.l.o.g. $w_{ab} \geq w_{bc}$. Suppose that $w_{ac} \leq \gamma \cdot\max \{w_{ab},w_{bc}\}$, let $G'$ be obtained by perturbing $ab$ by a factor of $\gamma$. Then $(\mathrm{OPT} \setminus \{ab\}) \cup \{ac\}$ is also a Steiner tree in $G'$ of weight $w'(\mathrm{OPT})$ contradicting stability. This completes the proof of $1.$
\item The proof of $2.$ follows from $1.$ and the fact that $\max\{w_{ab}, w_{bc}\} \geq \frac{w_{ab} + w_{bc}}{2}$.
\item Let $G'$ be obtained by perturbing $bc$ by a factor of $\gamma$. Then $\mathrm{OPT}' : = \mathrm{OPT} \setminus \{bc\} \cup \{ac\}$ is also a Steiner tree of weight
\begin{equation}
\label{eqn:eqn1}
w'(\mathrm{OPT}') = w(\mathrm{OPT}) - w_{bc} + w_{ac} \leq w(\mathrm{OPT}) + w_{ab}.
\end{equation}
On the other hand, stability
gives us that
\begin{equation}
\label{eqn:eqn2}
w'(\mathrm{OPT}') > w'(\mathrm{OPT}) = w(\mathrm{OPT}) + (\gamma - 1)w_{bc}.
\end{equation}
Putting~(\ref{eqn:eqn1}) and~(\ref{eqn:eqn2}) together gives us that $(\gamma - 1) \cdot w_{bc} \leq w_{ab}$.
Repeating the same argument but swapping $bc$ for $ab$ gives us $(\gamma - 1) \cdot w_{ab} \leq w_{bc}$.
\end{enumerate}
\qed
\begin{lem} \label{close}
Let $H$ be a subgraph of $\mathrm{OPT}$ with at least one edge. Let $ab \in H.$ Fix any vertex $c \in V(\mathrm{OPT} )\setminus V( H)$ satisfying $w_{ca} \leq \gamma (\gamma -1 ) \cdot w_{ab}$; then we have $ca \in \mathrm{OPT}$.
\end{lem}
\proof
If $ca \notin \mathrm{OPT}$, then adding the edge $ac$ to $\mathrm{OPT}$ produces a cycle which includes edge $ac.$ Suppose that the cycle also includes $ab$. Let $G'$ be obtained by perturbing $ab$ by a factor of $\gamma$. Then $(\mathrm{OPT}' \setminus \{ab\}) \cup \{ac\}$ is a Steiner tree of weight at most $w'(\mathrm{OPT})$, contradicting stability.
If the cycle does not include $ab$, it includes some edge other than $ac$ which has endpoint at $a$. This edge, call it $ad$, is in $\mathrm{OPT}$. By Lemma \ref{lem:basicprop}, $w_{ad} > (\gamma -1) w_{ba}$. Let $G'$ be obtained by perturbing $ad$ by a factor of $\gamma$. We have $w'_{ad} > \gamma (\gamma -1) w_{ba} \geq w_{ac}$. Then $(\mathrm{OPT} \setminus \{ad\}) \cup \{ac\}$ is a Steiner tree of weight less than $w(\mathrm{OPT})$, again contradicting stability.
\qed
\begin{lem}
Let $\gamma > \frac{1+\sqrt{5}}{2}.$ Let $ab \in H$, a subgraph of $\mathrm{OPT}.$
Suppose that $c$ is a vertex with $w_{ca} \geq \gamma \cdot w_{ab}$, then $ca \notin \mathrm{OPT}$.
\end{lem}
\proof
Let $\gamma ' = \frac{w_{ca}}{w_{ab}}.$ Note that $\gamma' \geq \gamma$ is some real number larger than $\frac{1+\sqrt{5}}{2}.$ If $ac \in \mathrm{OPT}$, then by part $1.$ of Lemma~\ref{lem:basicprop}, we must have
\[
\frac{w_{bc}}{w_{ac}} >\gamma.
\]
On the other hand, \begin{eqnarray*}
\frac{w_{bc}}{w_{ac}} & \leq & \frac{w_{ab}+ w_{ac}}{w_{ac}} \\
& \leq & \frac{w_{ab}+ \gamma ' w_{ab}}{\gamma 'w_{ab}} \\
& \leq & \frac{1+\gamma'}{\gamma '}.
\end{eqnarray*}
We now have a contradiction as long as $\frac{1+\gamma'}{\gamma '} < \gamma $. The function $f(x) = \frac{1+x}{x}$ is decreasing for $x>0$ and $f(x) < x$ for any $x \geq \frac{1+\sqrt{5}}{2}$. So, we have that $\frac{1+\gamma '}{\gamma '} < \frac{1+\gamma }{\gamma } <\gamma $ as desired.
\qed
\begin{proposition} \label{close2}
Let $H $ be a subgraph of $\mathrm{OPT}$ with at least one edge. Suppose that $ab \in H$ and suppose that $c \in V(\mathrm{OPT}) \setminus V(H)$ with $w_{bc} <\gamma (\gamma - 1) w_{ab}$. Then we must have $w_{bc} < \frac{w_{ab}}{\gamma -1}$ and $w_{ab} < \frac{w_{bc}}{\gamma-1} .$
\end{proposition}
\proof By Lemma \ref{close}, we must have that $bc \in \mathrm{OPT}$. Therefore, property $3.$ of Lemma~\ref{lem:basicprop} gives us the desired inequalities.
\qed
When $\gamma (\gamma -1)^2 > 1$ Proposition \ref{close2} strengthens the bounds of Lemma \ref{close}. This holds, for instance, when $\gamma >1.755$. In this case, we obtain:
\begin{proposition}
Assume that $\gamma (\gamma -1)^2 > 1$. Assume that $H$ is a subgraph of $\mathrm{OPT}$ with at least two vertices. Let $ab \in H.$ Fix any vertex $c \in V(\mathrm{OPT} )\setminus V( H)$. Then we have $w_{ca} < \frac{1}{\gamma -1} \cdot w_{ab}$ if and only if $ca \in \mathrm{OPT}$.
\end{proposition}
\proof By Lemma \ref{close} and the assumption that $\gamma (\gamma -1) > \frac{1}{\gamma -1}$, we must have that $ac \in \mathrm{OPT}$. If $w_{ca} \geq \frac{1}{\gamma -1} \cdot w_{ab}$, we can not have edge $ac$ in $\mathrm{OPT}$ by Lemma \ref{lem:basicprop} part $3.$
\qed
Let $a, \bar b = (b_1, \ldots , b_m)$ be vertices (either terminal or Steiner points). We denote by $T(a, \bar b)$ the tree on vertex set $a,\bar b$ in which $a $ is connected to each element of $\bar b.$ Let the \emph{average weight of $T(a, \bar b)$} be
\[
\frac{\sum_{i=1}^m w_{ab_i}}{m-1}.
\]
Suppose that $H$ is a subgraph of $\mathrm{OPT}$. We call $T(a,\bar b)$ a \emph{terminal component fan} relative to $H$ if $a$ is a Steiner point and $\bar b$ are all terminals or vertices in distinct connected components of $H$ each with at least two vertices. We call the collection of components of $H$ together with the terminals not in $H$ the \emph{terminal components of $H$.}
\begin{lem} \label{funfan}
Let $\gamma > 1.755$ and suppose that $H$ is a subgraph of $\mathrm{OPT}$ and in the optimal solution, no two Steiner points are adjacent. Suppose that $T(a,\bar b)$ with $\bar b = (b_1, \ldots , b_m)$ is a terminal component fan such that:
\begin{itemize}
\item the average weight of $T(a,\bar b)$ is less than all edges not in $H$ which connect two terminal components of $H$,
\item the average weight of $T(a,\bar b)$ is minimal among all terminal component fans,
\item the edges of $T(a, \bar b)$ are all within a factor of $\frac{1}{\gamma - 1}$ of each other.
\end{itemize}
Then $T(a, \bar b)$ is a subgraph of $\mathrm{OPT}$.
\end{lem}
\proof
Suppose that the fan $T(a, \bar b)$ is not in $\mathrm{OPT}.$ Then for some subset of the edges of $T(a,\bar b)$ are not in $\mathrm{OPT}$ - the components of $H$ that contain each $b_i$ are connected. Specifically, if there are $k < m$ edges of $T(a, \bar b)$ which are not in $\mathrm{OPT}$, then there are at least $k$ edges of $\mathrm{OPT} \setminus H$ such that in $\mathrm{OPT} \cup T(a, \bar b)$ we may remove these $k$ edges and still have a Steiner tree.\footnote{In the case that $k=m$, there may be only $m-1$ such edges, as $a$ may not be in $\mathrm{OPT}$, but the argument works identically in that case.} Moreover, since no two Steiner points are adjacent, these edges are either
\begin{itemize}
\item terminal to terminal edges, or
\item part of a terminal component fan.
\end{itemize}
In the first case, the terminal to terminal edges have weight at least $\frac{\sum_{i=1}^m w_{ab_i}}{m}.$ In this case perturb this edge by a factor of $\gamma$, and swap it with one edge of the terminal component fan $T(a, \bar b)$. Since the edges of $T$ are within a factor of $\frac{1}{\gamma - 1}$ of each other and their average weight is $\frac{\sum_{i=1}^m w_{ab_i}}{m}$, this swap decreases of the weight of the resulting Steiner tree after the perturbation.
Similarly in the case that one of the $k$ edges is in another terminal component fan, $T_1$, the average weight of edges in that fan is at least $\frac{\sum_{i=1}^m w_{ab_i}}{m}$, and applying part $3.$ of Lemma~\ref{lem:basicprop}, the minimal weight edge in $T_1$ is at least $(\gamma -1) \cdot \frac{\sum_{i=1}^m w_{ab_i}}{m}.$ Now, perturb such an edge by a factor of $\gamma$ to make the weight at least $\gamma \cdot (\gamma -1) \cdot \frac{\sum_{i=1}^m w_{ab_i}}{m},$ which is larger than the weight of the largest weight edge of $T(a, \bar b)$, which is at most $\frac{1}{\gamma-1} \cdot \frac{\sum_{i=1}^m w_{ab_i}}{m}$ because $\gamma > 1.755.$
Performing any of these $k$ swaps yields a lower weight Steiner tree than $\mathrm{OPT}$ under the above perturbations, contradicting $\gamma$-stability.
\qed
\iffalse
\begin{remark}
The techniques of this section \emph{do not} give, in complete generality, an efficient algorithm for finding the optimal Steiner tree for any value of $\gamma$ less than $2,$ a problem we leave open. However, when more information about the metric space is available, one can use the structural results here to give restrictions on the arrangements of Steiner points which does yield a definitive solution. For instance, in the next section, we do this for Euclidean space.
More generally, in the case that no two Steiner points are adjacent in the optimal solution, Lemma \ref{funfan} together with the earlier results of the section can be used to give an efficient and very simple algorithm to find the minimal weight Steiner tree. Other more general situations can be efficiently handled via only slightly more elaborate arguments - e.g. if one has a bound on the length of the longest path of Steiner points in the optimal solution.
\end{remark}
\fi
\section{Euclidean Steiner trees}\label{sec:Euclidean}
In this section, we consider the restriction of the Steiner tree problem
to the Euclidean metric.
Under the assumption of $\gamma$-stability, the minimum angle between two terminal points can be bounded as a function of $\gamma$.
\begin{definition}[angle]
Let $a_1,a_2,b$ be points on a Euclidean metric. Then
we call $\angle a_1ba_2$ the \emph{angle} between $a_1, a_2$.
\end{definition}
\begin{lem}\label{lem:theta&gamma}
For a $\gamma$-stable instance of a Euclidean Steiner tree, the angle between two terminal points with respect to their
common Steiner neighbor in the tree should be greater than $2\sin^{-1}(\gamma/2)$.
\end{lem}
\proof Let us assume that, for a $\gamma$-stable instance of the Steiner tree problem, the angle between two terminal points $a_1$ and $a_2$ at a Steiner point $b$ is $\theta$.
Without loss of generality, let $w_{a_1b} =: w \geq w_{a_2b}$ . Clearly $w_{a_1a_2} > \gamma w$, since otherwise, perturbing edge $a_1b$ by a factor of $\gamma$ allows one to replace $a_1b$ by $a_1a_2$ in a minimal Steiner tree, contradicting stability. Let us use $\alpha$ to denote the angle $\angle a_1a_2b$. Clearly, $\alpha \geq \pi/2 - \theta/2$. Thus by the sine rule, we have
\[
\frac{\gamma w}{\sin \theta} < \frac{w_{a_1a_2}}{\sin\theta}=\frac{w}{\sin\alpha} \leq \frac{w}{\sin(\pi/2-\theta/2)}.
\]
Rearranging, we have
\begin{align*}
\gamma\ &<\ \frac{\sin\theta}{\sin(\pi /2 -\theta/2)} \\
&=\ \frac{2\sin(\theta/2)\cos(\theta/2)}{\cos(\theta/2)}\\
&=\ 2\sin(\theta/2)
\end{align*}
as desired.
\qed
Thus we immediately get the following Corollary.
\begin{corollary}
For a $\gamma$-stable instance of the Steiner tree problem, if $\gamma > \sqrt{2}$ then the angle $\theta$ between two terminal points is $\theta > \pi/2$.
\end{corollary}
\begin{figure}
\centering
\includegraphics[width=.25\textwidth]{nosteiner.png}
\caption{An example of points $t_1$, $t_2$, $t_3$, and $t_4$
surrounding Steiner point $s$ at angles over $\theta > 90$ degrees. No more than $\frac{-1}{\cos\theta}$ can
fit, independent of the dimension.}
\label{fig:nosteiner}
\end{figure}
\begin{lem}
If there are $N$ points in $\mathbb{R}^{d}$ such that the angle between every pair with respect to a point $u$ is at least $\theta > (\pi/2)$, then $N \leq \frac{-1}{\cos\theta}$.
\end{lem}
\proof
Let $\theta > \pi/2$ and let $v_1,\dots,v_N \in \mathbb{R}^{d} $ be unit vectors in $\mathbb{R}^{d}$ such that $\langle v_i,v_j \rangle \leq \cos\theta$. Consider the matrix $V$ whose columns are the $v_i$s. We know that by construction $V^TV$ is positive semi-definite. But if $N > \frac{-1}{\cos \theta}$, then the sum of every row is negative, which contradicts the positive semidefiniteness of $V^TV$, and so
it must be the case that $N \leq \frac{-1}{\cos\theta}$.
\qed
\begin{corollary} \label{col:Steiner degree packing}
For $\gamma > \sqrt{2}$ the degree of a Steiner node in the optimal solution is at most $\frac{-2}{2 - \gamma^2}$.
\end{corollary}
\proof
From Lemma~\ref{lem:theta&gamma} we have
\[
\gamma< 2 \sin(\theta/2).\\
\]
So \[
\gamma^2 < 4 \sin^2(\theta/2)
\]
and so $\gamma^2/2 < 2\sin^2(\theta/2)$ or $1- \gamma^2/2> 1- 2\sin^2(\theta/2)$.
Since $\cos( \theta) = 1- 2\sin^2(\theta/2)$, we have
\[
\cos (\theta)<1-\gamma^2/2
\]
or
\[
\theta > \cos^{-1} (1-\gamma^2/2).
\]
\qed
\begin{corollary}
When $\gamma > 1.562$,
the optimal Steiner tree for a $\gamma$-stable instance
does not have Steiner nodes.
\end{corollary}
\proof
This happens when the min degree imposed by stability is larger than the max degree imposed by the packing bound.
By Lemma~\ref{lem: Steiner degree} and Corollary~\ref{col:Steiner degree packing} we have the following:
\[
\frac{2}{\gamma^2-2}\leq \frac{2}{2-\gamma}
\]
By solving the above inequality for $\gamma$ we get $\gamma \geq \frac{\sqrt{17}-1}{2}$, which is bounded from above by $1.562$.
\qed
This geometric property implies
that for $1.562$-stable instances, Steiner points
will not be used in the optimal solution. Hence,
an MST algorithm on just the terminal points will give
the answer in polynomial time.
Finally, we point to the existence of Gilbert and Pollak's Steiner ratio conjecture~\cite{GilbertP68}, which states that in the Euclidean plane,
there always exists an MST within a cost of $2/\sqrt{3}$ of
the minimum Steiner tree, and the behavior of this ratio for higher dimensions is yet unknown. Assuming this conjecture,
in certain cases it may imply some limitations on the stability of Euclidean instances, especially in low dimensions, using the idea that even if the Steiner tree distances are ``blown up" by more than the Steiner ratio, one could instead use the MST instead and get a cheaper solution. Unfortunately, because the MST may overlap with the Steiner tree, we cannot give a concrete statement.
\section{Using approximation algorithms to solve stable instances}
In this section we give a general argument about how strong approximation
algorithms for Steiner tree problems
give stability guarantees. We note that it is known that an FPTAS for the Steiner tree would imply P=NP~\cite{ChlebikC08}, so there is no hope to use the result below in the general metric case. But if at some future point an FPTAS for the Euclidean variant of the Steiner tree problem is developed (currently, only a PTAS is known to exist~\cite{Arora98}), then this would immediately imply the existence of polynomial-time algorithms for stable instances for any constant $\gamma >1$.
\begin{thm}
An FPTAS for the Steiner tree problem
gives a polynomial time algorithm for optimally solving any $\gamma$-stable
Steiner tree problem in time poly$(n, (\gamma-1)^{-1})$. In particular,
this gives a polynomial-time algorithm for any constant $\gamma > 1$.
\end{thm}
\proof Assume we are given an FPTAS for the Steiner tree problem.
This means that we have an algorithm that runs in time
$\mathrm{poly}(n,1/\epsilon)$ on instances of size $n$ to
give $(1+\epsilon)$-approximations to the optimum Steiner tree.
Now consider a $\gamma$-stable instance for constant $\gamma > 1$.
We run our FPTAS on that instance with $\epsilon = \frac{\gamma-1}{2n}$ to get a Steiner tree $S'$ with weight within
$\mathrm{OPT}(1+(\gamma-1)/2n)$.
We now claim that $S'$ must contain every edge in OPT whose weight is at least $\frac{\mathrm{OPT}}{n}$. Suppose it doesn't -- then we could perturb
such an edge by $\gamma$ and increase the weight of the optimal solution
to $\mathrm{OPT}(1+(\gamma-1)/n)$ and
$S'$ would become cheaper than $\mathrm{OPT}$, thereby violating $\gamma$-stability.
By the fractional pigeonhole principle, the most expensive edge of the FPTAS
satisfies the desired property above and is therefore in $\mathrm{OPT}$. Hence, we can
contract this edge into a new vertex and get a new instance with $n-1$ vertices at $\gamma$-stability.
We can continue this process,
getting one new edge of the optimal in each iteration, until we have a constant-size problem that we can brute-force.
\qed
We note that the above technique could be used to convert
even slightly weaker (than FPTAS) approximation algorithms to nontrivial stability guarantees.
|
1,314,259,994,994 | arxiv | \section{Introduction}
When David Deutsch \cite{Deutsch-1985} first described the quantum TM
(Turing machine), he stated that the computational power of a quantum TM
and that of a classical (deterministic) one are the same for functions
from $\mbb{Z}$ to $\mbb{Z}$. However, due to inherent properties
of the quantum TM, this equivalence is not immediate, and it is
necessary to introduce some distinctions for Turing machines. In
particular, the time evolution operator of a quantum TM is unitary, which
means that the evolution is reversible; for this reason it is
necessary to consider the quantum TM as a reversible machine. On the
other hand, the local transition function of a quantum TM represents
a probability amplitude for the evolution; in this case, it is necessary
to consider the quantum TM as a probabilistic machine.
\section{The components: quantum TM and deterministic TM}
A TM has a finite set of states:
\begin{equation}\label{eq-10}
Q = \{q_1, q_2, \dots, q_p\}.
\end{equation}
\
The states of a quantum TM are a finite set of observables:
\begin{equation}\label{eq-20}
\mb{\hat{n}} = \{ \hat{n}_1,\hat{n}_2,\dots,\hat{n}_k \}.
\end{equation}
\
Every observable $\hat{n}_i \in \mb{\hat{n}}$ has spectrum
$\{0,1\}$ and every $q \in Q$ can take one of the two values $\{\text{current},
\text{non-current}\}.$
\\
\\
The TM works on a bi-infinite one-dimensional tape. The machine
reads symbols from the tape or writes symbols on it; these symbols belong to
a finite alphabet:
\begin{equation}\label{eq-30}
\Sigma = \{0,1\}.
\end{equation}
\
The quantum TM represents the bi-infinite one-dimensional tape by an
infinite set of observables:
\begin{equation}\label{eq-40}
\mb{\hat{m}} = \{\hat{m}_i\}; \quad i \in \mbb{Z}.
\end{equation}
\
Every observable $\hat{m}_i \in \mb{\hat{m}}$ has spectrum
$\{0,1\}$ and every TM tape cell can take some values from the
alphabet $\Sigma = \{0,1\}$.
\\
\\
The TM has a read-write head. This head marks the position of the
machine on the tape. The quantum TM represents the read-write head
by an observable $\hat{x}$. The spectrum of the observable
$\hat{x}$ is $\mbb{Z}$ because of the existence of infinitely many cells
on the TM tape.
\\
\\
The instantaneous description of a TM is formed by the current
state, the symbols on the tape and the read-write head's position.
The state of a quantum TM is a \cite{Deutsch-1985} ``\emph{unit
vector in the Hilbert space $\mc{H}$ spanned by the simultaneous
eigenvector:}
\begin{equation}\label{eq-50}
\ket{\psi} = \ket{x,\mb{n},\mb{m}},
\end{equation}
\
\emph{of $\hat{x}$, $\mb{\hat{n}}$ and $\mb{\hat{m}}$, labelled by
the corresponding eigenvalues $x$, $\mb{n}$ and $\mb{m}$}''. The
states given by equation (\ref{eq-50}) are called the
computational bases states.
\section{The evolution: quantum TM and reversible TM}
Let $Q$ be the set of states and let $\Sigma$ be the alphabet of a TM.
The set of machine movements (left, no motion, right) on the tape is
represented by:
\begin{equation}\label{eq-55}
D = \{-1,0,1\}.
\end{equation}
\
\\
The evolution of the machine is represented by a finite set of
instructions:
\begin{equation}
\label{eq-60}
q,s,s',d,q' \qquad \text{where} \quad q,q' \in Q; s,s' \in \Sigma; d \in
D;
\end{equation}
\
\\
Instruction ``$q,s,s',d,q'$'' means: if the current state is $q$
and if the symbol on the cell marked by the read-write head is
$s$, the machine writes the symbol $s'$ in this cell, the machine
moves in the direction marked by $d$ and the machine goes to state
$q'$. Then, evolution for TM is a transition function $\delta$:
\begin{equation}
\label{eq-65}
\fp{\delta} Q \times \Sigma \times \Sigma \times D \times Q \to
\{0,1\}, \quad \text{where},
\end{equation}
\begin{equation}
\label{eq-70}
\delta(q,s,s',d,q') =
\begin{cases}
1 & \text{iff $q,s,s',d,q'$ is an instruction for the TM},
\\
0 & \text{iff $q,s,s',d,q'$ is not an instruction for the
TM}.
\end{cases}
\end{equation}
\
\\
The TM is deterministic if and only if the transition function
$\delta$ satisfies:
\\
For any $(q,s) \in Q \times \Sigma$:
\begin{equation} \label{eq-80}
\sum_{\substack{s' \in \Sigma \\ d \in D \\ q' \in Q}}\delta(q,s,s',d,q') \in
\{0,1\}.
\end{equation}
\
\\
A deterministic TM is a reversible one if and only
if the transition function $\delta$ satisfies for any
$(q,s),(q',s') \in Q \times \Sigma$ with $(q,s) \neq (q',s')$:
\begin{equation}
\label{eq-100}
\sum_{\substack{s'' \in \Sigma \\ d \in D \\ q'' \in
Q}}\delta(q,s,s'',d,q'') + \delta(q',s',s'',d,q'') \in \{0,1\}.
\end{equation}
\
\\
On the other hand, the evolution for a quantum TM during a single
computational step is \cite{Deutsch-1985, Ozawa-Nishimura-1999}:
\begin{equation}\label{eq-90}
\ket{\psi(t)} = U^t \ket{\psi(0)}, \quad t \in \mbb{Z}^+, \qquad
\text{where} \quad U^{\dagger}=U^{-1}.
\end{equation}
\
\\
$U$ is a unitary operator called the time evolution operator. Because
$U$ is unitary, the evolution of a quantum TM is reversible.
Bennett proved that for any deterministic TM there is an
equivalent reversible TM \cite{Deutsch-1985}, in this way
irreversibility is not an essential feature of TM, while
reversibility is an essential feature of a quantum TM.
\section{The transition function: quantum TM and probabilistic TM}
Let $\tilde{\mbb{R}}$ be the set of computable real numbers and
let $\widetilde{[0,1]}$ be the set of computable real numbers belonging to the interval
$[0,1]$. If the transition function of a Turing machine is:
\begin{equation}
\label{eq-110}
\fp{\delta} Q \times \Sigma \times \Sigma \times D \times Q \to
\tilde{\mbb{R}},
\end{equation} and satisfies for any $(q,s) \in Q \times \Sigma$:
\begin{equation} \label{eq-115}
\sum_{\substack{s' \in \Sigma \\ d \in D \\ q' \in Q}} \delta(q,s,s',d,q')
\in \widetilde{[0,1]},
\end{equation}
\
\\
then the Turing machine is a probabilistic TM \cite{Gill-1977}.
The transition function $\delta$ of a probabilistic TM means that
if the current state is $q$ and if the symbol on the cell marked
by the read-write head is $s$, the probability of writing the
symbol $s'$ in this cell, of moving in the direction marked by
$d$ and of changing the current state to $q'$ is given by the value of
the function $\delta(q,s,s',d,q')$.
\\
\\
A Turing machine operates by finite means \cite{Turing-1936}. The
finite operation of a (deterministic, reversible, probabilistic)
TM is supported in finite process unit set and finite alphabet
(this implies a finite instructions set). On the other hand, a
quantum TM operates by finite means if and only if
\cite{Deutsch-1985}: ``\emph{only a finite subsystem is in motion
during any one step, and the motion depends only on the state of a
finite subsystem, and the rule that specifies that motion can be
given finitely in the mathematical sense}''.
\\
\\
To meet the requirements of finite operation, matrix elements of time
evolution (unitary) operator $U$ given by the equation (\ref{eq-90})
will have the form given by \cite{Deutsch-1985, Ozawa-Nishimura-1999}:
\\
\\
For any states $\ket{x,\mb{n},\mb{m}}$ and
$\ket{x',\mb{n}',\mb{m}'}$:
\begin{align}\label{eq-120}
& \bra{x',\mb{n}',\mb{m}'} U \ket{x,\mb{n},\mb{m}} = \notag \\ &
\quad \Bigr[\mb{\delta}_{x'}^{x+1}
\delta(\mb{n},m_{x},m_{x}',1,\mb{n}') + \mb{\delta}_{x'}^{x}
\delta(\mb{n},m_{x},m_{x}',0,\mb{n}') + \notag \\ & \quad \;
\mb{\delta}_{x'}^{x-1} \delta(\mb{n},m_{x},m_{x}',-1,\mb{n}')
\Bigl] \prod_{y \neq x}\mb{\delta}_{m_{y}}^{m_{y}'}
\end{align}
\
\\
where $\mb{\delta}$ is Kronecker delta and $\delta$ is local
transition function for a quantum TM.
\\
\\
Let $\tilde{\mbb{C}}$ be the set of computable complex numbers; let
$\mb{n} \in \mb{N} = \{0,1\}^k$, where $\mb{\hat{n}}$ is given
by equation (\ref{eq-20}); let $\mb{m} \in \mb{M} =
\{0,1\}^{\mbb{Z}}$, where $\mb{\hat{m}}$ is given by equation
(\ref{eq-40}); and let $m_x \in \Sigma$, where $\Sigma$ is given
by equation (\ref{eq-30}). The function $\delta$ is a function
\cite{Bersntein-Vazarini-1997, Ozawa-Nishimura-1999}:
\begin{equation}
\label{eq-130}
\fp{\delta} \mb{N} \times \Sigma \times \Sigma \times D
\times \mb{N} \to \tilde{\mbb{C}}.
\end{equation}
\
If the state of a quantum TM is $\ket{x,\mb{n},\mb{m}}$ and $d=x'-x$,
the function $\delta(\mb{n},m_x,m_x',d,\mb{n}')$
represents the probability amplitude of evolving to the state
$\ket{x',\mb{n}',\mb{m}'}$.
\\
\\
From equation (\ref{eq-120}) it is possible to determine the
local transition function $\delta$ from the time evolution
operator $U$. Conversely, it is possible to determine the time evolution
operator $U$ from the local transition function $\delta$ by
\cite{Ozawa-Nishimura-1999}:
\begin{equation} \label{eq-140}
U\ket{x,\mb{n},\mb{m}} = \sum_{\substack{m_x' \in \Sigma \\ d \in D
\\ \mb{n}' \in \mb{N}}}
\delta(\mb{n},m_x,m_x',d,\mb{n}')\ket{x+d,\mb{n}',\mb{m}'}.
\end{equation}
\
\\
Local transition $\delta$ should (indirectly) satisfy equation
(\ref{eq-90}). Operator $U$ is unitary if and only if $\delta$
satisfies the following conditions \cite{Ozawa-Nishimura-1999}:
\\
\\
\begin{enumerate}
\item For any $(\mb{n},m_x) \in \mb{N} \times \Sigma$:
\begin{equation} \label{eq-150}
\sum_{\substack{m_x' \in \Sigma \\ d \in D \\ \mb{n}' \in \mb{N}}} \mid
\delta(\mb{n},m_x,m_x',d,\mb{n}') \mid ^2 = 1.
\end{equation}
\item For any $(\mb{n}, \mb{m}, x), (\mb{n}', \mb{m}', x') \in \mb{N} \times \mb{M} \times \mbb{Z}$ with
$(\mb{n}, \mb{m}, x) \neq (\mb{n}', \mb{m}', x')$:
\begin{equation} \label{eq-160}
\sum_{\star}
\delta(\mb{n}',m_{x'}',m_{x'}^{\diamond},d',\mb{p})^{*}\delta(\mb{n},m_x,m_x^{\diamond},d,\mb{p})
= 0.
\end{equation}
\
\\
where, the summation $\underset{\star}{\sum}$ is taken over all
$\mb{p} \in \mb{N}$; $\mb{m}^{\diamond} \in \mb{M}$; $d, d' \in D$
and $x \in \mbb{Z}$ such that $x + d = x' + d'$.
\end{enumerate}
\
\\
Equation (\ref{eq-150}) is quantum counterpart for equation
(\ref{eq-115}), under the relation between probability and
probability amplitude. Equation (\ref{eq-160}) is quantum
counterpart for equation (\ref{eq-100}), it means, the local
transition function $\delta$ should be reversible.
\section{Conclusion}
There are some ``classic'' models (deterministic, reversible,
probabilistic) equivalent (from the computability point of view) to
a Turing machine. Another model, the quantum Turing machine, can be
compared to these models. From a ``physics'' perspective, that is,
from the point of view of the evolution operator $U$, a quantum TM
can be seen as a reversible TM; and from a ``mathematical''
perspective, that is, from the point of view of the local transition
function $\delta$, a quantum TM can be seen as a
probabilistic and reversible TM.
\section{Acknowledgements}
The paper was financed by EAFIT University, under the research
project number $817407$.
\bibliographystyle{acm}
\section{Introduction}
\IEEEPARstart{N}{atural} scenes contain rich spectral information, so they can be collected as spatial-spectral three-dimensional (3D) cubes named hyperspectral images (HSIs), where two dimensions (2D) represent the spatial domain and another dimension represents the spectral domain. Since HSIs have more information than red--green--blue (RGB) images, they have been extensively used in food surveillance\cite{app1}, face recognition\cite{app2}, remote sensing\cite{app3}, biomedical imaging\cite{app4}, etc.
To collect HSIs, conventional imaging approaches use spectrometers to scan scenes along the spatial or spectral dimension, which imposes challenges on the scanning and storage of the datacube. However, the HSIs are highly redundant among the spectral dimension. These challenges may be solved by using compressed sensing (CS)\cite{CSo}. Based on the principles of CS, coded aperture snapshot spectral imaging (CASSI) was proposed\cite{cassi1, cassi2, cassi3, cassi4}. CASSI systems acquire 2D compressive multiplexed projection measurements instead of scanning all voxels in the HSIs. The remarkable advantage of CASSI is that the entire HSIs can be reconstructed with few measurements or even one snapshot. The coded aperture can be optimized to improve the reconstruction performance of CASSI\cite{ca1, ca2}. However, the image reconstruction is an ill-posed problem which exacerbates as the number of measurements decreases or the spectral channel of HSIs increases. In order to reconstruct 3D HSI from 2D measurements, regularization methods have to be applied\cite{reg}, which typically exploit some prior information of the scenes. In general, effective image priors are critical for CASSI reconstruction, such as total variation (TV)\cite{tv1, tv2}, sparsity\cite{sparse1, sparse2}, low-rank\cite{lowrank1, lowrank2}, and deep image prior (DIP)\cite{dip1, dip2}. In particular, the sparsity prior is not only one of the main principles of CS\cite{cassi2}, but also one of the most salient features of natural images\cite{natural1,natural2}. TV, low-rank and DIP priors are often used for image denoising. DIP has been widely used in recent years due to its excellent denoising ability.
Several image reconstruction algorithms have been proposed for CASSI. Traditional methods use iterative algorithms based on a regularization term. GPSR\cite{GPSR} and TwIST\cite{TwIST} are used to solve CASSI reconstruction problem based on sparsity prior. GAP-TV\cite{GAP-TV} solves the reconstruction problem with the TV prior. DeSCI\cite{DeSCI} solves the reconstruction problem based on the low-rank property and non-local self-similarity. However, these traditional algorithms are limited in reconstruction time and performance\cite{DGSMP}. The emergence of deep learning (DL) led to significant improvement in the reconstruction performance of CASSI.
At present, the reconstruction methods based on DL are mainly divided into supervised learning methods and unsupervised learning methods.
\begin{figure*}[!t]
\centering\includegraphics[width=16cm]{system_fig-eps-converted-to.pdf}\caption{Schematic of the CASSI system.}
\label{f1}
\end{figure*}
Supervised leaning methods consider the HSI reconstruction as a non-linear inverse mapping from the compressive measurements to a 3D datacube\cite{rl1,rl2,l-net,TSA-net,PnP,DGSMP}.
$ \lambda $-net\cite{l-net} introduces a two-stage reconstruction network to recover the HSI from a compressive measurement, where the HSI is reconstructed by a self-attention Generative Adversarial Network framework followed by a refinement stage.
TSA-Net\cite{TSA-net} uses three Spatial-Spectral
Self-Attention modules to jointly model the spatial and spectral correlation. PnP-HSI\cite{PnP} firstly trains a denoising network using the hyperspectral dataset, and then performs denoising processing on the results of the GAP-TV algorithm. DGSMP\cite{DGSMP} introduces an interpretable HSI reconstruction method based on Gaussian scale mixture prior.
The main difference between these supervised-based methods is their network architecture. These approaches treat the inverse imaging as a regression problem; however, they suffer from unsatisfactory reconstruction performance when the number of snapshots is low.
In addition, supervised-based methods are highly dependent on the dataset used. Since hyperspectral data is difficult to collect in large quantities, it is important to develop algorithms based on unsupervised learning. Furthermore, since there may be some small-variations in the acquisition of compressive measurements under different scenes, such as changes in coded aperture or scene noise, the model may not work leading to time-consuming retraining. Therefore, supervised-based methods are often faced with the problem of insufficient generalization ability.
Due to the aforementioned limitations of CASSI reconstruction algorithms based on supervised learning, the development of unsupervised learning algorithms is important.
PnP-DIP\cite{PNP-DIP} was proposed by the joint use of TV regularization and DIP.
Although PnP-DIP is an iterative algorithm whose final reconstruction result is the output of the DIP network, the final iterative result of the algorithm is unsatisfactory.
In addition, since both of the TV and DIP priors used in PnP-DIP are essentially for denoising, the algorithm has limitations in solving ill-posed inverse problems and can easily fall into a local minimum. In order to solve this problem, it is necessary to re-initialize the network parameters and increase the iteration number of network training each time the DIP is used, so as to find a better solution. However, the strategy of re-initializing the network parameters greatly increases the uncertainty of network reconstruction, and the quality of reconstruction result is also easily affected.
To sum up, it can be seen that although DIP has good image denoising and image representation capabilities, it easily falls into local minima and the optimization process is time-consuming. On the other hand, CS reconstruction based on sparsity priors can find the optimal solution, but the reconstruction performance is unsatisfactory in severely ill-posed problems. However, it is surprising that the sparsity prior and DIP can compensate for each other. DIP can further optimize the images reconstructed by CS according to the sparsity prior. The sparsity prior can be used to prevent the reconstruction results from falling into local minima. The bidirectional effect of DIP and the sparsity prior will make the reconstruction result in the ill-posed problem reach an optimal solution.
In this paper, we bridge the gap between sparsity prior and DIP, developing a fast alternating minimization algorithm based on the sparsity and deep image priors (Fama-SDIP). The proposed algorithm can avoid the reconstruction of HSI getting trapped in local minima.
Furthermore, there is no need to re-initialize the network parameters or increase iteration number of network training during the DIP training, which greatly improves the convergence speed and reconstruction performance.
According to the principle of CS, we constrain the images in their sparse domain for the purpose of accurate reconstruction.
By using the split Bregman algorithm, we integrate the denoising characteristic of DIP into the optimization process of CS, and establish a fast alternating minimization algorithm, which can
achieve the purpose of fast reconstruction of the inverse problem.
To the best of our knowledge, this is the first time that the sparsity prior and DIP are explicitly utilized in the CASSI reconstruction problem.
The remainder of this paper is organized as follows. Section II presents the CASSI forward model. Section III presents the principle of deep image prior. The reconstruction framework based on split Bregman algorithm is formulated in Section IV.
Section V presents the simulation and experimental results, and Section VI presents conclusion.
\section{Snapshot Measurement Model}
The CASSI system, which is mainly composed of a coded aperture, a prism and a gray-scale focal plane array, is used to acquire the 2D compressed measurements of the 3D spectral datacube. A concise schematic of the CASSI system is shown in Fig.~\ref{f1}. The spatial-spectral datacube of a scene is represented as $ \mathbf{X} \in \mathbb{R}^{M \times N \times L} $, where $ M, N $ denote spatial dimensions, and $ L $ denotes spectral dimension, respectively. The encoded datacube is acquired when the spatial information of the scene is first modulated by a coded aperture $ \mathbf{T} \in \mathbb{R}^{M \times N} $. Then, the encoded datacube is shifted along the horizontal way after passing through a prism. Next, the detector measures the coded shifted spectral datacube, where the spectral information is integrated along the spectral dimension leading to the 2D compressive measurement $ \mathbf{Y} \in \mathbb{R}^{M \times (N + L -1)} $.
The vectorized representation of the datacube and snapshot measurement is represented as $ \bm{x} = \text{vec}(\mathbf{X}) \in \mathbb{R}^{MNL \times 1} $ and $ \bm{y} = \text{vec}(\mathbf{Y}) \in \mathbb{R}^{M (N + L -1) \times 1} $ respectively. Then, the forward model of CASSI can be written in the following matrix form:
\begin{equation}\label{e1}
\bm{y} = \mathbf{H}\bm{x} + \bm{\omega},
\end{equation}
where $ \mathbf{H} \in \mathbb{R}^{M (N + L -1) \times MNL} $ and $ \bm{\omega} \in \mathbb{R}^{M \times (N + L -1)} $ denote the sensing matrix and the sensing/system
noise, respectively. The system matrix denotes the joint effects of the coded aperture and the prism. In order to further explain the system matrix, we take a specific scenario with two snapshots as an example: we set the datacube dimensions to $ M = N = 6, L = 3 $, and the transmittance of the coded apertures follows a Bernoulli distribution at 50\%. Then, the structure of this system matrix is shown in Fig.~\ref{f2}. It can be observed that the matrix $ \mathbf{H} $ is sparse and highly structured, which provides robust conditions for CS reconstruction.
\begin{figure}[!t]
\centering\includegraphics[width=8cm]{system_m-eps-converted-to.pdf} \caption{The system matrix $ \mathbf{H} $. The figure depicts the sensing of three spectral bands and two snapshots.}
\label{f2}
\end{figure}
The goal in CASSI is to reconstruct HSI from the compressive measurements by solving the ill-posed
inverse problem.
Due to the sparsity property of HSI, it can be reconstructed according to the principle of CS. Suppose that the HSI can be sparsely represented on an orthonormal basis $ \mathbf{\Psi} $, i.e., $ \bm{x} = \mathbf{\Psi}\bm{\theta} $, where $ \bm{\theta} $ is composed of a small number of significant coefficients, referred to as the sparse coefficient vector of the HSI. The sparse basis $ \mathbf{\Psi} $ is often generated by the Kronecker product of a two-dimensional-wavelet Symmlet-8 basis in the $ M - N $ plane and a 1D discrete Fourier transform basis along the $ L $-axis.
According to the CS theory, HSI can be reconstructed by solving the $ l_1 $ minimization problem:
\begin{equation}\label{3-1}
\arg \underset{\bm{x}}{\min}~\bigg\{\frac{1}{2}\|\bm{y}-\textbf{H}\bm{x} \|_2^2 + \xi_1\|\bm{\theta}\|_1 \bigg\},
\end{equation}
where $ \xi_1 $ is the regularization parameter, and sparse coefficient vector can be calculated as $ \bm{\theta} = \mathbf{\Psi}^T \bm{x} $.
\section{Deep Image Prior}
Deep image priors were originally proposed for image restoration tasks such as image denoising, image super-resolution, image inpainting\cite{dip1}. It shows that a randomly-initialized neural network can be used as a handcrafted prior, which can get excellent results in many image restoration tasks. Given a degraded image, a good image reconstruction can be obtained after DIP training. According to DIP, HSI can be estimated by a neural network, i.e., $ \bm{x}_{DIP}=f_\lambda(\bm{z}) $, where $ \bm{x}_{DIP} $ is the optimized result by DIP, and $ \bm{z} $ is a fixed random code vector with the same spatial size as $ \bm{x}_{DIP} $, and $ \lambda $ represents the parameters of the neural network. According to (\ref{3-1}), CASSI reconstruction problem can be solved by DIP:
\begin{equation}\label{3-2}
\arg \underset{\lambda}{\min}~\frac{1}{2}\|\bm{y}-\textbf{H}f_\lambda(\bm{z}) \|_2^2 .
\end{equation}
However, the problem of CASSI reconstruction is ill-posed, and if the network is trained without intervention, it will easily fall into a local minimum. In addition, increasing the number of training iterations of the network can be helpful for reconstruction; however, it will increase the computational burden and still unavoidably lead to a local minimum. To address this problem, we design an alternating iterative algorithm that incorporates both the sparsity prior and DIP.
Assume that the output of a certain iteration is $ \bm{x} $, which can be used as the reference value of DIP.
Since the image is constrained in the sparse domain, the output of each iteration $ \bm{x} $ will gradually approach the optimal solution. Therefore, during DIP training, another fidelity term can be added, which is formulated as
\begin{equation}\label{3-3}
\arg \underset{\lambda}{\min}~\frac{1}{2}\|f_\lambda(\bm{z}) - \bm{x} \|_2^2 .
\end{equation}
The joint effect of (\ref{3-2}) and (\ref{3-3}) will enable DIP to denoise the output $ \bm{x} $. Due to the ``supervision'' of $ \bm{x} $, the network of DIP can get the best parameters without re-initializing, which greatly shortens the training time.
Combining (\ref{3-1}), (\ref{3-2}), (\ref{3-3}), the CASSI reconstruction problem can be expressed as
\begin{equation}\label{3-4}
\begin{aligned}
\arg \underset{\bm{x}, \lambda}{\min}~\bigg\{\frac{1}{2}\|\bm{y}-\textbf{H}\bm{x}\|_2^2 +\xi_1 \| \bm{\theta} \|_1 + \frac{1}{2} \|\bm{y}-\textbf{H} f_\lambda(\bm{z}) \|_2^2 &\\+ \frac{\eta}{2} \|f_\lambda(\bm{z})-\bm{x} \|_2^2 \bigg\},
\end{aligned}
\end{equation}
where $ \eta $ is the regularization parameter.
\section{Image reconstruction framework based on split Bregman algorithm}
\begin{figure*}[!t]
\centering\includegraphics[width=16cm]{network2-eps-converted-to.pdf} \caption{Schematic diagram of the neural network structure used for solving $ \textbf{Step 2} $. The input is progressively downsampled by factor of 2 at each scale (e.g. $ M_4 = M_1 / 8 $).}
\label{network}
\end{figure*}
In this section, we develop a fast alternating minimization algorithm based on split Bregman algorithm framework to solve for the HSI reconstruction problem in (\ref{3-4}), where the inverse problem is split into several sub-problems, and the sparse regularization term is replaced by the Bregman distance. Firstly, we introduce an auxiliary variable $ \bm{e} $ with the same dimension as $ \bm{y} $ to relax the $ l_2 $-norm by using the add-residual-back iterative scheme. To efficiently handle the non-differentiable norms, the arguments of the $ l_1 $-norm is replaced by the auxiliary variables $ \bm{c} = \bm{\theta} = \mathbf{\Psi}^T \bm{x} $. In addition, the add-residual-back iterative scheme is adopted to relax the $ l_1 $-norm into $ l_2 $-norm by introducing the auxiliary variable $ \bm{w} $, which indicates the difference between $ \bm{c} $ and $ \bm{\theta} $. Similarly, we introduce an auxiliary variable $ \bm{b} $ with the same dimension as $ \bm{x} $ to relax the $ l_2 $-norm. Finally, the CASSI reconstruction problem is modified to
\begin{equation}\label{4-1}
\begin{aligned}
\hat{\bm{x}} &= \arg \underset{\bm{x},\lambda,\bm{e},\bm{c},\bm{w},\bm{b}}{\min}~\bigg\{\frac{1}{2}\|\bm{y}-\textbf{H}\bm{x}+\bm{e} \|_2^2 + \frac{\xi}{2} \|\bm{c} - \bm{\theta} - \bm{w} \|_2^2 \\&+\xi_1 \| \bm{c} \|_1 + \frac{1}{2} \|\bm{y}-\textbf{H} f_\lambda(\bm{z}) \|_2^2 + \frac{\eta}{2} \|f_\lambda(\bm{z})-\bm{x}+\bm{b} \|_2^2 \bigg\},
\end{aligned}
\end{equation}
where $ \xi $ is a regularization parameter.
The solution of (\ref{4-1}) includes the following four steps.
$ \textbf{Step 1} $. Update the vector $ \bm{x} $ and the auxiliary variable $ \bm{e} $:
\begin{equation}\label{4-2}
\begin{aligned}
&\bm{x}^{n+1} = \arg \underset{\Delta\bm{x}}{\min}~\bigg\{\frac{1}{2}\|\bm{y}-\textbf{H}(\bm{x}^n+\Delta\bm{x})+\bm{e}^n \|_2^2 \\&+ \frac{\xi}{2} \|\bm{c}^n - \bm{\theta}^n - \bm{w}^n \|_2^2
+ \frac{\eta}{2} \|f_{\lambda^n}(\bm{z})-(\bm{x}^n+\Delta\bm{x})+\bm{b}^n \|_2^2 \bigg\},
\end{aligned}
\end{equation}
\begin{equation}\label{4-3}
\bm{e}^{n+1} = \bm{e}^n + \bm{y} - \textbf{H}\bm{x}^{n+1},
\end{equation}
where \textit{n} indicates the iteration number.
$ \textbf{Step 2} $. Update the parameters of neural network $ \lambda $ and the auxiliary variables $ \bm{b} $:
\begin{equation}\label{4-4}
\begin{aligned}
\lambda^{n+1} &= \arg \underset{\Delta\lambda}{\min}~\bigg\{\frac{1}{2} \|\bm{y}-\textbf{H} f_{(\lambda^n+\Delta\lambda)}(\bm{z}) \|_2^2 \\&+ \frac{\eta}{2} \|f_{(\lambda^n+\Delta\lambda)}(\bm{z})-\bm{x}^{n+1}+\bm{b}^{n} \|_2^2 \bigg\},
\end{aligned}
\end{equation}
\begin{equation}\label{4-5}
\bm{b}^{n+1} = \bm{b}^n + f_{\lambda^{n+1}}(\bm{z}) - \bm{x}^{n+1}.
\end{equation}
$ \textbf{Step 3} $. Update the auxiliary variables $ \bm{c} $ and $ \bm{w} $:
\begin{equation}\label{4-8-1}
\begin{aligned}
\bm{c}^{n+1} &= \arg \underset{\Delta\bm{c}}{\min}~\bigg\{\xi_1 \| (\bm{c}^n+\Delta\bm{c}) \|_1 \\&+ \frac{\xi}{2} \|(\bm{c}^n+\Delta\bm{c}) - \bm{\theta}^{n+1} - \bm{w}^n \|_2^2 \bigg\},
\end{aligned}
\end{equation}
\begin{equation}\label{4-9-1}
\bm{w}^{n+1} = \bm{w}^n + \bm{\theta}^{n+1} - \bm{c}^{n+1}.
\end{equation}
$ \textbf{Step 4} $. Return $\textbf{Step 1} $ until the algorithm converges or the maximum number of iterations is reached.
$ \textbf{Solve Step 1:} $ The quadratic optimization problem in (\ref{4-2}) has a closed-form solution formulated as:
\begin{equation}\label{4-7-1}
\begin{aligned}
\bm{x} &= (\textbf{H}^T\textbf{H} + \eta\textbf{I} + \xi\textbf{I})^{-1}[\textbf{H}^T(\bm{y}+\bm{e})+\eta(f_{\lambda}(\bm{z})+\bm{b})\\&+\xi\bm{\Psi}(\bm{c}-\bm{w})] .
\end{aligned}
\end{equation}
Due to fact that $ \textbf{H} $ is a fat matrix, the matrix inversion formula is employed to simplify the calculation by use of the Woodbury matrix identity:
\begin{equation}\label{4-7-2}
\begin{aligned}
(\textbf{H}^T\textbf{H} + \eta\textbf{I} + \xi\textbf{I})^{-1} &= (\eta + \xi)^{-1}\textbf{I}-(\eta + \xi)^{-1}\textbf{H}^T\\&(\textbf{I}+\textbf{H}(\eta + \xi)^{-1}\textbf{H}^T)^{-1}\textbf{H}(\eta + \xi)^{-1} .
\end{aligned}
\end{equation}
Plugging (\ref{4-7-2}) into (\ref{4-7-1}), the solution of $ \bm{x} $ can be obtained by
\begin{equation}\label{4-7-3}
\begin{split}
&\bm{\alpha} \overset{\rm{def}}{=} (\eta + \xi)^{-1}[\eta(f_{\lambda}(\bm{z})+\bm{b})+\xi\bm{\Psi}(\bm{c}-\bm{w})],
\\&\bm{x} = \bm{\alpha} + \textbf{H}^T(\bm{y}-\textbf{H}\bm{\alpha}+\bm{e}) \oslash (\rm{Diag}(\textbf{H}\textbf{H}^T)+\eta\textbf{I} + \xi\textbf{I}),
\end{split}
\end{equation}
where $ \oslash $ represents the operation of element-wise division, and $ \rm{Diag}() $ denotes the operation of extracting the diagonal elements.
$ \textbf{Solve Step 2:} $ For the implementation of DIP, we use a U-net \cite{U-net} without skip connections, which is a similar network structure as in \cite{PNP-DIP}. The schematic diagram of the neural network structure is shown in Fig.~\ref{network}.
The two quadratic optimization problems in (\ref{4-4}) can be made equivalent to two loss functions of the neural network. The first loss function is to reduce the measurement error $ Loss_y = \left|\bm{y}-\textbf{H}f_\lambda(\bm{z}) \right| $ according to the projection measurement value. The second loss function is denoted as $ Loss_x = \left|f_\lambda(\bm{z}) - \bm{x} \right| $, which can make the network output close to the reference value, so that the network jumps out of the local minimum. In addition, by restricting the output of the neural network to be close to $ \bm{x} $, the value of the auxiliary variable $ \bm{b} $ can also be minimized. Therefore, the loss function of the network is set as $ Loss = Loss_y + Loss_x $.
For simplicity, we do not add any balance weights between the two losses. In addition, $ \bm{z} $ generated by uniform noise is a fixed input variable of the neural network and of the same spatial size as $ \bm{x} $. Then, \textbf{Step 2} can be solved by training the neural network with back-propagation algorithm.
$ \textbf{Solve Step 3:} $
According to \cite{FIST,IST2}, the problem in (\ref{4-8-1}) can be solved by using the iterative soft thresholding algorithm, which can be formulated as
\begin{equation}\label{4-12}
\bm{c}^{n+1} = \text{Soft}\bigg\{\bm{c}^n - t\xi \times (\bm{c}^n-\bm{\theta}^{n+1}-\bm{w}^n), \frac{\xi_1}{\xi}\bigg\},
\end{equation}
where $ \xi_1 / \xi \geq 0 $, $ \bm{\theta}^{n+1} = \bm{\Psi}^T \bm{x}^{n+1} $, $ t $ is an appropriate stepsize, and $ \text{Soft}\{\cdot,\cdot\} $ is the soft-shrink operator. For an arbitrary vector $ \bm{v} \in \mathbb{R}^{N \times 1} $, the shrink operation is defined as
\begin{equation}\label{4-13}
\text{Soft}\bigg\{\bm{v},\frac{\xi_1}{\xi}\bigg\} = \text{sgn}(\bm{v})\odot\max\bigg\{ \left|\bm{v}\right|-\frac{\xi_1}{\xi}, 0 \bigg\},
\end{equation}
where $ \text{sgn}() $ is Sign function.
For simplicity, (\ref{4-12}) can be rewritten as
\begin{equation}\label{4-12-2}
\bm{c}^{n+1} = \text{Soft}\bigg\{(1-t') \times \bm{c}^n +t' \times (\bm{\theta}^{n+1}+\bm{w}^n), \frac{\xi_1}{\xi}\bigg\},
\end{equation}
where $ t'=t\xi $.
Following the abovementioned procedures, we have solved the above three steps, which can be solved efficiently by solving each sub-problem separately, leading to a stable solution.
\begin{figure*}[!t]
\centering\includegraphics[width=16cm]{dataset-eps-converted-to.pdf} \caption{Spectral data scenes from (a) ICVL and (b) KAIST data sets used in simulations. }
\label{f4-1}
\end{figure*}
\section{Results}
Extensive experiments are presented in this section to validate the performance of our proposed Fama-SDIP algorithm. First, we conduct ample simulations to compare Fama-SDIP with other competitive methods including supervised-based methods. Then we build the CASSI experimental platform and perform real experiments on the platform to verify our proposed algorithm. In the simulations and in the real experiments, only one snapshot is used. In our simulations and real experiments, the parameters are uniformly set as follows: $ \xi_1=10 $, $ \xi=8 $, $ \eta=10 $, and $ t'=0.95 $. The initial value of $ \bm{x} $ is set as $ \bm{x}=\mathbf{H}^T\bm{y} $, and the initial value of $ f_\lambda(\bm{z}) $ is set as $ f_\lambda(\bm{z}) = \mathbf{H}^T\bm{y} $. In order to verify the robustness of the proposed algorithm, we use the same neural network model in both simulations and real experiments. It is worth noting that in our experiments, there is only a one-pixel shift between the neighboring spectral channels, because for higher shift steps, the system will have more noise and require more accurate modeling\cite{hmodel}.
In the training process of DIP, Adam\cite{Adam} optimizer $ (\beta_1=0.9$ and $\beta_2=0.999) $ is used. The learning rate is set to be 0.002, and the weight decay is set as $ 1\text{e}^{-4} $. In our implementation, the maximum iteration number of Fama-SDIP is set as 45, and the DIP inner loop is set as 100 times in each iteration.
In order to ensure the robustness of the algorithm, we should ensure that the initial $ Loss_y $ is close to the final $ Loss_y $ of the previous DIP output each time we solve Step 2; otherwise, we first optimize the network to make it close to the final $ Loss_y $ of the previous DIP output.
\subsection{Simulation Results}
The publicly available datasets ICVL\cite{ICVL} and KAIST\cite{KAIST} are employed for simulations. The ICVL data with spatial size $ 1392 \times 1300 $ and the KAIST data with spatial size $ 2704 \times 3376 $ have same 31 spectral bands, and both of their wavelengths are in the range from 400 to 700 nm at a step of 10 nm.
In each data set, we select eight scenes identical to those in \cite{PnP}. Then, we crop the data sets to a spatial size of $ 256 \times 256 $ as shown in Fig.~\ref{f4-1}.
We compare our proposed algorithm with other leading algorithms, including three traditional algorithms, i.e.\ GPSR\cite{GPSR}, TwIST\cite{TwIST}, GAP-TV\cite{GAP-TV}, two DL methods based on supervised learning, i.e.\ PnP-HSI\cite{PnP}, DGSMP\cite{DGSMP}, and one DL method based on unsupervised learning, i.e.\ PnP-DIP\cite{PNP-DIP}.
For the sake of fair comparison, we divide each data by its maximum value so that its image peak is 1, and we use the same real coded aperture as in\cite{DGSMP,PNP-DIP} to generate the compressive measurements.
We apply the peak signal to noise ratio (PSNR) and structural similarity (SSIM)\cite{ssim} as the objective quality metrics to evaluate the quality of reconstructed spectral datacube.
\begin{table*}[!t]
\caption{Quantitative results on 16 Simulation Scenes (8 from ICVL and 8 from KAIST). PSNR and SSIM are reported. \label{table1}}
\centering
\begin{tabular}{|c||c||c||c||c||c||c||c|}
\hline
Algorithms & GPSR & TwIST & GAP-TV & DGSMP & PnP-HSI & PnP-DIP & Proposed Fama-SDIP \\
\hline
Scene 1 & 27.16, 0.879 & 27.09, 0.879 & 28.95, 0.913 & 23.94, 0.825 & 29.36, 0.909 & 30.19, 0.915 & 34.74, 0.965 \\
\hline
Scene 2 & 23.04, 0.848 & 23.39, 0.851 & 25.50, 0.893 & 27.71, 0.923 & 26.96, 0.911 & 32.41, 0.948 & 36.13, 0.979 \\
\hline
Scene 3 & 25.79, 0.937 & 26.43, 0.941 & 38.67, 0.988 & 34.66, 0.962 & 38.83, 0.986 & 38.54, 0.981 & 43.36, 0.994 \\
\hline
Scene 4 & 26.20, 0.871 & 27.00, 0.880 & 29.28, 0.920 & 29.76, 0.924 & 29.87, 0.923 & 31.24, 0.929 & 33.99, 0.959 \\
\hline
Scene 5 & 21.14, 0.709 & 21.30, 0.716 & 22.71, 0.778 & 25.46, 0.872 & 23.44, 0.796 & 28.35, 0.903 & 30.48, 0.939 \\
\hline
Scene 6 & 21.55, 0.732 & 21.58, 0.732 & 23.94, 0.831 & 24.88, 0.858 & 24.78, 0.847 & 28.20, 0.894 & 32.48, 0.958 \\
\hline
Scene 7 & 26.32, 0.902 & 26.28, 0.901 & 28.72, 0.938 & 23.87, 0.777 & 29.91, 0.944 & 31.24, 0.929 & 35.03, 0.975\\
\hline
Scene 8 & 28.56, 0.895 & 29.51, 0.902 & 31.28, 0.930 & 30.38, 0.938 & 32.04, 0.937 & 34.87, 0.962 & 37.12, 0.978 \\
\hline
Scene 9 & 20.78, 0.766 & 22.41, 0.797 & 26.35, 0.907 & 27.29, 0.888 & 28.81, 0.939 & 30.65, 0.905 & 36.38, 0.977\\
\hline
Scene 10 & 24.66, 0.852 & 24.75, 0.854 & 28.09, 0.922 & 20.37, 0.653 & 28.03, 0.904 & 28.90, 0.911 & 32.45, 0.960\\
\hline
Scene 11 & 26.86, 0.887 & 27.05, 0.889 & 27.59, 0.926 & 30.44, 0.939 & 30.07, 0.951 & 32.37, 0.927 & 37.33, 0.980\\
\hline
Scene 12 & 21.70, 0.825 & 21.20, 0.706 & 23.63, 0.809 & 25.29, 0.836 & 24.39, 0.832 & 30.34, 0.932 & 32.95, 0.970\\
\hline
Scene 13 & 18.66, 0.711 & 19.45, 0.735 & 23.26, 0.857 & 23.44, 0.857 & 24.82, 0.881 & 30.82, 0.923 & 34.19, 0.974\\
\hline
Scene 14 & 24.17, 0.866 & 24.87, 0.877 & 27.36, 0.932 & 22.63, 0.760 & 28.02, 0.939 & 29.05, 0.926 & 32.13, 0.973\\
\hline
Scene 15 & 22.97, 0.805 & 23.30, 0.812 & 26.22, 0.904 & 25.73, 0.832 & 26.74, 0.918 & 29.75, 0.902 & 33.73, 0.972\\
\hline
Scene 16 & 19.03, 0.715 & 19.30, 0.725 & 19.16, 0.747 & 24.04, 0.839 & 20.59, 0.794 & 28.88, 0.921 & 32.38, 0.963\\
\hline
Average & 23.66, 0.825 & 24.06, 0.825 & 26.92, 0.887 & 26.23, 0.855 & 27.92, 0.901 & 30.99, 0.926 & 34.68, 0.970\\
\hline
\end{tabular}
\end{table*}
The performance comparisons on the sixteen benchmark scenes are given in Table \ref{table1}, using different algorithms, i.e., GPSR\cite{GPSR}, TwIST\cite{TwIST}, GAP-TV\cite{GAP-TV}, DGSMP\cite{DGSMP}, PnP-HSI\cite{PnP}, PnP-DIP\cite{PNP-DIP} and our proposed Fama-SDIP.
It can be seen that the PSNR and SSIM values of our proposed Fama-SDIP are much higher than other reconstruction algorithms.
Since there is just a one-pixel shift between the neighboring spectral channels, the compression ratio is increased compared with the case of a two-pixel shift, so the supervised learning method DGSMP has a significant decrease in the reconstruction accuracy. Compared with the unsupervised learning method PnP-DIP, the proposed Fama-SDIP shows an improvement of up to 3.6\,dB in average PSNR for the obtained reconstructions.
Figure \ref{f4-2} plots selected reconstructed scenes of Fama-SDIP compared with GPSR, TwIST, GAP-TV, DGSMP, PnP-HSI and PnP-DIP.
\begin{figure*}[!t]
\centering\includegraphics[width=18cm]{result-eps-converted-to.pdf} \caption{Reconstructed simulation HSIs comparisons of Scene 7 and 9 with 4 out of 31 spectral channels.
The reconstructed spectral curves on selected regions are shown for comparing the spectral accuracy of different algorithms. The correlation of the reconstructed spectra is shown in the legends.}
\label{f4-2}
\end{figure*}
We can observe from the reconstructed HSIs and the magnified patches within the white boxes that previous methods are less favorable for recovering HSI details, and their reconstruction results all produce different degrees of image blur and artifacts. In contrast, Fama-SDIP produces sharper borders and better image details because the image is optimized towards the optimal solution under the bidirectional constraints of DIP and sparsity prior, leading to the state-of-the-art results on both PSNR and SSIM.
Furthermore, we also plot the reconstructed spectral curves of
two selected regions and calculate the correlations with the reference spectra. It can be seen that Fama-SDIP provides more accurate spectra.
\subsection{Running Time}
Table \ref{table2} compares the running times on the sixteen data sets for training the models and reconstructing the HSIs by the proposed Fama-SDIP method and other methods. All of the simulations are carried out on a computer with an Intel
Core i7-8700K CPU, 16GB of RAM, and an Nvidia RTX 2080Ti GPU.
Although the proposed method does not improve on the reconstruction time compared with the supervised learning method represented by DGSMP, it takes more than 10 days for DGSMP to train a model, which will be a great challenge for practical use. Compared to the unsupervised learning method represented by PnP-DIP, the proposed Fama-SDIP method can achieve more than 10-fold speedup and provides better results.
\begin{table}[!t]
\caption{Runtimes of training or reconstruction. \label{table2}}
\centering
\begin{tabular}{|c||c||c|}
\hline
Algorithms & Training & Reconstruction \\
\hline
GPSR & - & 29.07 min \\
\hline
TwIST & - & 31.79 min \\
\hline
GAP-TV & - & 2.32 min \\
\hline
DGSMP & 10 days & 0.13 sec \\
\hline
PnP-HSI & 3 days & 2.23 min \\
\hline
PnP-DIP & - & 115.63 min \\
\hline
Fama-SDIP & - & 10.75 min \\
\hline
\end{tabular}
\end{table}
\subsection{Real data Results}
In this section, we apply the proposed Fama-SDIP algorithm to our real CASSI system as shown in Fig.~\ref{f5-1}. The system includes a light source (Zolix GLORIA-X500A), an imaging lens (Thorlabs AC254-100-A-ML), bandpass filters (Daheng Optics GCC-300117 \& GCC-211002), a digital micromirror device (DMD) (Texas Instruments DLP9500), a relay lens (Edmund Optics \#45-762), a dispersive prism (double Amici prism designed in \cite{prism}), and a detector (Basler acA2040-90um).
Note that the parameters we used in the real experiment are exactly the same as in the simulation.
In order to obtain robust reconstruction results, we can firstly use sparsity prior in the image reconstruction framework to obtain a result, which serves as a warm starting point for Fama-SDIP.
\begin{figure*}[!t]
\centering\includegraphics[width=15cm]{realsystem-eps-converted-to.pdf} \caption{Our proof-of-concept test bed of CASSI.}
\label{f5-1}
\end{figure*}
The spectral cube in our experiment contains 18 spectral channels with the spatial size of 512$ \times $512, and its wavelengths are in the range from 550nm to 652nm.
Since a data set of real scenes is difficult to obtain, the DGSMP algorithm is hard to implement. In addition, the PnP-HSI algorithm uses an HSI denoising network pretrained on the simulation data, so it can be used in our comparative experiments. As shown in Fig.~\ref{f5-2}, the reconstruction results of different algorithms are displayed. It can be seen that compared with the other five algorithms, our reconstruction has fewer artifacts and less noise. Furthermore, our reconstruction is able to distinguish the spectral features of different channels, which are clearly displayed around the wavelength of 614nm. This also means that our proposed algorithm can better handle the ill-posed inverse reconstruction problem. It is worth noting that the reconstructed images of PnP-HSI are worse than those attained by GAP-TV, which shows that algorithms based on supervised learning may not work in real scenes.
In addition, we select three regions to display the corresponding recovered spectral curves, and the results also show that our proposed algorithm has higher spectral accuracy.
\begin{figure*}[!t]
\centering\includegraphics[width=18cm]{compare_real-eps-converted-to.pdf} \caption{Reconstructed spectral images of real data.}
\label{f5-2}
\end{figure*}
\section{Conclusion}
This paper developed a fast alternating minimization algorithm for coded aperture snapshot spectral imaging. Through synergistically utilizing the sparsity and deep image priors, the ill-posed reconstruction problem can be solved by using the split Bregman algorithm. The proposed method can effectively reconstruct HSI within a relatively short period of time and does not need any training dataset. We verified the effectiveness and robustness of the proposed algorithm in both simulations and real experiments, in which we use the same set of parameters to achieve state-of-the-art results.
\section*{Acknowledgments}
The author would like to thank the China Scholarship Council (202106030517).
%
\section{Introduction}
\input{intro}
\section{Stochastic volatility models}
\noindent We consider the following class of stochastic volatility models: assume that asset spot price $0 \leq S(t)< \infty$ and variance $0 \leq \sigma(t)< \infty$ follow two stochastic diffusive processes for $t\in[0,T]$,
\begin{subequations}
\label{eq:SVmodels}
\begin{align}
dS(t)&=\bar{\mu} S(t)dt + \sqrt{\sigma (t)} S(t) dW^{(1)}(t), \\
d\sigma(t)&=\widetilde{\kappa}(\sigma(t))^\alpha(\widetilde{\theta}-\sigma(t))dt + v(\sigma(t))^\beta dW^{(2)}(t) ,
\end{align}
\end{subequations}
which are characterised by two Brownian motions, $dW^{(1)}(t)$ and
$dW^{(2)}(t)$, with constant correlation parameter
$dW^{(1)}(t)dW^{(2)}(t)=\rho dt$.
The drift coefficient for stochastic asset returns is given by the mean return of the asset where $\bar{\mu}\in \mathbb{R}$ and the diffusion coefficient is given by $\sqrt{\sigma (t)}S(t)$.
The drift coefficient of the asset variance is given by
$\widetilde{\kappa}(\sigma(t))^\alpha(\widetilde{\theta}-\sigma(t))$,
where constants $\widetilde{\kappa} \geq 0$ and $\widetilde{\theta}
\geq 0$ are the mean reversion speed of $\sigma(t)$ and the long run
mean of $\sigma(t)$, respectively. The diffusion
coefficient is given by $v(\sigma(t))^\beta$ where constant $v\geq 0$
is the volatility of volatility. The
constant riskless interest rate is denoted by $r \geq 0.$
The constants $\alpha,\beta$
determine the stochastic volatility model used.
The class of stochastic volatility models \eqref{eq:SVmodels} includes
a number of known stochastic volatility models:
The most prominent stochastic volatility model, the \textit{Heston
model} \cite{Hes93} (also called \textit{square root (SQR) model}) specifies the variance by
$$d\sigma(t) = \widetilde{\kappa} \left(\widetilde{\theta} -\sigma(t) \right) {\rm d}t + v \sqrt{\sigma(t)} {\rm d}W^{(2)}(t).$$
Other known stochastic volatility models include the \textit{GARCH}
(or \textit{VAR model}) model, see \cite{Duan95}, where the stochastic variance is modelled by
$$d\sigma(t) = \widetilde{\kappa} \left(\widetilde{\theta} -\sigma(t) \right) {\rm d}t + v \sigma(t) {\rm d}W^{(2)}(t),$$
and the \textit{3/2 model} \cite{Lewis00} in which the variance follows the process
$$d\sigma(t) = \widetilde{\kappa} \left(\widetilde{\theta} -\sigma(t) \right) {\rm d}t + v \sigma^{\frac{3}{2}}(t) {\rm d}W^{(2)}(t).$$
All of the three stochastic volatility models mentioned above use a linear mean-reverting drift for the stochastic process of the variance $\sigma(t)$,
but there are also models, in which the drift is mean reverting in a
non-linear fashion.
Following \cite{ChJaMi08}, we denote these models with an additional ``N'':
in the \textit{SQRN model} the stochastic variance follows
$$d\sigma(t) = \widetilde{\kappa} \sigma(t)\left(\widetilde{\theta} -\sigma(t) \right) {\rm d}t + v \sqrt{\sigma(t)} {\rm d}W^{(2)}(t) ,$$
in the \textit{VARN model}
$$d\sigma(t) = \widetilde{\kappa} \sigma(t)\left(\widetilde{\theta} -\sigma(t) \right) {\rm d}t + v \sigma(t) {\rm d}W^{(2)}(t) ,$$
and in the \textit{$3/2$-N model}
$$d\sigma(t) = \widetilde{\kappa} \sigma(t)\left(\widetilde{\theta} -\sigma(t) \right) {\rm d}t + v \sigma^{\frac{3}{2}}(t) {\rm d}W^{(2)}(t) ,$$
see \cite{ChJaMi08}.
Applying standard arbitrage arguments and It\^{o}'s lemma to the
class of stochastic volatility models \eqref{eq:SVmodels}, we can
derive the following second order partial differential equation for
any financial derivative $V(S,\sigma,t)$, to be solved backwards in
time with $0 < S < \infty $, $0 < \sigma < \infty$, $t \in [0,T)$:
\begin{equation}
\label{PDE}
V_{t} + \frac{S^2\sigma}{2}V_{SS} + \rho v \sigma^{\beta+\frac{1}{2}} S V_{S\sigma} + \frac{v^2\sigma^{2\beta}}{2}V_{\sigma\sigma} + rSV_{S} +[\widetilde{\kappa}\sigma^{\alpha}(\widetilde{\theta} - \sigma) - \lambda(S,\sigma, t)]V_{\sigma} -rV = 0 .
\end{equation}
Here, $\lambda (S,\sigma ,t) $ is the market price of volatility
risk which is usually assumed to be proportional to the variance:
$\lambda (S,\sigma,t) = \lambda_0 \sigma(t) $, where $\lambda_0 \in
\mathbb{R}$. The boundary conditions and final condition are
determined by the type of financial derivative $V(S,\sigma,t)$ we are
solving for. For example, in the case of the European Put Option:
\begin{align*}
V(S,\sigma, T) &= \max(E-S,0), & 0<&S<\infty, \; 0<\sigma<\infty,\\
\lim_{S\to\infty}V(S,\sigma, t) &= 0, & 0<&\sigma<\infty, \; 0< t<T,\\
V(0,\sigma, t) &= E\exp(-r(T-t)), & 0<&\sigma<\infty, \; 0< t<T,\\
\lim_{\sigma\to\infty}V_{\sigma}(S,\sigma, t) &= 0, & 0<&S<\infty,\; 0< t<T,\\
\end{align*}
The remaining boundary condition at $\sigma=0$ can be obtained by looking at
the formal limit $\sigma\to 0$ in \eqref{PDE}, i.e.,
\begin{equation}
V_t+rSV_S+\kappa^*\theta^* V_\sigma-rV= 0,\quad T> t\geq 0,\;S>0,\; \text{as } \sigma\to 0.
\label{boundary3}
\end{equation}
This boundary condition is used frequently, e.g.\ in \cite{IkoToi07,ZvFoVe98}.
Alternatively, one can use a homogeneous Neumann condition
\cite{ClaPar99}, i.e.,
\begin{equation}
V_{\sigma}(S,0,t) = 0, \quad 0<S<\infty, \; 0 < t<T.
\end{equation}
\medskip
\noindent By using a change of variables:
$$ x=\ln\frac{S}{E},\hskip10pt y = \frac{\sigma}{v}, \hskip10pt \tau=T-t, \hskip8pt u = \exp(r\tau)\frac{V}{E}, \hskip10pt \kappa = \widetilde{\kappa}+\lambda_{0}, \hskip10pt \theta = \frac{\widetilde{\kappa}\widetilde{\theta}}{\widetilde{\kappa}+\lambda_{0}}, $$
we transform the partial differential equation to a convection-diffusion equation in two spatial dimensions with a mixed derivative term. The transformed partial differential equation and boundary/initial conditions are now satisfied by $u(x,y,\tau)$, where $x \in \mathbb{R}$, $y >0$, $\tau \in (0,T]$:
\begin{equation} \label{PDEtransf}
u_{\tau}=\frac{vy}{2}u_{xx}+\frac{(vy)^{2\beta}}{2}u_{yy} + \rho (v
y)^{\beta+\frac{1}{2}} u _{xy} + \Big(r-\frac{vy}{2}\Big)u_{x} +
\kappa(vy)^{\alpha}\frac{\theta - vy}{v}u_y,
\end{equation}
\begin{subequations}
\begin{align}
u(x,y,0) &= \max(1-\exp(x),0), & -\infty<&x<\infty, 0<y<\infty, \\
\lim_{x\to\infty}u(x,y,\tau) &= 0 , & 0<&y<\infty, 0\leq \tau<T ,\\
\lim_{x\to -\infty}u(x,y,\tau) &= 1 , & 0<&y<\infty, 0\leq
\tau<T ,\\
\label{bcymax}
\lim_{y\to \infty}u_{y}(x,y,\tau)&=0 , & -\infty<&x<\infty, 0<
\tau\leq T,\\
\label{bcymin}
\lim_{y\to 0}u_{y}(x,y,\tau)&=0 , & -\infty<&x<\infty, 0< \tau\leq T.
\end{align}
\end{subequations}
In order to discretise the problem and solve numerically, we truncate our spatial boundaries to finite values. Take $ L_{1} \leq x \leq K_{1} $, where $ L_{1} < K_{1}$, and $ L_{2} \leq y \leq K_{2} $, where $0<L_2<K_{2}$, so that the spatial domain forms a closed rectangle in $\mathbb{R}^2$ of $M \times N$ points with uniform spacing of $\Delta_{x}$ in the $x$-direction and $\Delta_{y}$ in the $y$-direction:
$$x_{i} = L_{1} +(i-1)\Delta_{x},\; i=1,2,\ldots, M, \hspace{20pt} y_{j} = L_2 + (j-1)\Delta_{y},\; j=1,2,\ldots, N.$$
The lower $y$-boundary is truncated to $ L_{2}> 0$ to ensure
non-degeneracy of the partial differential equation for all values of $y$. We also take a uniform partition of $\tau\in [0,T]$ into $P$ points such that $\tau_{k} = (k-1)\Delta_{\tau}$, where $k = 1,2,\ldots,P$. We denote the discrete approximation of $u((i-1)\Delta_x,(j-1)\Delta_y,(k-1)\Delta_\tau)$ by $u^{k}_{i,j}$ and $U^n=(u_{i,j}^n)_{i,j}$.
\section{Hundsdorfer-Verwer ADI splitting scheme}
\label{sec:HV}
\noindent We consider the Alternating Direction Implicit (ADI)
time-stepping numerical method proposed by Hundsdorfer and Verwer
\cite{Hund02, Verwer}. Our partial differential equation \eqref{PDEtransf}
takes the form $u_{\tau} =F(u)$. We employ the splitting $F(u) =
F_{0}(u)+F_{1}(u)+F_{2}(u) $ where unidirectional and mixed
derivative differential operators are given by:
\begin{equation}\label{splitting}
F_{0}(u) = \rho (v y)^{\beta+\frac{1}{2}} u _{xy}, \; F_{1}(u) =
\frac{vy}{2}u_{xx}+\Big(r-\frac{vy}{2}\Big)u_{x}, \; F_{2}(u)=
\frac{(vy)^{2\beta}}{2}u_{yy} + \kappa(vy)^{\alpha}\frac{\theta -
vy}{v}u_y .
\end{equation}
We consider \eqref{PDEtransf} with the splitting \eqref{splitting}
and look for a semi-discrete approximation $U^n
\approx u(\tau_n)$ at time $n\Delta_{\tau}$.
Given an approximation $U^{n-1}$ we
can calculate an approximation for $U^{n}$ at time
$n\Delta_{\tau}$ using the differential operators from
\eqref{splitting}:
\begin{subequations}
\label{HVscheme}
\begin{align}
Y_0 &= U^{n-1}+\Delta_{\tau}F(U^{n-1}), \\
Y_1 &= Y_0 + \phi \Delta_{\tau}(F_{1}(Y_1)-F_{1}(U^{n-1})), \\
Y_2 &= Y_1 + \phi \Delta_{\tau}(F_{2}(Y_2)-F_{2}(U^{n-1})),\\
\widetilde{Y}_0 &= Y_0 + \psi \Delta_{\tau}(F(Y_2)-F(U^{n-1})), \\
\widetilde{Y}_1 &= \widetilde{Y}_0+\phi \Delta_{\tau}(F_{1}(\widetilde{Y}_1)-F_{1}(Y_2)),\\
\widetilde{Y}_2 & = \widetilde{Y}_1+\phi \Delta_{\tau}(F_{2}(\widetilde{Y}_2)-F_{2}
(Y_2)),\\
U^n &= \widetilde{Y}_2.
\end{align}
\end{subequations}
The parameter $\psi$ is taken to be $\psi=1/2$ to ensure
second-order accuracy in time. The choice of $\phi$ is discussed in
\cite{Hund02}. Typically it is fixed to $\phi=1/2$. Larger values give
stronger damping of the implicit terms while lower values return better accuracy.
The first and fourth step in \eqref{HVscheme} can be solved explicitly, while the
remaining steps are solved implicitly. Our aim is to derive high-order
spatial discretisations of the differential operators. Following
\cite{DuFoRi13} we combine high-order compact finite difference
methods for the implicit steps with a (classical, non-compact)
high-order stencil for the explicit steps.
\section{High-order compact scheme for implicit steps}
\label{sec:HOC}
\noindent For $F_{1}(u)$, consider
\begin{equation}\label{F1}
F_{1}(u) = \frac{vy}{2}u_{xx}+\Big(r-\frac{vy}{2}\Big)u_{x} = g
\end{equation}
with arbitrary right hand side $g$. We wish to derive a fourth-order
accurate in space approximation for \eqref{F1} which can be used to solve
the implicit second and fifth step in \eqref{HVscheme}.
Using standard second-order central difference operators and Taylor's expansion, we have:
\begin{align}
\label{ux}
u_x(x_i,y_j) &= \delta_{x0} u_{i,j}
-\frac{\Delta_x^2}{6}u_{xxx}(x_i,y_j)+\mathcal{O}(\Delta_x^4)
\\
\label{uxx}
u_{xx}(x_i,y_j) &= \delta_x^2 u_{i,j} -\frac{\Delta_x^2}{12}u_{xxxx}(x_i,y_j)+\mathcal{O}(\Delta_x^4)
\end{align}
where
$$\delta_{x0} u_{i,j}=\frac{u_{i+1,j} - u_{i-1,j}}{2\Delta_x}\text{
and } \delta_x^2 u_{i,j} = \frac{u_{i+1,j} - 2u_{i,j} +
u_{i-1,j}}{\Delta^{2}_x}.$$
If we can find second-order accurate expressions for $u_{xxx}$ and
$u_{xxxx}$ using only information on the compact stencil, then it will
be possible to approximate $u_x$ and $u_{xx}$ with fourth order
accuracy on the compact stencil. By differentiating \eqref{F1} once
and twice with respect to $x$, respectively, it is possible to express $u_{xxx}$ and $u_{xxxx}$ in terms of first- and second-order derivatives of $u$ and $g$ with respect to $x$:
\begin{align}
\label{uxxx}
u_{xxx}&= \frac{2}{vy}g_x+\Big(1-\frac{2r}{vy}\Big)u_{xx},\\
\label{uxxxx}
u_{xxxx} &= \frac{2}{vy}g_{xx} +\Big(1-\frac{2r}{vy}\Big)\Big[\frac{2}{vy}g_x+\Big(1-\frac{2r}{vy}\Big)u_{xx}\Big].
\end{align}
By substituting standard second-order central difference operators into \eqref{uxxx} and \eqref{uxxxx} we obtain second-order accurate in space approximations for $u_{xxx}$ and $u_{xxxx}$:
\begin{align}
\label{uxxxdiscr}
u_{xxx}(x_i,y_j) &= \frac{2}{vy_j}\delta_{x0}
g_{i,j}+\Big(1-\frac{2r}{vy_j}\Big)\delta_x^2
u_{i,j}+\mathcal{O}(\Delta_x^2), \\
\label{uxxxxdiscr}
u_{xxxx}(x_i,y_j) &= \frac{2}{vy_j}\delta_x^2 g_{i,j} +\Big(1-\frac{2r}{vy_j}\Big)\Big[\frac{2}{vy_j}\delta_{x0} g_{i,j}+\Big(1-\frac{2r}{vy_j}\Big)\delta_x^2 u_{i,j}\Big]+\mathcal{O}(\Delta_x^2).
\end{align}
Substituting \eqref{uxxxdiscr} and \eqref{uxxxxdiscr} into \eqref{ux} and \eqref{uxx}, respectively, yields:
\begin{align}
u_x(x_i,y_j) &= \delta_{x0} u_{i,j} -\frac{\Delta_x^2}{6}\Big[\frac{2}{vy_j}\delta_{x0} g_{i,j}+\Big(1-\frac{2r}{vy_j}\Big)\delta_x^2 u_{i,j}\Big]+\mathcal{O}(\Delta_x^4), \\
u_{xx}(x_i,y_j) &= \delta_x^2 u_{i,j} -\frac{\Delta_x^2}{12}\Bigg[\frac{2}{vy_j}\delta_x^2 g_{i,j} +\Big(1-\frac{2r}{vy_j}\Big)\Big[\frac{2}{vy_j}\delta_{x0} g_{i,j}+\Big(1-\frac{2r}{vy_j}\Big)\delta_x^2 u_{i,j}\Big]\Bigg]+\mathcal{O}(\Delta_x^4 ).
\end{align}
Substituting these fourth-order approximations for $u_x$ and $u_{xx}$
into \eqref{F1} and rearranging the equation such that all derivatives of $u$ with respect to $x$ are on the left hand side and all derivatives of $g$ with respect to $x$ are on the right hand side we obtain a fourth-order compact scheme for \eqref{F1}:
\begin{multline}
\label{F1HOC}
\Big(\frac{vy_j}{2}-\frac{-v^2 y_j^2 \Delta_x^2+4rvy_j\Delta_x^2-4r^2\Delta_x^2}{24vy_j}\Big)\delta_x^2 u_{i,j}+\Big(r-\frac{vy_j}{2}\Big)\delta_{x0} u_{i,j} \\ = g_{i,j}
+ \frac{-2vy_j\Delta_x^2+4r\Delta_x^2}{24vy_j}\delta_{x0} g_{i,j}+\frac{\Delta_x^2}{12}\delta_x^2 g_{i,j} .
\end{multline}
Finally, substituting the expressions for the difference operators
$\delta_{x0}$, $\delta_x^2$ into \eqref{F1HOC} and separating the terms into values of $u$ and $g$ at the three horizontally adjacent nodal points in space, we get:
\begin{multline}
\label{F1HOCnodal}
\frac{v^2y_j^2\Delta_x^2-4rvy_j\Delta_x^2-6v^2y_j^2\Delta_x+4r^2\Delta_x^2+12rvy_j\Delta_x+12v^2y_j^2}{24vy_j\Delta_x^2}{\it
u_{i+1,j}} \\ -
\frac{v^2y_j^2\Delta_x^2-4rvy_j\Delta_x^2+4r^2\Delta_x^2+12v^2y_j^2}{12vy_j\Delta_x^2}{\it
u_{i,j}} \\+
\frac{v^2y_j^2\Delta_x^2-4rvy_j\Delta_x^2+6v^2y_j^2\Delta_x+4r^2\Delta_x^2-12rvy_j\Delta_x+12v^2y_j^2}{24vy_j\Delta_x^2}{\it
u_{i-1,j}}\\ = \frac{-vy_j\Delta_x+2r\Delta_x+2vy_j}{24vy_j}{\it
g_{i+1,j}}+\frac{5}{6}{\it
g_{i,j}}-\frac{-vy_j\Delta_x+2r\Delta_x-2vy_j}{24vy_j}{\it g_{i-1,j}}
\end{multline}
Equation \eqref{F1HOCnodal} defines a fourth-order compact
approximation for \eqref{F1}. In other words, we have a system of equations
which defines a fourth-order accurate approximation for \eqref{F1} at any
point on the inner grid of the spatial domain (all points of the
spatial domain except those that lie on the $x$ and $y$
boundaries). To approximate \eqref{F1} at points along the $x$ boundaries
of the inner grid of the spatial domain, we will require a
contribution from the Dirichlet values at the $x$-boundaries of the
spatial domain. We collect these separately in a vector
$d$. Details on the boundary conditions are given in Section~\ref{sec:bc}.
The linear system to be solved can be written in matrix form:
$$A_{x}{{u}} = B_{x}{{g}} + d,$$
where ${{u}} = (u_{2,2}, u_{2,3}, \ldots, u_{N-1,M-1})$,
${{g}} = (g_{2,2}, g_{2,3}, \ldots, g_{N-1,M-1})$. The coefficient matrices $A_x$ and $B_x$ are block diagonal matrices, with the following structure:
\begin{equation*} {A_x} = \left[ {\begin{array}{cccc}
A_x^{1,1}& 0 & 0 & 0\\
0 & A_x^{2,2} & 0 & 0\\
0 & 0 & \ddots & 0 \\
0 &0 &0 &A_x^{N-2,N-2}
\end{array}} \right],\quad
{B_x} = \left[ {\begin{array}{cccc}
B_x^{1,1}& 0 & 0 & 0\\
0 & B_x^{2,2} & 0 & 0\\
0 & 0 & \ddots & 0 \\
0 &0 &0 &B_x^{N-2,N-2}
\end{array}} \right],
\end{equation*}
where each $A_x^{j,j} = \mathrm{diag}[a_{-1}^{j,j},a_0^{j,j},a_1^{j,j}]$ and $B_x^{j,j} = \mathrm{diag}[b_{-1}^{j,j},b_0^{j,j},b_1^{j,j}]$ are tri-diagonal matrices.
\medskip
Let us consider now the case of $F_{2}$:
\begin{equation}
\label{F2}
F_{2} (u)= \frac{(vy)^{2\beta}}{2}u_{yy} + \kappa(vy)^{\alpha}\frac{\theta - vy}{v}u_y=g .
\end{equation}
Due to the appearance of $y$ terms in the coefficients of $F_2(u)$,
the algebraic complexity in deriving a fourth-order accurate scheme in
space is much greater. By Taylor's expansions we obtain:
\begin{align}
\label{uy}
u_y\left( x_{i},y_{j} \right)
=\delta _{y_{0}}u_{i,j}-\frac{{\Delta_y}^{2
}}{6}u_{yyy} \left( x_{i},y_{j
} \right) +\mathcal{O} (\Delta_y^{4}) , \\
\label{uyy}
u_{yy}\left( x_{{i}},y_{{j}
} \right) ={\delta^{2}_{y}}u_{{i,j}}-\frac{{\Delta_y}^{2}}{12}u_{yyyy} \left( x
_{{i}},y_{{j}} \right) +\mathcal{O} (\Delta_y^{4}).
\end{align}
We wish to find second order accurate approximations for $u_{yyy}$ and
$u_{yyyy}$ on the compact stencil in order to find fourth-order
accurate expressions for $u_y$ and $u_{yy}$. Re-arranging \eqref{F2},
we get:
$$
u_{yy} = \frac{2}{(vy)^{2\beta}}\Big(-\kappa(vy)^{\alpha}\frac{(\theta - vy)}{v}u_y+g\Big).
$$
Via repeated applications of the chain rule, second-order accurate approximations for $u_{yyy}(x_i,y_j)$ and $u_{yyyy}(x_i,y_j)$ are given by:
\begin{multline}
\label{uyyy}
u_{yyy}(x_i,y_j) = \frac
{\left( 2 \left( vy_j \right) ^{\alpha}\alpha kvy_j-2 \left( vy_j
\right) ^{\alpha}\theta\alpha k+2 \left( vy_j \right) ^{\alpha}kvy_j
\right) }{
\left( vy_j \right) ^{2\beta} vy_j} \delta_{y0}u_{i,j} \\
+\frac { ( 2 \left( vy_j \right) ^{\alpha} kv {y_j}^{2} - 2 \left( vy_j \right)^{2\beta} \beta v-2 \left( vy_j \right) ^{\alpha}\theta k y_j ) } {\left(vy_j\right)^{2\beta}vy_j} \delta^2_y u_{i,j} +\frac {2}{ \left(vy_j\right)^{2\beta}}\delta_{y0}g_{i,j}+\mathcal{O}(\Delta^2_y),
\end{multline}
\begin{multline}
\label{uyyyy}
u_{yyyy} ( x_i,y_j) = \left( {\frac {2(2 \left( vy_j \right) ^{\alpha}kv{y_j}^{2}-2
\left(vy_j\right)^{2\beta}\beta v-2 \left( vy_j
\right) ^{\alpha}\theta ky_j)}{\left(vy_j\right)^{4\beta}vy_j}}-{\frac {4\beta}{ \left(vy_j\right)^{2\beta}y_j}} \right) \delta_{y0}g_{i,j} \\ + \Bigg( {\frac {1}{ \left(vy_j\right)^{2\beta}vy_j} ( 2 \left( vy_j \right) ^{\alpha}{\alpha}^{2}kv+
4 \left( vy_j \right) ^{\alpha}\alpha kv-{\frac { 2\left( vy_j
\right) ^{\alpha}{\alpha}^{2}\theta k}{y_j}}+2 \left( vy_j \right) ^{
\alpha}kv ) } \\ -{\frac {2 \beta \left( 2 \left( vy_j \right) ^{\alpha}
\alpha kvy_j-2 \left( vy_j \right) ^{\alpha}\theta \alpha k+2
\left( vy_j \right) ^{\alpha}kvy_j \right) }{\left(vy_j\right)^{2\beta}v{y_j}^{2}}}-{\frac {2 \left( vy_j
\right) ^{\alpha}\alpha kvy_j-2 \left( vy_j \right) ^{\alpha}\theta
\alpha k+2 \left( vy_j \right) ^{\alpha}kvy_j}{\left(vy_j\right)^{2\beta}v{y_j}^{2}}} \\ +{\frac { ( 2 \left( vy_j
\right) ^{\alpha}kv{y_j}^{2}-2\left(vy_j\right)^{2\beta}\beta v-2 \left( vy_j \right) ^{\alpha}\theta ky_j
) \left( 2 \left( vy_j \right) ^{\alpha}\alpha kvy_j-2
\left( vy_j \right) ^{\alpha}\theta \alpha k+2 \left( vy_j \right) ^{
\alpha}kvy_j \right) }{ \left(vy_j\right)^{4\beta}{
v}^{2}{y_j}^{2}}} \Bigg) \delta_{y0}u_{i,j} \\ + \Bigg( {\frac {2 \left( vy_j \right) ^{\alpha}\alpha kvy_j-2
\left( vy_j \right) ^{\alpha}\theta \alpha k+2 \left( vy_j \right)
^{\alpha}kvy_j}{ \left(vy_j\right)^{2\beta}vy_j}} \\ + {
\frac {1}{ \left(vy_j\right)^{2\beta}vy_j} ( 2
\left( vy_j \right) ^{\alpha}\alpha kvy_j+4 \left( vy_j \right) ^{
\alpha}kvy_j -4 {\frac { \left(vy_j\right)^{2\beta}
{\beta}^{2}v}{y_j}}-2 \left( vy_j \right) ^{\alpha}\theta \alpha k-2
\left( vy_j \right) ^{\alpha}\theta k ) } \\ - {\frac {2\beta ( 2
\left( vy_j \right) ^{\alpha}kv{y_j}^{2}-2 \left(vy_j\right)^{2\beta}\beta v-2 \left( vy_j \right) ^{\alpha}\theta ky_j
)}{ \left(vy_j\right)^{2\beta}v{y_j}^{
2}}}-{\frac {2 \left( vy_j \right) ^{\alpha}kv{y_j}^{2}-2 \left(vy_j\right)^{2\beta}\beta v-2 \left( vy_j
\right) ^{\alpha}\theta ky_j}{ \left(vy_j\right)^{2\beta}v{y_j}^{2}}} \\ + {\frac { ( 2 \left( vy_j \right) ^{\alpha
}kv{y_j}^{2}-2\left(vy_j\right)^{2\beta}\beta v
-2 \left( vy_j \right) ^{\alpha}\theta ky_j ) ^{2}}{ \left(vy_j\right)^{4\beta}{v}^{2}{y_j}^{2}}} \Bigg) \delta^2_y u_{i,j}+{
\frac {2}{ \left(vy_j\right)^{2\beta}}}\delta^2_y g_{i,j}+\mathcal{O}(\Delta^2_y).
\end{multline}
where $\delta_{y0}$ and $\delta_y^2$ denote the standard second-order
central difference operators.
Substituting \eqref{uyyy} and \eqref{uyyyy} into \eqref{uy} and
\eqref{uyy}, respectively, yields fourth-order accurate
approximations (not given here) for $u_y(x_i,y_j)$ and $u_{yy}(x_i,y_j)$ on the compact
stencil. By substituting these fourth-order accurate approximations
into \eqref{F2} and separating the $u$ and $g$ terms onto the left and
right hand sides, respectively, we obtain a linear system which can be represented in matrix form: $$A_{y}{{u}} = B_{y}{{g}}$$
where ${{u}} = (u_{2,2}, u_{2,3}, \ldots, u_{N-1,M-1})$,
${{g}} = (g_{2,2}, g_{2,3}, \ldots, g_{N-1,M-1})$.
We do not impose any boundary conditions in the $y$-direction, but
discretise the boundary grid points with the same scheme, and handle
resulting ghost points via extrapolation; details on the boundary
conditions are given in Section~\ref{sec:bc}.
The coefficient matrices $A_y$ and $B_y$ are block tri-diagonal matrices with the following structures:
\begin{center}
\begin{eqnarray*}
\mathbf{A_y} = \left[\begin{array}{ccccc}
A_y^{1,1} & A_y^{1,2} & 0 & 0 & 0 \\
A_y^{2,1} & A_y^{2,2} & A_y^{2,3} & 0 & 0\\
0 & \ddots & \ddots & \ddots & 0 \\
0 & 0 & A_y^{N-3,N-4} & A_y^{N-3,N-3} & A_y^{N-3,N-2} \\ 0 & 0&0 & A_y^{N-2,N-3} & A_y^{N-2,N-2}
\end{array}\right], \\ \mathbf{B_y} = \left[\begin{array}{ccccc}
B_y^{1,1} & B_y^{1,2} & 0 & 0 & 0 \\
B_y^{2,1} & B_y^{2,2} & B_y^{2,3} & 0 & 0\\
0 & \ddots & \ddots & \ddots & 0 \\
0 & 0 & B_y^{N-3,N-4} & B_y^{N-3,N-3} & B_y^{N-3,N-2} \\ 0 & 0&0 & B_y^{N-2,N-3} & B_y^{N-2,N-2}
\end{array}\right],
\end{eqnarray*}
\end{center}
where each $A_y^{j,j} = \mathrm{diag}[a^{i,j}]$ and $B_y^{j,j} =
\mathrm{diag}[b^{i,j}]$ are diagonal matrices, with values on these diagonals given as follows:
\begin{multline}
a^{i,j\pm 1}=\frac{1}{2{\Delta_y}^2} (vy_j)^{2\beta}-\frac{1}{12(vy_j)^{2\beta}
{v}^{2}{y_j}^{2}}\Big(-2 ( vy_j )^{2\alpha} {\kappa}^{2}{v}^{2}{y_j}^{4} +2 (vy_j)^{2\beta+\alpha} \alpha \kappa{v}^{2}{y_j}^{2} \\
-2 (vy_j)^{2\beta+\alpha}\beta \kappa{v}^{2}{y_j}^{2}+4 ( vy_j ) ^{2\alpha} \theta{\kappa}^{2}v{y_j}^{3} +2 (vy_j)^{4\beta}{\beta}^{2}{v}^{2} -2 (vy_j)^{2\beta+\alpha} \theta\alpha \kappa v y_j \\
+2 (vy_j)^{2\beta+\alpha} \theta\beta \kappa v y_j +2 (vy_j)^{2\beta+\alpha}\kappa{v}^{2}{y_j}^{2} -2 ( vy_j ) ^{2\alpha} {\theta}^{2}{\kappa}^{2}{y_j}^{2}+ (vy_j)^{4\beta}\beta{v}^{2}\Big) \\
\pm\Bigg( \frac {- ( vy_j )^{\alpha}\kappa v y_j+ ( vy_j ) ^{\alpha}\theta \kappa}{2v\Delta_y} -\frac{1}{24\Delta_y\beta^2( vy_j)^4}\Big(-2 ( vy_j ) ^{2
\alpha}\alpha{\kappa}^{2}{v}^{2}{y_j}^{3}{\Delta_y}^{2}\\
+ (vy_j)^{2\beta} ( vy_j ) ^{\alpha}{\alpha}^{2}\kappa{v}^{2}y_j{\Delta_y}^{2} -4 (vy_j)^{2\beta+\alpha} \alpha\beta \kappa{v}^{2}y_j{\Delta_y}^{2}+4(vy_j)^{2\alpha}\theta\alpha{\kappa}^{2}v{y_j}^{2}{\Delta_y}^{2}\\
- 2 ( vy_j )^{2\alpha} {\kappa}^{2}{v}^{2}{y_j}^{3}{\Delta_y}^{2}- ( vy_j )^{2\beta+\alpha} \theta{\alpha}^{2}\kappa v{\Delta_y}^{2}+4 ( vy_j )^{2\beta+\alpha}
\theta\alpha\beta \kappa v{\Delta_y}^{2}\\
+ ( vy_j )^{2\beta+\alpha}\alpha \kappa{v}^{2}y_j{\Delta_y}^{2} -4 ( vy_j )^{2\beta+\alpha}\beta
\kappa{v}^{2}y_j{\Delta_y}^{2}-2 ( vy_j )^{2\alpha}{\theta}^{2}\alpha{\kappa}^{2}y_j{\Delta_y}^{2}\\
+2 ( vy_j )^{2\alpha}\theta{\kappa}^{2}v{y_j}^{2}{\Delta_y}^{2}+ ( vy_j )^{2\beta+\alpha}\theta\alpha \kappa v{\Delta_y}^{2}\Big)\Bigg),
\end{multline}
\begin{multline}
a^{i,j}= \frac{1}{6( vy_j ) ^ {2\beta+2}}\Big( -2 ( vy_j ) ^ {2\alpha}{\kappa}^{2}{v}^{2}{y_j}^{4}+2 ( vy_j ) ^ {2\beta+\alpha}\alpha \kappa{v}^{2}{y_j}^{
2} +2 ( vy_j ) ^ {4\beta}{
\beta}^{2}{v}^{2}\\ -2 ( vy_j ) ^ {2\beta+\alpha}\beta \kappa{v}^{2}{y_j}^{2} +4
( vy_j ) ^ {2\alpha}\theta{\kappa}^{2}v{y_j}^{
3} -2 ( vy_j ) ^ {2\beta+\alpha} \theta\alpha \kappa v y_j + ( vy_j ) ^ {4\beta}\beta{v}^{2} \\ +2 ( vy_j ) ^ {2\beta+\alpha}\theta\beta \kappa v y_j+2 ( vy_j ) ^ {2\beta+\alpha}\kappa{v}^{2}{y_j
}^{2} - 2 ( vy_j ) ^ {2\alpha}{\theta}^{2}{\kappa}^{2}{y_j}^{2} - 6
( vy_j ) ^ {4\beta+2} \Big),
\end{multline}
\begin{equation}
b^{i,j\pm 1}= \pm{\frac {-2 ( vy_j ) ^{\alpha}\kappa{v}^{2}{y_j}^{3}{
\Delta_y}^{2}-4 ( vy_j ) ^ {2\beta}\beta
{v}^{2}y_j{\Delta_y}^{2}+2 ( vy_j ) ^{\alpha}\theta \kappa v{y_j}^
{2}{\Delta_y}^{2}}{ 24( vy_j ) ^ {2\beta+2}\Delta_y}}+\frac{1}{12},\quad b^{i,j}=\frac{5}{6}.
\end{equation}
\section{High-order scheme for explicit steps}
\label{sec:HOexpl}
\noindent The first and fourth steps of the ADI scheme
\eqref{HVscheme} operate only on previous approximations to explicitly
calculate an updated approximation. The differential operator in these
steps takes the form of the right hand side of \eqref{PDEtransf}. For
the mixed derivative term it seems not to be possible to
exploit the structure of the differential operator to
obtain a fourth-order approximation on a compact computational
stencil. Hence, in order to maintain fourth-order accuracy of the
scheme in the explicit steps of \eqref{HVscheme}, the derivatives in
each differential operator $F_0$, $F_1$ and $F_2$ are approximated
using classical, fourth-order central difference operators which
operate on a larger $5\times 5$-stencil in the spatial domain.
For $F_{1}(u) = \frac{vy}{2}u_{xx}-(\frac{vy}{2}-r)u_{x}$, we have the following scheme:
\begin{multline*}
\Big[ \frac{vy}{2} \frac{\partial^2 u}{\partial x^2}+\Big (r-\frac{vy}{2}\Big)\frac{\partial u}{\partial x}\Big]_{i,j} = \Big (\frac{2r-vy_j}{24\Delta_x}-\frac{vy_j}{24\Delta_x^2}\Big)u_{i,j-2} + \Big (\frac{8vy_j-16r}{24\Delta_x}+\frac{16vy_j}{24\Delta_x^2}\Big)u_{i,j-1} \\ - \frac{30vy_j}{24\Delta_x^2}u_{i,j} + \Big (\frac{16r-8vy_j}{24\Delta_x}+\frac{16vy_j}{24\Delta_x^2}\Big)u_{i,j+1} + \Big (\frac{vy_j-2r}{24\Delta_x}-\frac{vy_j}{24\Delta_x^2}\Big)u_{i,j+2} + \mathcal{O}(\Delta^4_x).
\end{multline*}
For $F_{2}(u)=\frac{(vy)^{2\beta}}{2}u_{yy}+\frac{\kappa(vy)^{\alpha}(\theta-vy)}{v}u_{y}$, we have:
\begin{multline*}
\Big[\frac{(vy)^{2\beta}}{2}\frac{\partial^2 u}{\partial y^2}+\frac{\kappa(vy)^{\alpha}(\theta-vy)}{v}\frac{\partial u}{\partial y}\Big]_{i,j} = \Big (-\frac{\kappa(vy_j)^{\alpha}(\theta-vy_j)}{12v\Delta_y}-\frac{(vy_j)^{2\beta}}{24\Delta_y^2}\Big)u_{i-2,j} \\
+ \Big (\frac{8\kappa(vy_j)^{\alpha}(\theta-vy_j)}{12v\Delta_y}+\frac{16(vy_j)^{2\beta}}{24\Delta_y^2}\Big)u_{i-1,j} - \frac{30(vy_j)^{2\beta}}{24\Delta_y^2}u_{i,j} \\
+ \Big (-\frac{8\kappa(vy_j)^{\alpha}(\theta-vy_j)}{12v\Delta_y}+\frac{(vy_j)^{2\beta}}{24\Delta_y^2}\Big)u_{i+1,j} + \Big (\frac{\kappa(vy_j)^{\alpha}(\theta-vy_j)}{12v\Delta_y}-\frac{(vy_j)^{2\beta}}{24\Delta_y^2}\Big)u_{i+2,j} + \mathcal{O}(\Delta^4_y ).
\end{multline*}
Finally, for the mixed derivative term $F_0 = \rho (v y)^{\beta+\frac{1}{2}}u_{xy} $, the following computational stencil is used:
\begin{multline*}
\Big[\rho (v y)^{\beta+\frac{1}{2}} \frac{\partial^2 u}{\partial x \partial
y}\Big]_{i,j} = -\frac{64\rho (v y_j)^{\beta+\frac{1}{2}}}{144\Delta_x\Delta_y}u_{i-1,j-1}
+ \frac{64\rho (v y_j)^{\beta+\frac{1}{2}}}{144\Delta_x\Delta_y}u_{i-1,j+1}
+ \frac{64\rho (v y_j)^{\beta+\frac{1}{2}}}{144\Delta_x\Delta_y}u_{i+1,j-1} \\
- \frac{64\rho (v y_j)^{\beta+\frac{1}{2}}}{144\Delta_x\Delta_y}u_{i+1,j+1}
- \frac{\rho (v y_j)^{\beta+\frac{1}{2}}}{144\Delta_x\Delta_y}u_{i-2,j-2}
+ \frac{8 \rho (v y_j)^{\beta+\frac{1}{2}}}{144\Delta_x\Delta_y}u_{i-2,j-1}
- \frac{8 \rho (v y_j)^{\beta+\frac{1}{2}}}{144\Delta_x\Delta_y}u_{i-2,j+1}\\
+ \frac{ \rho (v y_j)^{\beta+\frac{1}{2}}}{144 \Delta_x\Delta_y}u_{i-2,j+2} +
\frac{8 \rho (v y_j)^{\beta+\frac{1}{2}}}{144 \Delta_x\Delta_y}u_{i-1,j-2}
- \frac{8 \rho (v y_j)^{\beta+\frac{1}{2}}}{144\Delta_x\Delta_y}u_{i-1,j+2}
- \frac{8 \rho (v y_j)^{\beta+\frac{1}{2}}}{144\Delta_x\Delta_y}u_{i+1,j-2} \\
+\frac{8 \rho (v y_j)^{\beta+\frac{1}{2}}}{144 \Delta_x\Delta_y}u_{i+1,j+2}
+ \frac{ \rho (v y_j)^{\beta+\frac{1}{2}}}{144 \Delta_x\Delta_y}u_{i+2,j-2}
-\frac{8 \rho (v y_j)^{\beta+\frac{1}{2}}}{144\Delta_x\Delta_y}u_{i+2,j-1}
+\frac{8 \rho (v y_j)^{\beta+\frac{1}{2}}}{144\Delta_x\Delta_y}u_{i+2,j+1} \\
- \frac{ \rho (v y_j)^{\beta+\frac{1}{2}}}{144 \Delta_x\Delta_y}u_{i+2,j+2} +
\mathcal{O}(\Delta^2_x\Delta^2_y).
\end{multline*}
Using these fourth-order approximations, the first and fourth step in
\eqref{HVscheme} can be computed directly. The values at the spatial
boundaries for each solution of the ADI scheme are determined by the
boundary conditions, the computational stencil is required for all
remaining points in the spatial domain. For the explicit steps, the
$5\times 5$-point computational stencil exceeds the spatial boundary
when we wish to approximate differential operator $F(u)$ at any point
along the boundary of the spatial domain's inner grid. For example if
we wish to evaluate $F(u_{2,2})$, we will require contributions from
ghost points which fall outside the spatial domain, as marked by
bullet points in Figure~\ref{fig:ghostpoints}.
\begin{figure}
$$\begin{array}{c|ccccc}
\bullet & \mathrm{u_{4,1}} & u_{4,2} & u_{4,3} & u_{4,4} \\
\bullet & \mathrm{u_{3,1}} & u_{3,2} & u_{3,3} & u_{3,4} \\
\bullet & \mathrm{u_{2,1}} & {{u_{2,2}}} & u_{2,3} & u_{2,4} \\
\bullet & \mathrm{u_{1,1}} & \mathrm{u_{1,2}} & \mathrm{u_{1,3}} & \mathrm{u_{1,4}} \\
\hline
\odot & \circ & \circ & \circ & \circ
\end{array}$$
\caption{Example: evaluation of $F(u_{2,2})$ using the $5\times 5$-point computational stencil in the lower left
corner of the computational domain; ghost points outside the
computational domain at which values are
extrapolated from the interior of the domain are marked by bullets ($\bullet$,$\circ$,$\odot$),
grid points on the boundary are set in Roman.}
\label{fig:ghostpoints}
\end{figure}
We extrapolate information from grid points $u(x_i,y_j)$, where $i =
1,\ldots ,M-1,$ $j = 1,\ldots, N-1$ to establish values at these ghost
points for the purpose of evaluating the differential operator $F(u)$ at any point along the boundary of the inner grid of the spatial domain. To calculate the values at these ghost points, we use the following five-point extrapolation formulae for three cases:
\begin{align*}
& x=L_1\text{ boundary }(\bullet)\text{ :} &u_{i,0} &= 5u_{i,1} - 10u_{i,2} + 10 u_{i,3} -5u_{i,4} + u_{i,5} + \mathcal{O}(\Delta^6_x) ,\\
& y=L_2\text{ boundary }(\circ)\text{ :} &u_{0,j} &= 5u_{1,j} - 10u_{2,j} + 10 u_{3,j} -5u_{4,j} + u_{5,j} + \mathcal{O}(\Delta^6_y),\\
& x=L_1, y=L_2\text{ corner }(\odot)\text{ :} &u_{0,0} &= 5u_{1,1} - 10u_{2,2}+ 10 u_{3,3} -5u_{4,4} + u_{5,5} +\mathcal{O}(\Delta^3_x\Delta^3_y).
\end{align*}
The extrapolation at the $x=K_1$ and $y=K_2$ boundaries
and the remaining three corners is handled analogously.
\section{Solving the high-order ADI scheme}
\label{sec:HOsolve}
\noindent Starting from a given $U^{n-1}$, the ADI
scheme \eqref{HVscheme} involves six approximation steps to obtain $U^{n}$, the
solution at the next time level. The first approximation $Y_0$ can be
solved for explicitly using the $5\times 5$-point computational
stencil derived in Section~\ref{sec:HOexpl}. The second approximation
for our solution, denoted by $Y_1$, has to be solved for implicitly:
\begin{align}
\label{stepximpl}
Y_1 =& Y_0 + \phi{\Delta_{t}} (F_{1}(Y_1)-F_{1}(U^{n-1}))\quad
\Longleftrightarrow \quad F_1(Y_1-U^{n-1})= \frac1{\phi\Delta_t}(Y_1-Y_0).
\end{align}
We apply the fourth-order compact scheme established in
Section~\ref{sec:HOC} to solve \eqref{stepximpl}. In matrix form we obtain
$$A_x(Y_1-U^{n-1}) = B_x\Big(\frac{1}{\phi\Delta_t}(Y_1-Y_0)\Big)+ d.$$
Collecting unknown $Y_1$ terms on the left hand side and known terms
$Y_0$, $U^{n-1}$ and $d$ on the right hand side we get
$$\left(B_x-\phi\Delta_t A_x\right)Y_1 = B_{x}Y_0 - \phi\Delta_tA_{x}U^{n-1}- \phi\Delta_td.$$
To solve, we invert the tri-diagonal matrix
$\left(B_x-\phi\Delta_t A_x\right)$.
For the third step of the ADI scheme, we proceed analogously, and use
the high-order compact scheme presented in Section~\ref{sec:HOC}
to solve for $Y_2$ implicitly. The fourth, fifth and sixth step of the
ADI scheme are performed analogously as the first, second
and third steps, respectively.
Note that the matrix $\left(B_x-\phi\Delta_t A_x\right)$ appears
twice in the scheme \eqref{HVscheme}, in the second and fifth step. Similarly,
$\left(B_y-\phi\Delta_tA_y\right)$ appears in the third and the sixth step. Hence, using LU-factorisation, only two matrix inversions are
necessary in each time step of scheme \eqref{HVscheme}.
Moreover, since the coefficients in the partial differential equation
\eqref{PDEtransf} do not depend on time, and the matrices are therefore
constant, they can be LU-factorised before iterating in time to obtain
a highly efficient algorithm.
The combination of the fourth-order spatial discretisation presented in
Section~\ref{sec:HOC} and \ref{sec:HOexpl} with the second-order time
splitting \eqref{HVscheme} yields a high-order ADI scheme with order of consistency two in time and four in space.
\section{Boundary conditions}
\label{sec:bc}
\noindent For the case of the Dirichlet conditions at $x=L_1$ and
$x=K_1$ we impose
\begin{align*}
u(L_1,y_j,\tau_k)&= 1-e^{r\tau_k+L_1}, & j&=1,2,\ldots,N,\;k=1,2,\ldots
, \\
u(K_1,y_j,\tau_k)&= 0, & j&=1,2,\ldots,N,\;k=1,2,\ldots.
\end{align*}
Using the homogeneous Neumann conditions \eqref{bcymax} and \eqref{bcymin} which are correct in
the limit $y\to \infty$ and $y\to 0$, respectively, at the (finite)
boundaries $y=L_2>0$ and $y=K_2$ would result in a dominant error along
these boundaries. Hence, we do not impose any boundary condition at
these two boundaries but discretise the partial derivative using the computational stencil
from the interior.
The values of the unknown on the boundaries are set by extrapolation
from values in the interior. This introduces a numerical error, and it
needs to be considered that the order of extrapolation should be high
enough not to affect the overall order of accuracy. We refer to Gustafsson \cite{GusBC} for a discussion of the influence of the
order of the approximation on the global convergence rate.
We use the following extrapolation formulae:
\begin{align*}
& u^k_{i,1} = 5u^k_{i,2} -10u^k_{i,3} +10u^k_{i,4} -5u^k_{i,5}+u^k_{i,6} +\mathcal{O}(\Delta^6_y),\\ & u^k_{i,N} = 5u^k_{i,N-1} -10u^k_{i,N-2} +10u^k_{i,N-3} -5u^k_{i,N-4} +u^k_{i,N-5} +\mathcal{O}(\Delta^6_y).
\end{align*}
\section{Numerical experiments}
\label{sec:num}
\noindent In this section we report the results of our numerical
experiments. We estimate the numerical convergence order of the
high-order ADI scheme and then
perform additional experiments to validate its stability.
\subsection{Numerical convergence}
\noindent We perform a numerical study to compute the order of
convergence of the high-order ADI scheme.
Since the initial condition for the option pricing problem,
the payoff function $V(S,\sigma, T)$, is non-smooth at $S=E$, we
cannot in general expect to observe high-order convergence \cite{KrThWi70}.
A straightforward way to smooth the initial condition is to choose the
mesh in such a way that the non-smooth point of the initial condition is
not a point of the mesh. The construction of such a mesh is always
possible in a simple manner. Following this approach, the non-smooth payoff can be directly considered in our
scheme and, indeed, we observe high-order numerical convergence.
Alternatively, suitable smoothing operators can be employed to
achieve a similar effect, see \cite{KrThWi70,DuHe15}.
For convenience, we choose an equally sized space step $h = \Delta_x =
\Delta_y,$ creating an evenly-spaced mesh both horizontally and
vertically. We set the parameter $\phi=0.5$ in \eqref{HVscheme} in all
numerical experiments.
Figure~\ref{fig:1} shows the numerical
solution for the European option price at time $T=0.5$ using the
parameters from Table~\ref{defaultparams}.
\begin{figure}
\centering
\includegraphics[width=0.75\textwidth]{finalsurf}
\caption{Numerical solution for price of European Put Option at $T=0.5$}
\label{fig:1}
\end{figure}
\begin{table}
\begin{center}
\begin{tabular}[c]{l c}
\toprule
Parameter & Value \\
\midrule
Strike price & $E=100$ \\
Time to maturity & $T = 0.5$ \\
Interest rate & $r=0.05$ \\
Volatility of volatility & $v = 0.1$ \\
Mean reversion speed & $\kappa = 2$\\
Long run mean of volatility & $\theta = 0.1$ \\
Correlation & $\rho = -0.5$ \\
Parabolic mesh ratio & $\gamma = 0.5$ \\
Stochastic volatility drift parameter & $\alpha = 0$ \\
Stochastic volatility diffusion parameter & $\beta = 0.5$ \\
\bottomrule
\end{tabular}
\caption{Default input parameters for numerical experiments}
\label{defaultparams}
\end{center}
\end{table}
We compute the $l_2$-norm error $\varepsilon_2$ and the maximum norm
error $\varepsilon_\infty$ of the numerical solution with respect to a
numerical reference solution on a fine grid. We fix the parabolic mesh ratio $\gamma=\Delta_t/h^2$ to
a constant value which is natural for parabolic partial differential
equations as \eqref{PDEtransf}.
Then, asymptotically, we expect these
errors to converge as $\varepsilon = Ch^m$
for some $m$ and $C$ representing constants. This implies
$\ln(\varepsilon) = \ln(C) + m \ln(h) .$
Hence, the double-logarithmic plot $\varepsilon$ against $h$ should be
asymptotic to a straight line with slope $m$. This gives a method for
experimentally determining the order of the scheme. We expect to observe a numerical convergence rate of approximately order $\mathcal{O}(h^4)$ in space.
For comparison, we conduct additional
experiments using a standard, second-order ADI scheme based on
\eqref{HVscheme} combined with a
second-order central difference discretisation in space.
Figure~\ref{fig:2} shows the double logarithmic plot of
$l_\infty$-error versus space step $h=\Delta_y=\Delta_x$.
We observe that the numerical convergence order agrees well with the
theoretical order of the schemes.
\begin{figure}
\centering
\includegraphics[width=0.75\textwidth]{compareinf}
\caption{$l_{\infty}$-error comparison of the high-order ADI scheme
with standard second-order in space ADI scheme for various values of
the correlation parameter $\rho$}
\label{fig:2}
\end{figure}
In all cases, the high-order ADI scheme outperforms the standard
second-order ADI scheme for a given mesh width $h$. In other words,
to realise a
chosen level of accuracy we can use a coarser grid
for the high-order ADI scheme than for the standard second-order scheme, which translates into solving smaller linear systems and is therefore more computationally efficient.
\subsection{Numerical stability analysis}
\noindent In this section we investigate whether there are any stability
restrictions on the choice of the time-step $\Delta_t$ for the
high-order ADI scheme.
Unlike for standard second-order schemes, the algebraic complexity of
the numerical stability analysis of high-order compact schemes is
very high since the established stability
notions imply formidable algebraic problems for high-order compact
schemes. As a result, there are only few stability results for
high-order compact schemes in the literature \cite{DuHe15,DuFo12,FournieRigal}. This is even more
pronounced in higher spatial dimensions, as most of the existing
studies with analytical stability results for high-order compact
schemes are limited to a one-dimensional setting.
For diffusion
equations (without convection) with mixed derivative terms and
constant coefficients, a stability analysis of the ADI method
\eqref{HVscheme} with standard second-order discretisation in space
\cite{HouWel07} revealed it to be unconditionally stable. The analysis
in \cite{HouWel07} is based on studying the stability for a simplified, linear test
equation which implies the assumption that
all involved discretisation matrices are normal and commuting.
The discretisation matrices of high-order compact schemes generally do
not fulfil these assumptions and, hence, in the present case with
non-constant coefficients, the situation is much more involved. A
thorough stability analysis is therefore beyond the scope of the
present paper. Instead, to validate the stability of the scheme, we perform
additional numerical stability tests. We remark that in our numerical
experiments we observe stable behaviour throughout.
We compute numerical solutions for varying values of the parabolic
mesh ratio $\gamma=\Delta_t/h^2$ and the mesh width $h$. Plotting the
associated $l_2$-norm errors in the plane should allow us to detect
stability restrictions depending on $\gamma$ or oscillations that
occur for high cell Reynolds numbers (large $h$). This approach for a
numerical stability study was also used in \cite{DuFo12,DuFoHe14}.
We give results for the European Put option using the parameters from Table~\ref{defaultparams}. For our stability plots
we use $\gamma=k/10$ with $k=2,\ldots,10,$ and a descending sequence
of spatial grid points.
Figure~\ref{fig:3} shows the stability plots for the correlation
parameter $\rho = 0$ and $\rho = -0.5$.
\begin{figure}
\centering
\includegraphics[scale = 0.45]{contourplotrho0}%
\includegraphics[scale = 0.45]{contourplotrho05}
\caption{Contour plot of the $l_2$-error for $\rho=0$ (left) and
$\rho=-0.5$ (right) against parabolic mesh ratio $\gamma = {\Delta_{\tau}}/{h^2}$ and mesh width $h$}
\label{fig:3}
\end{figure}
We observe that
the influence of the parabolic mesh ratio $\gamma$ on the $l_2$-error
is only marginal and the relative error does not exceed $5 \times 10^{-3}$ as a
value for both stability plots. We can infer that there does not seem
to be a stability condition on $\gamma$ for either situation. For increasing
values of $h$, which also result in a higher cell Reynolds number, the
error grows gradually, and no oscillations in the numerical solutions
occur.
These observations are confirmed by additional numerical convergence
tests for varying parabolic mesh ratio $\gamma$. The numerical
convergence orders reported in Table~\ref{tab:1} show that the
numerical convergence order for the high-order scheme, measured both
in the $l_2$-norm and $l_{\infty}$-norm is very close to four, and
does not depend on the parabolic mesh ratio $\gamma$.
\begin{table}
\centering
\begin{tabular}[l]{ l c c c c c}
\toprule
$\gamma=\Delta_t/h^2$ & 0.2 & 0.4 & 0.6 & 0.8 & 1.0 \\
\midrule
HO-ADI $l_2$-error & 3.8871 & 3.8870 & 3.8868 & 3.8866 & 3.8864 \\
Standard ADI $l_2$-error & 2.4521 & 2.4519 & 2.4517 & 2.4514 & 2.4510 \\
HO-ADI $l_{\infty}$-error & 3.8960 & 3.8961 & 3.8961 & 3.8962 & 3.8964\\
Standard ADI $l_{\infty}$-error & 1.9744 & 1.9744 & 1.9744 & 1.9743 & 1.9742\\
\bottomrule
\end{tabular}
\caption{Numerical convergence order in space for varying parabolic mesh ratio $\gamma=\Delta_t/h^2$}
\label{tab:1}
\end{table}
\section{Conclusion}
\label{sec:conc}
\noindent By combining fourth-order (compact and non-compact) finite difference
schemes in space with Hundsdorfer and Verwer's second-order ADI time-stepping scheme, we have constructed a new numerical method for solving option pricing problems for stochastic volatility models. Numerical experiments for approximating the price of a European Put option using Heston’s stochastic volatility model with generic parameters confirm the numerical convergence of the scheme in space and time while the results for a range of parabolic mesh ratios suggest unconditional stability.
\section*{Acknowledgement}
\noindent BD acknowledges partial support by the Leverhulme Trust research project grant `Novel discretisations for higher-order nonlinear PDE' (RPG-2015-69).
JM has been supported in part by a studentship under the EPSRC Doctoral Training Partnership (DTP) scheme.
|
1,314,259,994,997 | arxiv | \section{Introduction}
Starting with the purpose of understanding the random motion of Brownian particles, diffusive phenomena
have received great attention for a long time in statistical physics as well as in various recent fields
such as human geographical \cite{hm,hm1,hm2,hm3}, hydrological \cite{river,hjkim},
biophysical \cite{bio1,bio2,bio3,bio4,bio5,bio6},
economic systems\cite{econo1,econo2}, and so on. The Brownian motion
follows the Fokker-Planck equation (FPE) well known as the diffusion equation for the probability density
function (PDF) from which the mean-squared displacement (MSD) is linearly dependent
on time, $\langle x_t ^2 \rangle = 2D_0 t$ where $D_0$ is the constant diffusion coefficient.
This Brownian process is well described by a stationary Markovian model known as random walk \cite{rw,ks}.
However recent studies
report that MSD shows the nonlinear behavior rather than the linear behavior for time \cite{godec,the1,the2,swarm,exp1,exp2}.
The MSD following the power-law behavior, $\langle x^2 (t) \rangle \sim t^{2H}$ characterizes
anomalous diffusion, where $H$ is called as the Hurst exponent which classifies superdiffusion
($ H > 1/2 $) in which the past and future random variables are positively correlated and thus
persistence is exhibited, and subdiffusion ($ 0 < H < 1/2 $) which behaves in the opposite way,
showing antipersistence.
Efforts to describe mechanisms underlying anomalous diffusions have been tried through
representative stochastic models such as fractional Brownian motion (fBM)
where long-ranged temporal correlation between steps is given and the Hurst exponent ranges from 0
to 1 \cite{fbm}, L\'{e}vy walk model that describes well superdiffusions by drawing a step length from the distribution with a heavy
power-law tail and keeping a constant speed for a random time \cite{levir,lrtlw,leviwalk},
continuous time random walks (CTRW) with the power-law distribution of time intervals for a step showing subdiffusions \cite{rw,ctrw},
and scaled Brownian motion (sBM), which is described by a diffusion equation with an explicitly
time-dependent diffusion coefficient \cite{sbm,sbm1}.
These models show the non-stationarity or non-Markovianity which are responsible for the anomalous
diffusive behaviors. The fBM is non-Markovian but stationary, and the L\'{e}vy walk, the CTRW and the sBM
are semi-Markovian but non-stationary.
In addition, stochastic models with memory of the whole previous trajectory in a walk process,
mimicking the movements of animals such as elephants \cite{pre70} and monkeys \cite{boyer},
have been introduced,
and it is known that memorizing the history of a process, which makes the process non-Markovian and non-stationary,
plays a key role in generating the long-term correlations between steps resulting in anomalous diffusions
\cite{pre70,boyer, mm1,mm2}.
However, memorizing the whole history of previous steps is neither easy nor plausible except in
some specific cases; rather, it is much more acceptable to consider short-term memory, such as
remembering just the immediate step.
Although it did not start from the perspective of the short-term memory,
it was already considered in the
persistent random walk model \cite{goldstein} in which a step follows the previous step with a constant probability,
resulting in a movement in the same direction in which the walker was moving.
Also it is known that such a process does not follow the diffusion equation but the telegrapher equation (TE) \cite{tele}
which adds a second-order time derivative term of the PDF to the diffusion equation, introducing a
wave-equation property related to ballistic motion of the diffusing particles, but
asymptotically reduces to normal diffusive behavior \cite{goldstein,ftele}. That is, although the telegrapher process has
advantages in describing ballistic motion in early stages and is applicable to the diverse diffusion and
transport phenomena \cite{ftele, tele1,tele2}, eventually,
it is a stationary normal diffusive process and is not sufficient to explain the properties of nonstationary
movements resulting in the diverse diffusions appearing in nature.
The nonstationary movements of living organisms are natural in making an adaptation to the various types of
temporal stimuli coming from their natural environments~\cite{ecoli}.
In particular, in the kinetics of eukaryotic cells under temporal chemotactic or mechanotactic signaling,
it has been shown that cells respond directly by changing their motion depending on the temporal stimuli
\cite{ts1,ts2,ts3}.
That is, to respond to such complex temporal stimuli a walker may at one moment move in the direction opposite to a previous step,
or conversely, strengthen movements in the same direction. Thus the memorizing of previous steps can change with time,
and we
model it with a time-varying replication probability which controls the degree of following the immediate step at the next step.
Namely, a non-stationary persistent random walk model is introduced and a generalized TE
with time-dependent coefficients is derived. We also calculate the relations between the time-dependent coefficients and the replication
probability and thus show that explicit time dependence of the probability can produce long-term correlations between steps,
which result in diverse diffusions deviating from normal diffusive behavior.
\section{Non-stationary Markovian replication model}
We consider a walker on a one dimensional homogeneous lattice with the uniform spacing $l$ between the
neighboring sites. With a regular time interval denoted as $\tau$,
the walker moves to one of the two neighboring sites.
We denote the walker's position at time $t$ as $x_t$, and the step walker takes as
$\sigma_t$, which is defined by the relation
\begin{equation}
x_{t}=x_{t-\tau}+\sigma_{t}.
\label{xs}
\end{equation}
The walker is initially at the origin.
Details of the model are determined by a time-varying probability $\alpha(t)$ which controls
the dynamics of the process,
\begin{equation}
\sigma_{t} = \left \{
\begin{array}{ll}
\sigma_{t-\tau}, & \quad \text{with a probability $\alpha(t)$}\\
-\sigma_{t-\tau}, & \quad \text{with a probability $1-\alpha(t)$.}
\end{array} \right.
\label{erule}
\end{equation}
The first step($\sigma_\tau$) is randomly chosen between the two possibilities $\pm l$
with the equal probabilities $1/2$.
Successive steps and positions at $t \geq 2\tau$ are determined by Eq.~(\ref{xs}) and Eq. (\ref{erule}).
We note that the process defined as above is symmetric about the origin.
At each time $t$, a step $\sigma_t$ replicates or anti-replicates the latest step $\sigma_{t-\tau}$.
Since the next step is completely determined by the immediate step, this replication process is Markovian,
while the probability of replication, $\alpha(t)$, varies with time in general. Even if such a non-stationary nature is present,
Markovianity of the step process makes the process analytically tractable.
In terms of kinematics, anti-replication of the latest step corresponds to change of direction of motion,
and when $\alpha(t)$ is constant, the model reduces to the persistent random walk model \cite{goldstein}.
From the perspective of memorizing a trajectory
and concerning possible applications of the model not only to diffusion processes in real space but also
to the analysis of general two-state time series, we prefer to view Eq.~(\ref{erule}) as a replication-antireplication
process rather than just alternating direction of motion. Thus, we call $\alpha(t)$ and the process~(\ref{erule})
the replication probability and the non-stationary Markovian replication process (NMRP), respectively.
\section{Time Evolution of the PDF and the MSD}
Now, we derive the time evolution of the probability distribution of the displacement $P(x,t)$ for the NMRP model, which
is the probability that the walker's position is $x$ at time $t$,
starting from the relation
\begin{equation}
P(x,t)=\sum_{x_{t-\tau},x_{t-2\tau}}^{} P(x,t|x_{t-\tau},x_{t-2\tau})P(x_{t-\tau},x_{t-2\tau}).
\label{mestart}
\end{equation}
Here, $P(x_{t-\tau},x_{t-2\tau})$ is the probability that the walker's positions
at times $t-\tau$ and $t-2\tau$ are $x_{t-\tau}$ and $x_{t-2\tau}$, respectively, and $P(x,t|x_{t-\tau},x_{t-2\tau})$ is
the second order transition probability, which is a conditional probability that
the walker's position is $x$ at time $t$ given the two previous positions.
The summation runs over all lattice sites.
Because of the Markovianity of the step process (\ref{erule}),
it is possible to use the second order transition probability which
is determined in terms of only $\sigma_{t}$ and $\sigma_{t-\tau}$ at time $t$ and is expressed by
\begin{equation}
\begin{split}
P(x,t|x_{t-\tau},x_{t-2\tau})=
\{ \delta_{\sigma_{t-\tau},l}+\delta_{\sigma_{t-\tau},-l}\} \\
\times \{ \alpha(t)\delta_{\sigma_{t},\sigma_{t-\tau}}
+[1-\alpha(t)]\delta_{\sigma_{t},-\sigma_{t-\tau}}\}.
\end{split}
\label{transition}
\end{equation}
Terms in the first bracket depict the two possible choices for the step $\sigma_{t-\tau}$, and
the other two terms in the second bracket indicate the probabilities that the replication or anti-replication occur at time $t$, respectively.
By substituting Eq.~(\ref{transition}) into Eq.~(\ref{mestart}),
the time evolution of $P(x,t)$ is described by the following master equation,
\begin{equation}
\begin{split}
P(x,t)=&\alpha(t) \{P(x+l,t-\tau)+P(x-l,t-\tau) \} \\
& + \{ 1 - 2\alpha(t) \} P(x,t-2\tau),
\end{split}
\label{me}
\end{equation}
which is valid for $t \geq 2\tau$.
If $\alpha(t)={1 / 2}$, the last term on the right hand side
vanishes, and Eq.~(\ref{me}) reduces to that of the normal random walk with symmetric probabilities.
Next, we take the continuum limit, considering the position and the time approximately as continuous variables. Expanding the master equation Eq. (\ref{me}) into a Taylor series keeping the lowest non-vanishing order terms, the time evolution of the PDF $\rho(x,t)$ in the continuum limit is obtained as follows,
\begin{equation}
{{\partial \rho(x,t)} \over {\partial t}} + \mathcal{R}(t) {{\partial ^ 2 \rho(x,t)} \over {\partial t^2}}\\
= \mathcal{D}(t) {{\partial ^ 2 \rho(x,t)} \over {\partial x^2}},
\label{fpe}
\end{equation}
where
\begin{equation}
\mathcal{R}(t)={\tau \over 2} \left[ {{3\alpha(t) -2} \over {1-\alpha(t)}} \right]
\label{rt}
\end{equation}
and
\begin{equation}
\mathcal{D}(t)= D_0 \left[ {{\alpha(t)} \over {1-\alpha(t)}} \right]
\label{dt}
\end{equation}
with $D_0=l^2 / {2\tau}$ being the diffusion coefficient for the normal diffusion. Eq. (\ref{fpe}) becomes a generalized TE with the
persistent coefficient $\mathcal{R}(t)$ and the diffusion coefficient $\mathcal{D}(t)$ depending on time.
The relations between the coefficients $\mathcal{R}(t)$ and $\mathcal{D}(t)$ and the replication probability $\alpha (t)$ are given
by Eq. (\ref{rt}) and Eq. (\ref{dt}), respectively, which indicates that the larger $\alpha (t)$ is, the larger the coefficients are.
Note that when $\alpha(t)$ approaches 1, the two coefficients $\mathcal{R}(t)$ and $\mathcal{D}(t)$ diverge, whereas the coefficient of the first term on the left hand side of Eq. (\ref{fpe}) remains constant. Therefore, if the divergence is fast enough, the contribution from the first term on the left hand side of Eq. (\ref{fpe}) becomes negligible.
In this case, Eq. (\ref{fpe}) evolves into the wave equation with the speed $v=l/\tau$, which implies the occurrence of the ballistic motion of the walker. In the
telegraph process, initial ballistic motion is transient as mentioned already. However, in the NMRP, ballistic motion appears whenever the replication is more dominant than the anti-replication, namely, $\alpha(t)$ becomes close to 1.
In more usual cases where $\alpha(t)$ does not tend to 1 so fast, because of the relatively small value of the $\tau$ in the continuum limit and the asymptotically small nature of the second order derivative of the PDF in $t$ compared to the first order one, the second term on the left hand side of Eq. (\ref{fpe}) can be neglected. In this case, Eq. (\ref{fpe}) reduces to the diffusion equation for the PDF $\rho(x,t)$ with the time dependent diffusion coefficient $D(t)$ \cite{sbm,sbm1,boyer},
\begin{equation}
{{\partial \rho(x,t)} \over {\partial t}} = D(t) {{\partial ^ 2 \rho(x,t)} \over {\partial x^2}}
\label{efk}
\end{equation}
and the solution is given by the Gaussian distribution provided zero mean of displacement,
\begin{equation}
\rho(x, t) = {1 \over {\sqrt {2 \pi \langle x_t ^2 \rangle}}} \exp \left[ -{ {x^2} \over { 2 \langle x_t ^2 \rangle }} \right].
\label{gaussian}
\end{equation}
Now, we show that there is a unique relation between the MSD of NMRP and the replication probability, and by
manipulating the replication probability, nearly any form of the MSD can be generated if the MSD does not exceed the ballistic limit set by the finite
and constant maximum speed of the process, $v=l/\tau$.
By multiplying $x^2$ to both side of the Eq. (\ref{fpe}) and then integrating with respect to $x$ over all space,
the following relation between $\alpha(t)$ and the MSD is obtained,
\begin{equation}
\alpha(t)={ {\dot{\langle x^2_t \rangle} - \tau \ddot{\langle x^2_t \rangle}}
\over {{{l^2} \over \tau }+ \dot{\langle x^2_t \rangle}-{3\tau \over {2}}\ddot{\langle x^2_t \rangle}} }.
\label{alpha}
\end{equation}
When the Eq. (\ref{efk}) could be considered, $\ddot{\langle x^2_t \rangle}$ in Eq. (\ref{alpha}) is removed
and the MSD is calculated for general $\alpha(t)$ as
\begin{equation}
\langle x^2_t \rangle = 2 D_0 \int_{0}^{t} {\alpha(s) \over {1-\alpha(s)} } ds.
\label{msd}
\end{equation}
If $\alpha(t)$ does not change in time, the process reduces to normal diffusion.
Thus, a stationary replication process can not make a deviation from the normal diffusive behavior in the asymptotic limit.
The time-varying property in the replication process becomes the key point inducing diverse diffusions.
\section{Diffusions using several specific replication probabilities}
\subsection{Alternating diffusions}
In the experiments for the cellular motion, external temporal stimuli have been simply imposed by a
step-function change in chemo-effector concentration \cite{ts1} or mechanotactic signaling of
repeating step-like type \cite{ts2} and exponentiated sine-waves for more complex fluctuating signaling
in time \cite{ts3}. For the responding movements to periodic temporal stimuli we can consider
the periodic replication probability and if the value of probability changes from 0 to 1, we can also study the
motion alternating from totally anti-persistent phase to persistent phase.
As such an example, we have chosen the replication probability of $\alpha(t)=\text{sin}^2 (\pi t/T)$, with the period $T=N/5$
where $N$ is the total number of steps.
\begin{figure}[ht]
\includegraphics[width=9cm]{fig1.png}
\caption{\label{fig1} Time evolution of $P(x,t)$ for the oscillatory replication process, $\alpha(t)=\text{sin}^2 (\pi t/T)$,
with the period $T=2 \times 10^4 \tau$. For the time interval between $0.4T$ and $0.8T$, the exact $P(x,t)$ obtained
from solving Eq. (\ref{me}) numerically (the black dashed lines) shows the perfect coincidence with the data obtained
by simulating the model (the green lines).
The inset (a) shows the simulation result of $P(x,t)$ for two periods,
in which periodic swelling and freezing of $P(x,t)$ is observed. The inset (b) shows the
simulated (the red circles) and numerically solved (the black solid line) MSD which shows a stair-like shape
having periodic plateaus and sudden jumps. In this study, all simulations for the models have been done with the fixed
values $\tau=l=1$.
}
\end{figure}
Overall, $P(x,t)$ is composed of periodically repeating swelling regions in which
$\alpha(t)$ is around the maximum value and thus
almost perfect replication happens, and freezing regions in which $\alpha(t)$ deviates from the
maximum and the anti-persistent nature is realized (the inset (a) of Fig. \ref{fig1}).
$P(x,t)$ around $\alpha(t)=1$ is enlarged in the main panel of Fig. \ref{fig1}
in which a single peak of $P(x,t)$ at the origin at $t=0.4T$ splits into two peaks away from the center
after $t=0.6T$.
It shows that the walkers around the center are divided into the two opposite directions due to the almost perfect replications
around $\alpha(t)=1$; the subsequently decreasing $\alpha(t)$ freezes the walkers, so the peaks are maintained
until the next swelling point. However, after several periods such peaks
disappear because the repetition of the perfect replications produces
many more small peaks, and then a peak at the center is restored.
Characteristics of the periodic oscillation in the replication process directly propagate
into the probability and the MSD shows an interesting behavior with a periodic stair-like shape
in which plateaus of the MSD correspond to the freezing regions of $P(x,t)$,
while sudden jumps appear in the swelling regions of $P(x,t)$ (the inset (b) of Fig. \ref{fig1}).
Such a stepwise increasing MSD has been reported in \cite{step}
where there are two alternating waiting time distributions, one of which centered around zero and the other centered around
some finite waiting time, where the standard deviations of both distributions are small.
Such a setup leads to a movement in which the walker periodically repeats two sudden jumps and waitings where the waiting time is nearly a constant, thus making step-wise increasing MSD. On the other hand, in this case
such a feature in the MSD does not arise from the repetition of moving and stopping but from a continuous and periodic change
from ultraslow ($\alpha(t)\sim0$) to ballistic diffusion ($\alpha(t)\sim1$).
The oscillating replication probability implies periodic and continuous alternation of the phase of the process between superdiffusion and subdiffusion.
It can be compared to a dynamical system of intermittent locomotion, which
has been extensively studied via intermittent search models
where discontinuous transitions between two explicitly defined diffusion phases are considered \cite{is}.
Pauses, along with changes in the duration and speed of motion form different intermittent locomotions
which happens in contexts where animals adjust their motion to changing circumstances
and thus the cumulative distances over time show a step-like picture as well as other oscillatory pictures with increasing and pausing intervals \cite{stepdistance}.
So we consider another oscillatory motion with the MSD which has increasing and pausing intervals,
\begin{figure}[ht]
\includegraphics[width=8cm]{fig2.png}
\caption{\label{fig2}Simulated MSDs of the model with $\alpha(t)$
obtained by $\langle x^2_t \rangle = t/2 - \text{sin} (2at)/4a +1/2 + \text{sin}2a/4a $
with (a) $a=10^{-5}$ and (b) $a=5 \times 10^{-5}$. An additive constant has been included
in the MSD here and after to meet the condition $\langle x^2_t \rangle = 1$ at time $t = 1$.
Circles representing the simulation data match well with the MSD functions (solid lines).
Corresponding $\alpha(t)$'s are plotted in the insets.
}
\end{figure}
$\langle x^2_t \rangle \sim t/2 - \text{sin} (2at)/4a$.
$a$ is a constant and the MSDs with $a=10^{-5}$ and $a=5 \times 10^{-5}$
are shown in the Fig.~\ref{fig2} (a) and (b), respectively.
Corresponding $\alpha(t)$'s by Eq. (\ref{alpha}) with the MSDs oscillates
from 0 to 0.5 (the insets of Fig.~\ref{fig2}). The MSD is composed of periodically repeating plateaus
and smoothly increasing regions, which correspond to the regions with the dominant anti-replication
and the normal diffusive regions where $\alpha(t) \sim 1/2$, respectively.
The shape of the MSD is similar to that of Fig.~\ref{fig1}, but the sudden increases have been smoothed
due to the smaller maximum value of $\alpha(t)$.
We have shown that different diffusive phases can be alternated by an oscillatory replication probability,
which implies that it may be a generative mechanism to be able to describe changes of diffusion phase with time shown in
the kinetics of cells in external stimuli and intermittent locomotions of animals.
However, what we have shown here is not about a specific system but about generic changes of diffusive phases,
and it needs to be more closely analyzed to find which specific $\alpha(t)$ is appropriate to describe a specific system.
\subsection{Superdiffusion with $H=0.9$}
\begin{figure}[ht]
\includegraphics[width=9cm]{fig3.png}
\caption{\label{fig3} Simulation results of models created by the replication probabilities related to
the given MSD $\langle x^2_t \rangle = t^{2H}$ with $H=0.9$.
(a) The MSD data obtained using the $\alpha(t)$ in Eq. (\ref{alpha}). The solid line represents the given MSD.
(b) The MSD data obtained using $\alpha_D (t)$ which ignores the second order derivative of the MSD in Eq.~(\ref{alpha}).
Results show that the models generate the MSD expected,
while $\alpha(t)$ gives slightly more accurate coincidence with the MSD than $\alpha_D (t)$.
Figure (c) and (d) show the PDFs corresponding to (a) and (b), respectively.
Shape of the PDF is strikingly different from the Gaussian, indicating
the effect of the second term on the left hand side of Eq.~(\ref{fpe}) at early times.
(e) The PDF at time $t=10^4$ for the model (a). The solid line represents the corresponding Gaussian
curve, which shows that the PDF will eventually converge to the Gaussian distribution
after a sufficiently long time.
}
\end{figure}
To compare the early behaviors of two cases where $\alpha (t)$ of Eq. (\ref{alpha}) and
$\alpha_D (t)$ obtained by ignoring $\ddot{\langle x^2_t \rangle}$ in Eq. (\ref{alpha}) are used, respectively,
we have used the MSD, $\langle x^2_t \rangle = t^{2H}$ with $H=0.9$.
Fig. \ref{fig3} (a) shows the MSD obtained through the simulation using the $\alpha (t)$,
which shows excellent agreement with the given MSD.
In Fig. \ref{fig3} (b), we also plotted the simulation result using $\alpha_D (t)$.
It shows that the data slightly deviate from the expected line.
Taking the second derivative in Eq.~(\ref{alpha}) into account gives more accurate MSD.
Although there is just a slight difference in the two MSDs, note that at the early times, $P(x,t)$ obtained
by $\alpha(t)$ is distinguished from the Gaussian distribution showing the peaks at the possible maximum
distances (Fig. \ref{fig3} (c)), while $P(x,t)$ obtained by $\alpha_D (t)$ relatively follows the Gaussian
(Fig. \ref{fig3} (d)). However, at large times $P(x,t)$ obtained by $\alpha (t)$ also converges
to the Gaussian distribution (Fig. \ref{fig3} (e)) and thus it is sufficient to take just $\alpha_D (t)$ for most of the
asymptotic behaviors (see Fig.~\ref{fig4}).
The peaks of $P(x,t)$ at early times,
which resemble those of the L\'{e}vy walks \cite{levir},
arise because the divergence of $\mathcal{R}(t)$ is not fast
enough to make the second order derivative term in $t$ in Eq. (\ref{fpe}) dominant, but it significantly slows the convergence
of $P(x,t)$ to the Gaussian distribution. In such cases, the effect survives
in early dynamics of stochastic processes.
Analysis of early dynamics is important in real and experimental environments
in which it is difficult to take a sufficiently long time and thus the second term on the left hand side of Eq. (\ref{fpe}) could play an important role
\begin{figure}[ht]
\includegraphics[width=8.5cm]{fig4.png}
\caption{\label{fig4} Plots of the MSDs for anomalous diffusions with various $H$
induced by $\alpha_D (t)$ obtained from $\langle x^2_t \rangle = t^{2H}$.
In the inset, the corresponding $\alpha_D (t)$'s are plotted.
The symbols representing the simulation data fall excellently on the solid lines of $t^{2H}$.
}
\end{figure}
in such contexts.
\subsection{Anomalous diffusions}
We have also considered anomalous diffusions induced by the NMRP model.
In Fig. \ref{fig4}, the MSDs for the anomalous diffusions
with the Hurst exponents ranging from 0.1 to 0.9 are shown. Models have been generated with
$\alpha_D (t)$ obtained by setting $\langle x^2_t \rangle = t^{2H}$.
For $H>0.5$, $\alpha_D (t)$ increases with time, which induces the persistence in the process over time,
resulting in the superdiffusions.
While for $H<0.5$, $\alpha_D (t)$ decreases with time, which invokes the anti-persistence showing
the subdiffusions.
Similar conclusion has been reported using the latest memory enhancement model \cite{mm1}
which can be thought of as a special case of the NMRP model.
For instance, the positive latest memory enhancement model can be reproduced
in the framework of the NMRP
\begin{figure}[ht]
\includegraphics[width=8cm]{fig5.png}
\caption{\label{fig5} The plot of the simulated MSDs using (a) $\langle x^2_t \rangle = \text{ln} t + 1$ and
(b) $\langle x^2_t \rangle = t \text{ln} t +1$.
The insets show the corresponding $\alpha (t)$'s calculated from Eq.~(\ref{alpha}), respectively.
Stars and circles represent the data obtained from the simulation and the solid lines in each plots
represent the analytic functions of the MSD.
}
\end{figure}
if we use $\alpha(t)=1-1/2t^p$ with $p$ being the memory parameter.
\subsection{Marginal diffusions}
Fig. \ref{fig5} shows simulation results for logarithmic MSDs which are generated by $\alpha(t)$'s using
(a) $\langle x^2_t \rangle \sim \text{ln}t$, and (b) $\langle x^2_t \rangle \sim t\text{ln}t$.
In each figure, excellent agreement between the given MSD and the simulation results is shown.
In the NMRP model, logarithmically slow subdiffusion is achieved by fast decreasing replication probability
from the value of 0.5 (the inset of Fig. \ref{fig5} (a)), that is, the probability that the walker escapes away from a position
decreases more rapidly than that of subdiffusions with time and the anti-persistence is strongly developed.
Such ultra-slow diffusions have been reported in various contexts \cite{boyer,usbm,uctrw,sinai,us1,us2}, and
often arise as a marginal behavior of the subdiffusion with $H=0$.
The MSD of the type of $t\text{ln}t$ also appears in the marginal behaviors of the superdiffusions
\cite {mm1, pre70, marginal}, while in the NMRP model it is embodied
with $\alpha(t)$ increasing slowly compared to the cases of superdiffusions
as shown in the Fig.~\ref{fig5} (b). Thus the marginal behaviors of anomalous diffusions
can be also induced by a single origin, the NMRP with appropriate replication probabilities.
\section{Conclusion}
In conclusion, we have proposed a non-stationary random walk model
in which the steps are given by a Markov process replicating the immediate step with a time-varying
probability. The master equation for the probability
has been analytically acquired and the
generalized TE for the PDF has been derived, from which we have obtained the general relation between the time-varying
replication probability and the MSD with accuracy up to the second order.
We realized several interesting cases such as alternating diffusions, anomalous diffusions,
and marginal diffusions.
Although the stepping process is Markovian,
the time-varying nature of replication develops the long-term correlation between steps and
the corresponding diffusive behaviors, i.e.
ballistic, super, sub, slow-sub, and ultraslow diffusive phases as well as normal diffusion
have been induced depending on the values of the replication probability changing in time.
For oscillatory replication probability, alternating diffusions of different diffusion phases
have been induced, and an increasing (decreasing) replication probability with a value larger (smaller) than 0.5
has caused superdiffusions (subdiffusions).
This single mechanism inducing diverse diffusions may provide a theoretical guide to
experimental results of various types of diffusions
and furthermore, non-stationary stochastic processes shown in diverse fields.
\\
We also remark that the further studies of the relation between a general replication probability
and the autocorrelation function of steps will be helpful to elucidate the actual mechanism generating these long-term
correlations. Non-stationarity of the model would invoke the ergodicity breaking
\cite{ergodicity,metzler} and the characteristics represented by the time average should be dealt with
separately from the results in this study which are obtained from the ensemble averages.
% (extraction artifact removed: boundary between two concatenated documents)
In the average consensus problem the objective is to enable a group of communicating agents $\mathcal{V}=\{1,\cdots,N\}$ to arrive at the average of their local input $\mathsf{r}^i\in{\mathbb{R}}$, i.e., to obtain $\mathsf{r}^{\text{avg}}=\frac{1}{N}\sum_{j=1}^N\mathsf{r}^j$ using local interactions. The solution to this problem is of great importance in various multi-agent applications such as robot coordination~\cite{PY-RAF-KML:08}, sensor fusion~\cite{ROS-JSS:05,ROS:07,WR-UMA:17}, distributed estimation~\cite{SM:07} and formation control~\cite{JAF-RMM:04}. In these applications, reaching fast to the average consensus is of great interest to reduce the end-to-end delays and also the convergence error caused by premature termination of the algorithm because of time constraints.
The well-known solution for the average consensus problem is the first-order iterative \emph{Laplacian} algorithm
\begin{subequations}\label{eq::consensus-orig}
\begin{align}
& {x}^i(k+1)=x^i(k)+\delta\,\sum\nolimits_{j=1}^N\!\!a_{ij}(x^j(k)-
x^i(k)),\\
&~x^i(0)=\mathsf{r}^i,\quad i\in\mathcal{V}.
\end{align}
\end{subequations}
where $a_{ij}$s are the adjacency weights. In this algorithm, each agent $i$ uses the agreement feedback $\sum\nolimits_{j=1}^N\!a_{ij}(x^j(k)-x^i(k))$ to drive its local agreement state $x^i$ towards $\mathsf{r}^{\text{avg}}$. When the interaction topology of the agents is a connected undirected graph, see Fig.~\ref{fig::graph},~\cite{ROS-JAF-RMM:07} shows that with a proper choice of stepsize $\delta$, executing~\eqref{eq::consensus-orig} guarantees $x^i\!\to\!\mathsf{r}^\text{avg}$, $i\in\mathcal{V}$, as $k\!\to\!\infty$. Our objective in this paper is to obtain accelerated average consensus algorithms that have a provably faster convergence rate than algorithm~\eqref{eq::consensus-orig}.
We consider two approaches: one using outdated agreement feedback in~\eqref{eq::consensus-orig} and the other by constructing alternative algorithms using the first-order accelerated optimization algorithms for strongly convex unconstrained optimization problems.
\begin{figure}[h]
\centering
\begin{tikzpicture}[scale=0.7]
\tikzset
adv/.style={circle,minimum size=.5cm,fill=red!20,draw=red},
nor/.style={circle,minimum size=.5cm,fill=black!20,draw},
a1/.style={very thick,red!80!black},
n1/.style={very thick,blue!65!black}}
\node[nor] (1) at (0,0) {1};
\node[nor] (2) at (2,0) {2};
\node[nor] (3) at (2,-2) {3};
\node[nor] (4) at (-2,-2) {4};
\node[nor] (5) at (0,-2) {5};
\draw[n1][-] (1) -- (2);
\draw[n1][-] (1) -- (5);
\draw[n1][-] (2) -- (3);
\draw[n1][-] (3) -- (5);
\draw[n1][-] (4) -- (5);
\draw[n1][-] (1) -- (4);
\draw[n1][-] (2) -- (5);
\end{tikzpicture}\\
\caption{{\small A connected undirected graph $\mathcal{G}(\mathcal{V},\mathcal{E},\mathsf{A})$ with five agents. The adjacency weights of the agents here are $a_{ij}=a_{ji}=1$ if $(i,j)$ is in the edge set $\mathcal{E}$, and $a_{ij}=a_{ji}=0$ otherwise.}}
\label{fig::graph}
\end{figure}
For a multi-agent system with connected undirected communication graph, the convergence rate of the average consensus algorithm~\eqref{eq::consensus-orig} is tied to the connectivity of the graph~\cite{MF:73} through the spectral radius of matrix $(\vect{I}-\delta \vect{L})$ where $\vect{L}$ is the Laplacian matrix of the graph~\cite{ROS-RMM:04,SSK-BVS-JC-RAF-KML-SM:19}.
Given this connection,
various studies such as optimal adjacency weight selection for a given topology by maximizing the smallest non-zero eigenvalue of the Laplacian matrix~\cite{LX-SB:04,SB-AG-BP-DS:06} or rewiring the graph to create topologies such as small-world network~\cite{SK-JMF:06,PH-JSB-VG:08} with high connectivity are proposed in the literature. In this letter, instead of altering the communication graph, we investigate two methods that use buffered states to accelerate reaching average consensus over a given graph.
Our first accelerated consensus algorithm is motivated by evidences in the literature on the positive effect of using buffered feedback on increasing the stability margin and the rate of convergence of the continuous-time linear systems~\cite{HM-SSK:20tac,WQ-RS:13}, which led to use of buffered agreement feedback to accelerate the continuous-time Laplacian average consensus algorithm~\cite{HM-SSK:20tac,YG-MS-CS-NM:16,ZM-YC-WR:10,YC-WR:10, WQ-RS:13}. Since the results obtained for the continuous-time Laplacian algorithm cannot be trivially extended to discrete-time communication setting, we investigate using out-dated feedback in~\eqref{eq::consensus-orig} to increase the convergence rate. More precisely, we explore for what values of non-zero $\mathpzc{d}$ in
\begin{align}\label{eq::consensus-orig_dated}
& {x}^i(k+1)=x^i(k)+\delta\,\sum\nolimits_{j=1}^N\!a_{ij}(x^j(k-\mathpzc{d})-
x^i(k-\mathpzc{d})),\nonumber\\
&x^i(k)=0~\text{for}~ k\in\{-\mathpzc{d} ,\cdots,-1\},~x^i(0)=\mathsf{r}^i,
\end{align}
$i\in\mathcal{V}$, we can achieve faster convergence than~\eqref{eq::consensus-orig}. Our contribution is to characterize values of $\mathpzc{d}\in\mathbb{Z}_{>0}$ for which convergence is accelerated.
Even though our results show that there always exists $\mathpzc{d}\in\mathbb{Z}_{>0}$ in~\eqref{eq::consensus-orig_dated} that convergence is accelerated, this method has its own limitations because of restricting the structure of the algorithm to the first-order form of algorithm~\eqref{eq::consensus-orig}. This leaves one to wonder whether faster convergence can be achieved by using alternative forms. With such motivation, for example, \cite{TCA-BNO-MJC:08} proposes to improve the rate of convergence by predicting future state values using a weighted summation of current and previous states, denoted as the mixing parameter. However, it requires a complex parameter design procedure since significant improvements in the rate
of convergence are usually achieved by values outside the identified range of the mixing parameter which also requires agents to know extra global information~\cite[Equation (13)]{TCA-BNO-MJC:08}. A simple and more effective approach, however, is reported in~\cite{JB-MF-MM:18}, which casts average consensus problem as a convex optimization problem and uses the accelerated Nesterov's optimization method to design a fast-converging average consensus algorithm. Nesterov algorithms~\cite{nesterov2013introductory}, for convex (denoted here as NAG-C) and for strongly-convex (denoted here as NAG-SC) cost functions, are gradient-based optimization methods that use the buffered one-step past gradient value to accelerate convergence. By casting the consensus algorithm as an optimization problem with the cost $\frac{1}{2}\vect{x}^\top\vect{L}\vect{x}$ where $\vect{x}$ is the aggregated agreement state of the agents,~\cite{JB-MF-MM:18} invokes NAG-C method to design its accelerated algorithm. The choice of NAG-C is because $\vect{L}$ of connected graphs is positive semi-definite and thus $\frac{1}{2}\vect{x}^\top\vect{L}\vect{x}$ is a convex function. In this letter, we show that with an alternative modeling approach, we can in fact use the NAG-SC to arrive at a faster converging average consensus algorithm. Our approach also opens the door for use of the so-called Triple Momentum (hereafter denoted as TM) algorithm which is the fastest known globally convergent
gradient-based method for minimizing
strongly convex functions~\cite{BVS-RAF-KML:18}. TM also uses the buffered one-step past gradient value, but has a provably faster convergence than the Nesterov algorithms.
\begin{comment}
\emph{Organization}:
Notations and preliminaries including a brief review of the relevant properties of the time-buffered discrete-time systems and the graph theoretic definitions are given in Section~\ref{sec::prelim}. Problem definitions and the objective statements are given in Section~\ref{sec::Prob_formu}, while the main results are given in~Sections~\ref{sec::main} and \ref{sec::acc}. Numerical simulations to illustrate our results are given in Section~\ref{sec::Num_ex}. Section~\ref{sec::Con} summarizes our concluding remarks.
\end{comment}
\noindent \emph{Notations and definitions}:
we let ${\mathbb{R}}$, ${\mathbb{R}}_{>0}$, ${\mathbb{R}}_{\ge 0}$, $\mathbb{Z}$, and $\mathbb{C}$
denote the set of real, positive real, non-negative real, integer, and complex numbers, respectively.
The transpose of a matrix $\vect{A}\in{\mathbb{R}}^{n\times m}$ is~$\vect{A}^\top$.
The set of eigenvalues of matrix $\vect{A}\in{\mathbb{R}}^{n\times n}$ is $\lambda(\vect{A})$ and its spectral radius is $\sigma(\vect{A})$. Recall that for a square matrix $\vect{A}$, we have~\cite{RAH-CRJ:90}
\begin{align}\label{eq::spectral_formula}
\lim_{k\to\infty}\|\vect{A}^k\|^{1/k}=\sigma(\vect{A}),
\end{align}
and when $\vect{A}$ is symmetric, we have $\|\vect{A}\|=\sigma(\vect{A})$.
We follow~\cite{FB-JC-SM:09} to define our graph related terminologies and notations. In addition, we denote $\textup{diam}(\mathcal{G})$ as the diameter of a graph $\mathcal{G}$ which is the length of the shortest path between the most distanced nodes. For an iterative algorithm with states $\|\vect{x}(k)\|$ converging to origin, the \emph{asymptotic convergence factor} is
$\mathfrak{r}=\underset{\vect{x}(0)\neq\vect{0}}{\sup}\lim_{k\to\infty}\left(\frac{\|\vect{x}(k)\|}{\|\vect{x}(0)\|}\right)^{\frac{1}{k}}$
and the associated convergence time is
$\mathfrak{t}=\frac{1}{\textup{log}(1/\mathfrak{r})}.$
The convergence time $\mathfrak{t}$ represents the (asymptotic) number of steps in which $\|\vect{x}(k)\|$ reduces by the factor $1/\text{e}$. In a network of $N$ agents with undirected connected graph
topology the graph is denoted by $\mathcal{G}(\mathcal{V},\mathcal{E},\vectsf{A})$ where $\mathcal{V}=\{1,\cdots,N\}$ is the node set, $\mathcal{E}\subset\mathcal{V}\times \mathcal{V}$ is the edge set and $\vectsf{A}=[a_{ij}]$ is the adjacency matrix of the graph. Recall that $a_{ii}=0$, $a_{ij}\in{\mathbb{R}}_{>0}$ if $j\in\mathcal{V}$ can send information to agent $i\in\mathcal{V}$, and zero otherwise. In an undirected graph the connection between the nodes is bidirectional and $a_{ij}=a_{ji}$ if $(i,j)\in\mathcal{E}$. The maximum degree of a graph is $\mathsf{d}_{\max}=\max\{\sum_{j=1}^Na_{ij}\}_{i=1}^N$. Finally, an undirected graph is connected if there is a path from every agent to every other agent in the network (see e.g.~Fig.~\ref{fig::graph}). The Laplacian matrix of the graph is $\vect{L}=\text{Diag}(\vectsf{A}\vect{1}_N)-\vectsf{A}$. The Laplacian matrix of a connected undirected graph is a symmetric positive semi-definite matrix that has a simple $\lambda_1=0$ eigenvalue, and the rest of its eigenvalues satisfy $\lambda_1=0< \lambda_2\leq\cdots\leq\lambda_N$. Moreover, $\vect{L}\vect{1}_N=\vect{0}$.
\section{Problem Definition}\label{sec::Prob_formu}
We study the accelerated average consensus problem over a connected undirected graph $\mathcal{G}(\mathcal{V},\mathcal{E},\vectsf{A})$. As stated earlier, algorithm~\eqref{eq::consensus-orig} is the well-known solution for the average consensus problem. The admissible stepsize for algorithm~\eqref{eq::consensus-orig} over a connected graph satisfies $\delta\in(0,\frac{2}{\lambda_N})$, for which algorithm~\eqref{eq::consensus-orig}
converges exponentially fast to the average of the initial conditions of the agents~\cite{ROS-RMM:04}.
The \emph{asymptotic convergence factor}
for the Laplacian average consensus algorithm~\eqref{eq::consensus-orig} is $\mathfrak{r}_0=\max\{|1-\delta\lambda_2|,|1-\delta\lambda_N|\}$. For $\delta\in(0,\frac{1}{\lambda_N}]$, given that $0<\lambda_2\leq\lambda_N$, $\mathfrak{r}_0=|1-\delta\lambda_2|$. We can show that the exponential convergence rate of~\eqref{eq::consensus-orig} is equal to $\mathfrak{r}_0+\epsilon$ for an infinitesimally small $\epsilon\in{\mathbb{R}}_{>0}$. Given $\delta\in(0,\frac{1}{\lambda_N})$, if one wants to increase the rate of convergence of algorithm~\eqref{eq::consensus-orig} then the only possible mechanism is to decrease $\delta$, or in other words, increase the frequency of the communicated messages between the agents. The objective in this paper is to investigate algorithms that have provably faster convergence than the Laplacian average consensus algorithm~\eqref{eq::consensus-orig}.
Our first approach is to investigate using out-dated feedback in~\eqref{eq::consensus-orig}, i.e., using non-zero $\mathpzc{d}$ in~\eqref{eq::consensus-orig_dated}.
Our second approach is to cast the average consensus problem as a convex optimization problem and then seek faster converging algorithms using the first-order accelerated optimization algorithms.
\section{Accelerated average consensus via outdated agreement feedback}\label{sec::main}
In this section, we study convergence of algorithm~\eqref{eq::consensus-orig_dated} and determine for what values of $\mathpzc{d}\in\mathbb{Z}_{>0}$, this algorithm can converge faster than algorithm~\eqref{eq::consensus-orig}. According to~\cite{HM-SSK:18}, the modified average consensus algorithm~\eqref{eq::consensus-orig_dated} with $\delta\in(0,\frac{2}{\lambda_N})$ is guaranteed to converge when $\mathpzc{d}=1$.
The results in~\cite{HM-SSK:18} go further to show the admissible range of $\mathpzc{d}\in\mathbb{Z}_{>0}$ for which~\eqref{eq::consensus-orig_dated} converges, see~\cite[Lemma III.4]{HM-SSK:18}.
\begin{comment}
The results in~\cite{HM-SSK:18} go further to show that the admissible range of $\mathpzc{d}\in\mathbb{Z}_{>0}$ for which~\eqref{eq::consensus-orig_dated} converges is given as~follows.
\begin{lem}[Admissible range of $\mathpzc{d}$ for~\eqref{eq::consensus-orig} over connected undirected graphs~\cite{HM-SSK:18}]\label{lem::admissible-d}{\rm
Let $\mathcal{G}$ be a connected undirected graph. Assume that $\delta\in(0,\frac{2}{\lambda_N})$. Then, for any $\mathpzc{d}\in\{1,\cdots,\bar{\mathpzc{d}}\}$, the average consensus algorithm~\eqref{eq::consensus-orig_dated} satisfies $\lim_{t\to\infty}x^i=\mathsf{x}^\text{avg}(0)$, $i\until{N}$, (the algorithm converges asymptotically) if and only if
\begin{align}\label{eq::stability con_dis}
\bar{\mathpzc{d}}\!=\!\min\Big\{d\in\mathbb{Z}_{\geq 0}\big|\,d>\hat{d},~~\hat{d}=&\frac{1}{2}\big(\frac{\pi}{2\arcsin(\frac{\delta\,\lambda_i}{2})}-1\big),~\nonumber\\
&\,\,i\in\{2,\cdots,N\}\Big\},
\end{align}
where $\{\lambda_i\}_{i=2}^N$ are the non-zero eigenvalues of $\vect{L}$. }\hfill \ensuremath{\Box}
\end{lem}
It is shown in~\cite{ISL:05} that the the roots of the characteristic equation~\eqref{eq::char-lambda-i} lie inside the unit circle if and only if $\delta\lambda_i$ lies inside the region of complex plane enclosed by the curve
\begin{align*}
\left\{z\in \mathbb{C}|z=2\vect{i}\sin(\frac{\phi}{2d+1})e^{\vect{i}\phi},-\frac{\pi}{2}\leq\phi\leq\frac{\pi}{2}\right\}.
\end{align*}
\end{comment}
Next, we determine for what values of $\mathpzc{d}\in\{1,\cdots,\bar{\mathpzc{d}}\}$ the convergence of the modified algorithm~\eqref{eq::consensus-orig_dated} is faster than the convergence of the Laplacian average consensus algorithm. For convenience in our study, we implement the change of variable $\vect{z}(k)=\vect{T}^\top\vect{x}(k)$
to write~\eqref{eq::consensus-orig} in the following equivalent form
\begin{subequations}\label{eq::laclacian_equivalent}
\begin{align}
{z}_1(k+1)&=z_1(k),\quad~ {z}_1(0)=\mathsf{r}^{\text{avg}}=\frac{1}{\sqrt{N}}\sum\nolimits_{j=1}^N\mathsf{r}^j,\label{eq::laclacian_equivalent_z1}\\
z_i(k+1)&\!=z_i(k)\!-\delta\lambda_i z_i(k-\mathpzc{d}),\label{eq::laclacian_equivalent_z2} \\
z_{i}(0)&=[\vect{T}^\top\vect{x}(0)]_i, \quad z_{i}(k)=0~~~ k\in\{-\mathpzc{d},\cdots,-1\}, \nonumber
\end{align}
\end{subequations}
for $i\in\{2,\cdots,N\}$, where \begin{align}\label{eq::T}\vect{T}=\begin{bmatrix}\vect{v}_1&\vect{R}\end{bmatrix},\quad \vect{R}=\begin{bmatrix}\vect{v}_2&\cdots&\vect{v}_N\end{bmatrix}
\end{align}
with $\vect{v}_1=\frac{1}{\sqrt{N}}\vect{1}_N,\vect{v}_2,\cdots,\vect{v}_N$ being the normalized eigenvectors of $\vect{L}$.
Note that $\vect{T}^{-1}=\vect{T}^\top$ and $\vect{T}^\top\vect{L}\vect{T}=\vect{\Lambda}=\Diag{0,\lambda_2,\cdots,\lambda_N}$ because $\vect{L}$ of a connected undirected graph is a symmetric and real matrix, thus its eigenvectors are mutually orthogonal. Let $\vect{z}_{2:N}=(z_2,\cdots,z_N)^\top$.
Given~\eqref{eq::laclacian_equivalent_z1}, it follows from $\vect{z}(k)=\vect{T}^{\top}\,\vect{x}(k)$ that
\begin{align*}\lim_{k\to\infty}\vect{x}(k)=&\,\frac{1}{\sqrt{N}}\lim_{k\to\infty}z_1(k)\vect{1}_N+\lim_{k\to\infty}\vect{R}\,\vect{z}_{2:N}(k)\\=&\,\mathsf{r}^{\text{avg}}\vect{1}_N+\vect{R}\lim_{k\to\infty}\vect{z}_{2:N}(k).
\end{align*}
Therefore, the correctness and the convergence factor of the average consensus algorithm~\eqref{eq::consensus-orig} are determined, respectively, by asymptotic stability and the worst convergence factor of the scalar dynamics in~\eqref{eq::laclacian_equivalent_z2}.
\begin{comment}
The characteristic equation of the scalar dynamics in~\eqref{eq::laclacian_equivalent_z2} is given by
\begin{align}\label{eq::char-lambda-i}
\mathcal{T}(s)=s^{\mathpzc{d}+1}-s^\mathpzc{d}+\delta \lambda_i,\quad i\in\{2,\cdots,N\}.
\end{align}
The roots of~\eqref{eq::char-lambda-i} are all simple except when $\lambda_i=\frac{d^d}{\delta(d+1)^{d+1}}$ \cite{SAK:94}.
It is shown in~\cite{ISL:05} that the the roots of the characteristic equation~\eqref{eq::char-lambda-i} lie inside the unit circle if and only if $\delta\lambda_i$ lies inside the region of complex plane enclosed by the curve
\begin{align*}
\left\{z\in \mathbb{C}|z=2\vect{i}\sin(\frac{\phi}{2d+1})e^{\vect{i}\phi},-\frac{\pi}{2}\leq\phi\leq\frac{\pi}{2}\right\}.
\end{align*}
Based on this observation and considering the asymptotic stability condition of Theorem~\ref{thm::rate_discrete},~\cite{HM-SSK:18} derives the admissible range of buffer for the average consensus algorithm~\eqref{eq::consensus-orig} as~follows.
\end{comment}
Given~\eqref{eq::laclacian_equivalent}, the convergence factor of the average consensus algorithm~\eqref{eq::consensus-orig_dated} is
\begin{align}
\mathfrak{r}_\mathpzc{d}=\max \{\mathfrak{r}_{\mathpzc{d},i}\}_{i=2}^N,
\end{align}
where $\mathfrak{r}_{\mathpzc{d},i}$ is the convergence factor of scalar dynamics~\eqref{eq::laclacian_equivalent_z2}, $i\in\{2,\cdots,N\}$. For each scalar dynamics~\eqref{eq::laclacian_equivalent_z2}, for $i\in\{2,\cdots,N\}$, we have
$\mathfrak{r}_{\mathpzc{d},i}=\max\{|s^i|\,|\, s^i\in\mathcal{S}\}$, where $\mathcal{S}$ is the set of roots that is determined by
the characteristic equation of the scalar dynamics~\eqref{eq::laclacian_equivalent_z2} given by
\begin{align}\label{eq::char-lambda-i}
\mathcal{T}(s)=s^{\mathpzc{d}+1}-s^\mathpzc{d}+\delta \lambda_i,\quad i\in\{2,\cdots,N\}.
\end{align}
The roots of~\eqref{eq::char-lambda-i} are all simple except when $\lambda_i=\frac{\mathpzc{d}^\mathpzc{d}}{\delta(\mathpzc{d}+1)^{\mathpzc{d}+1}}$ \cite{SAK:94}. The roots of $\mathcal{T}(s)$ and consequently the size of $\mathfrak{r}_{\mathpzc{d},i}$ depend on $\mathpzc{d}$ and $\delta \lambda_i$. The following result, whose proof is given in the appendix, specifies the values of $\mathpzc{d}>0$ for which the convergence factor of~\eqref{eq::consensus-orig_dated} is less than that of~\eqref{eq::consensus-orig}. Recall that a smaller convergence factor means faster convergence.
\begin{thm}[The range of $\mathpzc{d}$ for which the convergence factor of \eqref{eq::consensus-orig_dated} is less than that of~\eqref{eq::consensus-orig}]\label{lem::beta-dis_scalar}
Let the communication graph be undirected and connected. For any $\delta \in(0,\frac{2}{\lambda_N})$ the convergence factor of~\eqref{eq::consensus-orig_dated} is less than that of~\eqref{eq::consensus-orig} if
\begin{align}\label{eq::buffer_bound_dis_rate}
\mathpzc{d}<\min\left\{\frac{\textup{ln}(\frac{\delta\lambda_i}{\sqrt{(1-\delta\lambda_i)^2+1-2|1-\delta\lambda_i|\cos{\phi}}})}{\textup{ln}(|1-\delta\lambda_i|)},\frac{|1-\delta\lambda_i|}{1-|1-\delta\lambda_i|}\right\},
\end{align}
where $\phi\in[0,\frac{\pi}{\mathpzc{d}+1}]$ is the solution of $\frac{\sin{\mathpzc{d}\phi}}{\sin{(\mathpzc{d}+1)\phi}}=\frac{1}{|1-\delta\lambda_i|}$ for all $i\in\{2,\cdots,N\}$. Moreover, the convergence factor is a decreasing function of $\mathpzc{d}\in\mathbb{Z}_{>0}$ if $\frac{\mathpzc{d}^\mathpzc{d}}{(\mathpzc{d}+1)^{\mathpzc{d}+1}}<\delta\lambda_i$ holds for all $i\in\{2,\cdots,N\}$. \hfill \ensuremath{\Box}
\end{thm}
\section{Accelerated average consensus via first-order accelerated optimization algorithms} \label{sec::acc}
In this section, we use the first-order accelerated optimization framework to devise accelerated average consensus algorithms that have proven faster convergence than the well-known average consensus algorithm~\eqref{eq::consensus-orig}. The work in this section is inspired by the results in~\cite{JB-MF-MM:18}, whose authors argued that the conventional average consensus algorithm~\eqref{eq::consensus-orig} can be viewed as the gradient descent algorithm with fixed stepsize $\delta\in(0,\frac{2}{\lambda_N})\subset{\mathbb{R}}_{>0}$ where the cost function is the agreement potential $f(\vect{x})=\frac{1}{2}\vect{x}^\top\vect{L}\vect{x}$. Note here that $\vect{0}\leq \nabla^2 f(\vect{x})=\vect{L}\leq \lambda_N\vect{I}$. Based on this observation and since the cost function~$f(\vect{x})=\frac{1}{2}\vect{x}^\top\vect{L}\vect{x}$ is convex,~\cite{JB-MF-MM:18} proposes to use the first-order accelerated NAG-C optimization algorithm
\begin{subequations} \label{eq::ref15}
\begin{align}
\vect{y}(k+1)&=\vect{x}(k)-\delta\nabla f(\vect{x}(k)),\\
\vect{x}(k+1)&=\vect{y}(k+1)+\frac{k+1}{k+3}(\vect{y}(k+1)-\vect{y}(k)),
\end{align}
\end{subequations}
with $\vect{x}(0)=\vect{y}(0)=\vectsf{r}$, $\vectsf{r}=[\mathsf{r}^1,\cdots,\mathsf{r}^N]^{\top}$, where $\nabla f(\vect{x}(k))=\vect{L}\vect{x}(k)$. The choice of coefficient $\frac{k+1}{k+3}$, which tends to one, is fundamental for the argument used by Nesterov to establish the following inverse quadratic convergence rate of $f(\vect{x}(k))-f(\vect{x}^\star)\leq O\left(\frac{1}{\delta \,k^2}\right)$, for any stepsize $0<\delta\leq 1/\lambda_N$, with the best step size being $\delta=\frac{1}{\lambda_N}$.
For an $m$-strongly convex objective $f$ with an $L$-Lipschitz gradient, i.e., $m\vect{I}\leq \nabla^2 f(\vect{x})\leq L\vect{I}$, the NAG-SC algorithm achieves a faster linear convergence of $f(\vect{x}(k))-f(\vect{x}^\star)\leq O((1-\sqrt{\delta m})^k)$ for any stepsize of $\delta\in(0,\frac{1}{L}]$ with the best rate being achieved at $\delta=\frac{1}{L}$. The fastest accelerated globally convergent gradient-based algorithm for an $m$-strongly convex objective $f$ with $L$-Lipschitz gradients, however, is the TM algorithm proposed in~\cite{BVS-RAF-KML:18}, which achieves $f(\vect{x}(k))-f(\vect{x}^\star)\leq O\left((1-\sqrt{\frac{m}{L}})^{2k}\right)$. Building on the structure of these two optimization algorithms, we propose the following TM-based accelerated average consensus algorithm
\begin{subequations} \label{eq::TM}
\begin{align}
\vect{\xi}(k+1)&=(1+\beta)\vect{\xi}(k)-\beta\vect{\xi}(k-1)-\alpha\vect{L}\vect{y}(k),\\
\vect{y}(k)&=(1+\gamma)\vect{\xi}(k)-\gamma\vect{\xi}(k-1),\\
\vect{x}(k)&=(1+\delta)\vect{\xi}(k)-\delta\vect{\xi}(k-1),
\end{align}
\end{subequations}
where $(\alpha,\beta,\gamma,\delta)=\Big(\frac{1+\rho}{\lambda_{N}},\frac{\rho^2}{2-\rho},\frac{\rho^2}{(1+\rho)(2-\rho)},\frac{\rho^2}{1-\rho^2}\Big)$, $\rho~=~1-\sqrt{\frac{\lambda_{2}}{\lambda_{N}}}$, $\vect{\xi}(0)=\vect{\xi}(1)=\vectsf{r}$, and the NAG-SC-based accelerated average consensus algorithm
\begin{subequations} \label{eq::NAG-SC}
\begin{align}
\vect{x}(k+1)&=\vect{y}(k)-\alpha\vect{L}\vect{y}(k),\\
\vect{y}(k)&=(1+\beta)\vect{x}(k)-\beta\vect{x}(k-1),
\end{align}
\end{subequations}
with $(\alpha,\beta)=\Big(\frac{1}{\lambda_{N}},\frac{\sqrt{\lambda_{N}}-\sqrt{\lambda_{2}}}{\sqrt{\lambda_{N}}+\sqrt{\lambda_{2}}}\Big)$, $\vect{x}(0)=\vect{x}(1)=\vectsf{r}$. In the following, we prove the convergence of $x^i\to\frac{1}{N}\sum_{i=1}^{N}\mathsf{r}^i$ as $k\to\infty$ for the TM-based algorithm~\eqref{eq::TM}, and omit the proof of the NAG-SC algorithm, since a similar approach can be applied.
Consider the change of variable $\begin{bmatrix}w_{1} \quad \vect{w}^{\top}_{2:N}\end{bmatrix}^{\top}=\vect{T}^\top\vect{\xi}$, $\begin{bmatrix}q_{1} \quad \vect{q}^{\top}_{2:N}\end{bmatrix}^{\top}=\vect{T}^\top\vect{y}, \begin{bmatrix}p_{1} \quad \vect{p}^{\top}_{2:N}\end{bmatrix}^{\top}=\vect{T}^\top\vect{x},$
where $\vect{T}$ is~\eqref{eq::T}. Then,~\eqref{eq::TM} can be written in an equivalent form
\begin{subequations}\label{eq::TMequ}
\begin{align}
w_{1}(k+1)&=(1+\beta)w_{1}(k)-\beta w_{1}(k-1), \label{eq::TMequ-a}\\
q_1(k)&=(1+\gamma)w_1(k)-\gamma w_1(k-1), \label{eq::TMequ-b} \\
p_1(k)&=(1+\delta)w_1(k)-\delta w_1(k-1), \label{eq::TMequ-c} \\
\vect{w}_{2:N}(k+1)&=(1+\beta)\vect{w}_{2:N}(k)-\beta \vect{w}_{2:N}(k-1) \nonumber \\
&-\alpha\vect{L}^{+}\vect{q}_{2:N}(k), \label{eq::TMequ-d}\\
\vect{q}_{2:N}(k)&=(1+\gamma)\vect{w}_{2:N}(k)-\gamma \vect{w}_{2:N}(k-1), \label{eq::TMequ-e} \\
\vect{p}_{2:N}(k)&=(1+\delta)\vect{w}_{2:N}(k)-\delta \vect{w}_{2:N}(k-1), \label{eq::TMequ-f}
\end{align}
\end{subequations}
where $\vect{L}^{+}=\vect{R}^{\top}\vect{L}\vect{R}$. The following theorem proves that~\eqref{eq::TM} is a solution for the average consensus problem. The reason that we can use the TM and NAG-SC algorithms to design our accelerated average consensus algorithms reveals itself in the proof of this theorem.
\begin{thm}\label{thm::TM_based_Convereg}
Consider a network of $N$ agents communicating over a connected graph. Let the agents of the network implement algorithm~\eqref{eq::TM}. Then, $x^i\to\frac{1}{N}\sum_{i=1}^{N}\mathsf{r}^i$ as $k\to\infty$.
\end{thm}
\begin{proof}
Let us consider the equivalent form of the TM algorithm in~\eqref{eq::TMequ}. From \eqref{eq::TMequ-a}-\eqref{eq::TMequ-c}, it is trivially concluded that $q_1(k)=p_1(k)=w_1(0)$ for $k\in\mathbb{Z}_{>0}$. On the other hand, since $\vect{L}^{+}$ is a positive definite matrix with eigenvalues $\lambda_2,\cdots,\lambda_N$, by virtue of the TM algorithm of~\cite{BVS-RAF-KML:18}, \eqref{eq::TMequ-d}-\eqref{eq::TMequ-f} minimize the $\lambda_2$-strongly convex function $f(\vect{p}_{2:N})=\frac{1}{2}\vect{p}_{2:N}^\top\vect{L}^{+}\vect{p}_{2:N}$ with $\lambda_N$-Lipschitz gradient to the optimal point $\vect{p}^{\star}_{2:N}=\vect{0}$ with a rate of convergence of $f(\vect{p}_{2:N}(k))-f(\vect{0})\leq O\left(\left(1-\sqrt{\frac{\lambda_2}{\lambda_N}}\right)^{2k}\right)$. Therefore, as $k\to\infty$, $\vect{p}_{2:N}\to\vect{0}$. Considering the change of variables, we know that $p_1(k)=w_1(0)=\frac{1}{\sqrt{N}}\sum_{i=1}^{N}\mathsf{r}^i$ and thus, $x^i\to\frac{1}{N}\sum_{i=1}^{N}\mathsf{r}^i$ as $k\to\infty$.
\end{proof}
We showed in the result above that the algorithm presented in~\eqref{eq::TM} solves the average consensus problem. A similar result can be established for~\eqref{eq::NAG-SC}. Based on the developments in~\cite{JB-MF-MM:18}, it is proved that \eqref{eq::ref15} converges to the average of local reference values asymptotically and faster than the popular solution~\eqref{eq::consensus-orig}. Moreover, from~\cite{BVS-RAF-KML:18}, we know that the TM algorithm enjoys the fastest exponential convergence rate among the accelerated first-order gradient methods in optimization, e.g.,~\eqref{eq::ref15}. We illustrate this comparison in a numerical example in the following section, while also simulating the algorithm in~\cite{TCA-BNO-MJC:08}.
Similar to~\cite{TCA-BNO-MJC:08}, we note that the faster convergence of~\eqref{eq::TM} and \eqref{eq::NAG-SC} comes at the cost of requiring the agents to know $\lambda_2$ and $\lambda_N$ globally in order to compute $(\alpha,\beta,\gamma,\delta)$. The knowledge of $\lambda_N$ to choose the stepsize $\delta$ is universal among all discrete-time average consensus algorithms, including the Laplacian algorithm~\eqref{eq::consensus-orig}. Such knowledge is especially useful in choosing the best stepsize for the fastest convergence. In practice, instead of $\lambda_N$, often its upper-bound $\bar{\lambda}_N=2\mathsf{d}_{\max}$, which is easier to compute, is used~\cite{ROS-JAF-RMM:07}. On the other hand, $\lambda_2$ can be either computed through a dedicated distributed algorithm, see, e.g., \cite{yang2010decentralized}, or replaced by a lower bound such as $\underbar{$\lambda$}_2=\frac{4}{N \textup{diam}(\mathcal{G})}$, see e.g., \cite{mohar1991eigenvalues}.
\section{Performance demonstration in distributed Gaussian Mixture Model (GMM) Estimation
}\label{sec::Num_ex}
Average consensus is an important tool to enable many distributed schemes in networked systems. To demonstrate the benefit of using our accelerated average consensus, we conduct a simulation study of a distributed expectation-maximization (EM) algorithm used in sensor networks to obtain a Gaussian Mixture Model (GMM) of a set of observed targets. In our case study, the setting consists of $N$ agents observing the location $\vectsf{p}\in{\mathbb{R}}^2$ of $M$ targets in a 2D plane. The agents want to collaboratively obtain the GMM model of the distribution of the targets, i.e., obtain the weight, mean, and covariance of each basis of the GMM model, i.e., $(\pi_l,\vect{\mu}_l,\vect{\Sigma}_l)$ in $\hat{f}(\vectsf{p})=\sum_{l=1}^{N_s}\pi_l\mathcal{N}(\vectsf{p}|\vect{\mu}_{l},\vect{\Sigma}_l)$, where $N_s$ is the number of the bases of the GMM model, which is known to all agents. A popular method to construct a GMM with a determined number of bases $N_s$ from observed data is the EM algorithm~\cite{dempster1977maximum}, which is an iterative method that alternates between an expectation (E) step and a maximization (M) step. In the E-step, the posterior probability is computed as
\begin{align}\label{eq::posterior}
\zeta_{ln}:=\textup{Pr}(z=l|\vectsf{p}_n)=\frac{\pi_{l}\mathcal{N}(\vectsf{p}_n|\vect{\mu}_{l},\vect{\Sigma}_l)}{\sum_{j=1}^{N_s}\pi_{j}\mathcal{N}(\vectsf{p}_n|\vect{\mu}_{j},\vect{\Sigma}_j)}
\end{align}
using the target points $\vectsf{p}_n$, $n\in\{1,\cdots,M\}$ and the current values of $\{\pi_{l},\vect{\mu}_{l},\vect{\Sigma}_l\}_{l=1}^{N_s}$. Next, in the M-step, the parameters of each $l\in\{1,\cdots,N_s\}$ are updated by
\begin{subequations}\label{eq::update}
\begin{align}
\pi_l&=\frac{\sum_{n=1}^{M}\zeta_{ln}}{M}, \label{eq::updateA} \\
\vect{\mu}_{l}&=\frac{\sum_{n=1}^{M}\zeta_{ln}\vectsf{p}_n}{\sum_{n=1}^{M}\zeta_{ln}}, \\
\vect{\Sigma}_l&=\frac{\sum_{n=1}^{M}\zeta_{ln}(\vectsf{p}_n-\vect{\mu}_l)(\vectsf{p}_n-\vect{\mu}_l)^{\top}}{\sum_{n=1}^{M}\zeta_{ln}},
\end{align}
\end{subequations}
using the current values of $\zeta_{ln}$. In the distributed EM algorithm, each agent only observes $M^i$ of the targets; the set is given by $\mathcal{M}^i\subset\{1,\cdots,M\}$ where $\cup_{j=1}^N\mathcal{M}^j=\{1,\cdots,M\}$ and $\mathcal{M}^i\cap\mathcal{M}^j=\emptyset$ for $i\neq j$. The distributed EM algorithms assume that each agent has a local copy of the GMM parameters~\cite{valdeira2022decentralized, altilio2019distributed}. Each agent $i\in\{1,\cdots,N\}$ performs the E-step in~\eqref{eq::posterior} locally using its own GMM parameters for $n\in\mathcal{M}^i$. However, the summation terms in~\eqref{eq::update} are fragmented among the agents. Therefore, agents use a set of three average consensus algorithms to compute the summation terms that appear in the M-step~\eqref{eq::update}.
In our numerical example, the number of agents is $N=20$, and the agents communicate over a ring graph. These agents observe $M=1000$ target points in a rectangle area of $(-80, 80)\times (-60, 60)$. The target points are drawn from a GMM model with $N_s=12$ so that we can check the accuracy of the estimated GMM via distributed EM algorithms against this true model. Each agent initializes its $\{\pi_{l},\vect{\mu}_{l},\vect{\Sigma}_l\}_{l=1}^{N_s}$ locally. Let us denote $T_{\textup{EM}}~=~10$ as the number of iterations in the EM algorithm, and $T_{\textup{consensus}}$ as the number of the consensus steps performed in each iteration of the M-step.
First, we compare the performance of the EM algorithm when it uses the Laplacian algorithm in~\eqref{eq::consensus-orig} (Laplacian-based EM) vs. when it uses our proposed TM-based algorithm~\eqref{eq::TM}. We conduct a set of four simulations for $T_{\textup{consensus}}\in\{8,15,30,50\}$. When using the TM algorithm, we consider two cases. In one, we assume that the agents know $\lambda_2$ and $\lambda_N$ to compute the parameters of the TM algorithm (referred to as TM-based EM (exact)), and in the other case, we assume that agents replace $\lambda_2$ with its lower bound $\frac{4}{N \textup{diam}(\mathcal{G})}$ and $\lambda_N$ with its upper bound $2\mathsf{d}_{\max}$ (referred to as TM-based EM (via bounds)).
Due to the limited space, we only show the results generated by agent 1 in all the simulations; the other agents have similar results. Fig.~\ref{fig::GMM} depicts the 3$\sigma$-plot of the bases of the estimated GMM for $T_{\textup{consensus}}=8$. Here, the thin gray, the thin colored, and the thick colored ellipses represent, respectively, the true GMM model, the estimated GMM model using the Laplacian-based EM, and the estimated GMM model using the TM-based EM (via bounds). As seen in Fig.~\ref{fig::GMM}, the results generated by the TM-based EM (via bounds) are closer to the true model, especially in some bases, such as the top right purple and the bottom center magenta one. The results for TM-based EM (exact) are not shown to reduce clutter in Fig.~\ref{fig::GMM}. To better show the accuracy of each estimated GMM model, Fig.~\ref{fig::Loglikelihood} depicts the maximum Log-likelihood of the distributed EM algorithms in comparison to the central EM. By using the same number of communications, the TM-based EM algorithms, even when we use the bounds instead of the exact values for $\lambda_2$ and $\lambda_N$, achieve better results compared to the Laplacian algorithm in the sense that the maximum log-likelihood of the TM-based estimates is closer to the central solution. This difference is especially large in cases where the number of communications is limited, e.g., $T_{\textup{consensus}}=8$.
\begin{figure}[h]
\centering
\subfloat[]{\includegraphics[trim= 1pt 5pt 0 0 ,clip,width=1\linewidth]{GMM.png}\label{fig::GMM}}\\
\subfloat[]{\includegraphics[trim= 1pt 5pt 0 0 ,clip,width=1\linewidth]{loglikelihood.png}\label{fig::Loglikelihood}}
\caption{{\small Plot (a) depicts the GMM estimates of Agent 1 after $T_{\textup{EM}}=10$ iterations of the EM algorithm.
Plot (b) represents the maximum Log-likelihood of the estimated models of the central EM, TM-based EM (exact), TM-based EM (via bounds), and Laplacian-based EM.
}}
\end{figure}
In order to compare the convergence rates of the proposed algorithms against the existing ones in the literature, Fig.~\ref{fig::convergence} illustrates the evolution of the estimates of $\pi_l$, for $l=1$, over $T_{\textup{consensus}}=50$ steps in the first iteration of the EM algorithm. Here, the convergence error trajectories of the consensus algorithms are denoted by $e(k)=\log\sum_{i=1}^{N}(\pi_1^i(k)-\pi_1^{\star})^2$, where $\pi_1^{\star}$ is the central solution obtained by~\eqref{eq::updateA}. It is shown that the TM algorithm achieves the fastest convergence rate, and also that the buffered Laplacian algorithm with $\mathpzc{d}=2$ is faster than the original Laplacian algorithm without buffered feedback.
\begin{figure}[h]
\centering
{\includegraphics[trim= 1pt 5pt 0 0 ,clip,width=.8\linewidth]{convergence.png}}
\caption{{This plot shows the convergence error $e(k)$ for obtaining $\pi_1^i$ with respect to the actual average computed as~\eqref{eq::updateA}. Five different average consensus algorithms have been implemented and compared as above. For better visual comparison, the convergence error trajectories of the Laplacian algorithm with $\mathpzc{d}=0$ and $\mathpzc{d}=2$ for $k=\{40,\cdots,50\}$ are magnified in the top right corner.
}}\label{fig::convergence}
\end{figure}
\section{Accelerated distributed linear regression}
In this section, we investigate the numerical examples that show the effect of outdated feedback data and the implementation of accelerated first-order optimization methods. We use real data from~\cite{utts2021mind} to solve a linear regression problem by reformulating it as two average consensus problems. In the first example, the common Laplacian average consensus algorithm is compared with the proposed algorithm in~\eqref{eq::consensus-orig_dated} with different number of buffers to analyze the effect of $d$ on convergence. In the second example, convergence of the proposed accelerated algorithms~\eqref{eq::ref15}-\eqref{eq::NAG-SC} are compared against \eqref{eq::consensus-orig_dated} and the algorithm in~\cite{TCA-BNO-MJC:08}.
The dataset in~\cite{utts2021mind} has a size of 50 points for the 50 states in the United States. The variables are $y$ which is year 2002 birth rate per 1000 females 15 to 17 years old, and $x$ which is the poverty rate (the percent of the state’s population living in households with incomes below the federally defined poverty level). The objective is to find a linear relation between $x$ and $y$, i.e., solve the following problem:
\begin{align}\label{eq::regression}
\textup{min}_{a,b\in\mathbb{R}}\sum\nolimits_{i=1}^{50}\|y^i-(ax^i+b)\|^2
\end{align}
for $a$ and $b$. Since our goal is to study the effect of the buffer on convergence, we simplify the problem and assume that the optimal value of $b$ is known. Thus, we only solve for the variable $a$ as the slope of the fitted line. Here, $b=4.267$. By taking the derivative of~\eqref{eq::regression} with respect to $a$, setting it to zero, and substituting the value of $b$, we conclude that
\begin{align}\label{eq::RegressionSolution}
a=\frac{\sum_{i=1}^{50}x^i(y^i-b)}{\sum_{i=1}^{50}(x^i)^2}.
\end{align}
Let us now solve this problem while the dataset is distributed over a network of 5 agents where each agent has access to 10 arbitrary state data points. Agents of the network communicate over a connected graph depicted in Fig.~\ref{fig::graph}. This solution can be achieved by solving two average consensus problems for the numerator and the denominator, and computing the division. Let $\eta_1^i(k)$ and $\eta_2^i(k)$ denote agent $i$'s local estimate of the numerator and the denominator of~\eqref{eq::RegressionSolution}, respectively. The reference values of the first average consensus problem are $\mathsf{r}_1^1=\sum_{i=1}^{10}x^i(y^i-b)$, $\mathsf{r}_1^2=\sum_{i=11}^{20}x^i(y^i-b)$,$\cdots$, $\mathsf{r}_1^5=\sum_{i=41}^{50}x^i(y^i-b)$. The same process is applied to the second average consensus problem with the reference values $\mathsf{r}_2^1=\sum_{i=1}^{10}(x^i)^2$, $\mathsf{r}_2^2=\sum_{i=11}^{20}(x^i)^2$,$\cdots$, $\mathsf{r}_2^5=\sum_{i=41}^{50}(x^i)^2$. Each agent computes the estimate $\hat{a}^i(k)=\frac{\eta_1^i(k)}{\eta_2^i(k)}$ at every step $k\in\mathbb{Z}_{\geq0}$.
\emph{Outdated feedback}:
Let the agents implement algorithm~\eqref{eq::consensus-orig_dated} with three different values of $d=\{1,5,10\}$ and a buffer-free case. Fig.~\ref{fig::error1} depicts the convergence error of the agents' trajectories with respect to the optimal value in~\eqref{eq::RegressionSolution}, i.e., $e(k)=\log\sum_{i=1}^{N}(\hat{a}^i(k)-a)^2$. The green line, representing the implementation of a one-step buffer, converges faster than the buffer-free case, as seen in the figure. By increasing $d$ and using further outdated feedback, faster convergence is achieved. This shows the effect of the buffer in our analysis. However, as mentioned previously, exceeding $\bar{d}$ may result in divergence or slower convergence. The turquoise trajectories with $d=10$ illustrate the fluctuation in convergence.
\emph{Accelerated consensus via first-order accelerated optimization algorithms}:
Next, we compare the convergence rate of the accelerated algorithms~\eqref{eq::ref15}, \eqref{eq::TM} and \eqref{eq::NAG-SC} with that of the accelerated algorithm proposed in~\cite{TCA-BNO-MJC:08} and also algorithm~\eqref{eq::consensus-orig_dated}. Let the agents of the network solve two average consensus problems to reach $a$ globally. Fig.~\ref{fig::error2} shows the convergence error trajectories $e(k)=\log\sum_{i=1}^{N}(\hat{a}^i(k)-a)^2$ as the agents reach agreement, similar to the previous example. Algorithm~\eqref{eq::consensus-orig_dated} with $d=5$ converges more slowly than the others, while the TM algorithm converges the fastest. Despite using the optimal parameters for the algorithm in~\cite{TCA-BNO-MJC:08}, the TM-based algorithm still delivers the fastest convergence.
\begin{figure}[h]
\centering
\subfloat[]{\includegraphics[trim= 1pt 5pt 0 0 ,clip,width=.9\linewidth]{error1.png}\label{fig::error1}} \vspace{-0.1in}\\
\subfloat[]{\includegraphics[trim= 1pt 5pt 0 0 ,clip,width=.9\linewidth]{error2.png}\label{fig::error2}}
\caption{{\small Plot (a) compares the effect of different values of buffer ($d=\{0,1,5,10\}$) on the convergence error $e(k)$ over $k$. We can see that $d=5$ generates the best results. Plot (b) shows the comparison between the optimization inspired algorithms~\eqref{eq::ref15}-\eqref{eq::NAG-SC} and the algorithm~\eqref{eq::consensus-orig_dated} with $d=5$. As expected from the results in section~\ref{sec::acc}, algorithm~\eqref{eq::TM} converges faster than the other methods.
}}\vspace{-0.1in}
\label{fig::error}
\end{figure}
\section{Conclusion}\label{sec::Con}\vspace{-2pt}
In this letter, we proposed two methods to accelerate reaching the solution in the average consensus problem over connected graphs in a discrete-time communication setting. In contrast to some existing methods in the literature where graph connectivity is increased or edge weights are optimized for faster convergence, we used buffered states to accelerate reaching average consensus. First, we proposed to use buffered states in the well-known Laplacian algorithm in order to achieve a faster convergence rate. Furthermore, we obtained the admissible ranges of delay that allow agents to reach the solution. In the second method, we proposed two accelerated average consensus algorithms inspired by the NAG-SC and TM algorithms. We showed that the average consensus algorithm can be cast as a convex optimization problem which can be solved using the NAG-SC and TM algorithms. To demonstrate the efficacy of our results, we
conducted a simulation study of a distributed EM algorithm which is used vastly, e.g., in sensor networks, to estimate a Gaussian Mixture Model of a set of observed targets. By measuring the maximum Log-likelihood of the estimates of the GMM model using the proposed algorithms against other algorithms in the literature, we showed that the TM-based EM achieves more accurate estimates. The estimated models of the distributed algorithms were compared with respect to the central solution of the EM algorithm.
\section*{Appendix}
We use the following auxiliary lemma in the proof of Theorem~\ref{lem::beta-dis_scalar}, which we present afterwards.
\begin{lem}[Location of roots of $s^{\mathpzc{d}+1}-s^\mathpzc{d}+c=0$~\cite{SAK:94}]\label{thm::dis_time}{\rm
Let $\mathcal{S}=\{s^i\in\mathbb{C}|s^{\mathpzc{d}+1}-s^\mathpzc{d}+c=0\}$ and $s^1=\max\{|s^i|\,|s^i\in\mathcal{S}\}$ for any $i\in\{1,\cdots,\mathpzc{d}+1\}$. Then for $\mathpzc{d}\in\mathbb{Z}_{>1}$ and $c\in{\mathbb{R}}_{>0}$ all the roots are inside the disk $|s|<\frac{1}{|a|}$ if and only if $|a|<\frac{\mathpzc{d}+1}{\mathpzc{d}}$ and
\begin{align}
\frac{(|a|-1)}{|a|^{\mathpzc{d}+1}}<c<\frac{\sqrt{a^2+1-2|a|\cos\phi}}{|a|^{\mathpzc{d}+1}}
\end{align}
where $\phi\in[0,\frac{\pi}{\mathpzc{d}+1}]$ is the solution of $\frac{\sin(\mathpzc{d}\phi)}{\sin((\mathpzc{d}+1)\phi)}=\frac{1}{|a|}$.
Moreover, if $0<c<\frac{\mathpzc{d}^\mathpzc{d}}{(\mathpzc{d}+1)^{\mathpzc{d}+1}}$, then as $c$ increases the value of
$|s^1|$ decreases while the absolute values of
all the other roots increase. In addition, the smallest value of $|s^1|$ occurs at $c=\frac{\mathpzc{d}^\mathpzc{d}}{(\mathpzc{d}+1)^{\mathpzc{d}+1}}$ where $|s^1| =\frac{\mathpzc{d}}{\mathpzc{d}+1}$.}\hfill \ensuremath{\Box}
\end{lem}
\noindent Table~\ref{table.1} shows different values of $\frac{\mathpzc{d}^\mathpzc{d}}{(\mathpzc{d}+1)^{\mathpzc{d}+1}}$ for a given $\mathpzc{d}\in\{1,\cdots,5\}$.
\begin{proof}[Proof of Theorem~\ref{lem::beta-dis_scalar}]
Notice that for $\mathpzc{d}=0$ the asymptotic convergence factor of \eqref{eq::laclacian_equivalent_z2} is equal to $\mathsf{r}_0=|1-\delta\lambda_i|$. Hence, our aim is to find the values of $\mathpzc{d}$ such that $\mathsf{r}_\mathpzc{d}<\mathsf{r}_{0}$, which means that the roots of the characteristic equation~\eqref{eq::char-lambda-i} lie inside the disk, $|s|<|1-\delta\lambda_i|$.
Lemma~\ref{thm::dis_time} implies that this holds if and only if
\begin{subequations}
\begin{align}
&\frac{1}{|1-\delta\lambda_i|}<\frac{\mathpzc{d}+1}{\mathpzc{d}}\label{eq::dis_scal_proof_a}\\
&\frac{|\frac{1}{1-\delta\lambda_i}|\!-\!1}{|\frac{1}{1-\delta\lambda_i}|^{\mathpzc{d}+1}}<\!\delta\lambda_i\!<\!\frac{\sqrt{\frac{1}{(1-\delta\lambda_i)^2}\!+\!1\!-\!2|\frac{1}{1-\delta\lambda_i}|\cos{\phi}}}{|\frac{1}{1-\delta\lambda_i}|^{\mathpzc{d}+1}},\label{eq::dis_scal_proof_b}
\end{align}
\end{subequations}
where $\phi\in[0,\frac{\pi}{\mathpzc{d}+1}]$ is the solution of $\frac{\sin{\mathpzc{d}\phi}}{\sin{(\mathpzc{d}+1)\phi}}=\frac{1}{|1-\delta\lambda_i|}$.~For any $\delta\in(0,\frac{2}{\lambda_N})$,
we know $-1<1-\delta\lambda_i<1$. From~\eqref{eq::dis_scal_proof_a} we get $\mathpzc{d}<\frac{|1-\delta\lambda_i|}{1-|1-\delta\lambda_i|}$. The left-side inequality of~\eqref{eq::dis_scal_proof_b}~is satisfied for any $\delta\in(0,\frac{2}{\lambda_N})$ with $|1-\delta\lambda_i|\!<\!1$. By some algebraic manipulation, the right-side inequality reduces to \[\mathpzc{d}\!<
\!\frac{\ln\bigl(\frac{\delta\lambda_i}{\sqrt{(1-\delta\lambda_i)^2+1-2|1-\delta\lambda_i|\cos{\phi}}}\bigr)}{\ln(|1-\delta\lambda_i|)},\] which concludes \eqref{eq::buffer_bound_dis_rate}. The last~statement is the direct application of Lemma~\ref{thm::dis_time} for~$c=\delta\lambda_i$.
\end{proof}
\begin{table}[htbp]
\caption{The values of $\frac{\mathpzc{d}^\mathpzc{d}}{(\mathpzc{d}+1)^{\mathpzc{d}+1}}$ for a given $\mathpzc{d}$
}
\label{table.1}
\centering
\begin{tabular}{ |c|c|c|c|c|c| }
\hline
$\mathpzc{d}$ & $1$ & $2$ & $3$ & $4$ & $5$\\
\hline
$\frac{\mathpzc{d}^\mathpzc{d}}{(\mathpzc{d}+1)^{\mathpzc{d}+1}}$
& $0.250$
& $0.148$
& $0.105$
& $0.082$
& $0.067$ \\
\hline
\end{tabular}
\end{table}
\bibliographystyle{ieeetr}
\section{Introduction}
The goal of self-supervised learning (SSL) \citep{self-supervised-original} is to learn good representations from unlabeled examples. A good representation is often defined as the one that reflects underlying class structures well. The quality of a representation obtained from SSL is evaluated by measuring downstream classification accuracy on a labelled dataset. In recent years, two families of approaches have emerged as the state-of-the-art for SSL: contrastive and non-contrastive learning.
At its core, a contrastive learning algorithm stochastically creates two views from each training example, called positive and anchor examples, and selects one of the other training examples as a negative \citep{amdim,cmc,mocov2,simclr}. The positive and anchor examples are brought closer in the representation space, while the negative example is pushed away from the anchor. This definition of contrastive loss brings in two interconnected issues. First, there is no principled way to choose negative examples, and hence these negatives are chosen somewhat arbitrarily each time. Second, the contrastive loss function is not decomposed over training examples because negative examples come from other training examples.
Partly to address these limitations, recent studies have proposed non-contrastive approaches that have removed the need for negative examples \citep{byol,swav,barlow-twins}. These approaches avoid the necessity of explicit negatives by constraining or regularizing dataset-level statistics of internal representation \citep{barlow-twins,dino,online-sinkhorn}. Dataset-level statistics, which are intractable to compute, are instead approximated using a minibatch of training examples. This often results in the need of large minibatches. Also, the use of batch-level statistics means that non-contrastive losses are not decomposable as well.
Despite the apparent differences between these two families of algorithms, they all recognize the importance of and rely heavily on data augmentation as a way of incorporating domain knowledge. For instance, \citet{simclr} have highlighted that the downstream accuracy after finetuning varied between 2.6\% and 69.3\% on ImageNet \citep{imagenet-dataset}, depending on the choice of data augmentation.
This is perhaps unsurprising since the importance of domain knowledge has been reported in various domains beyond computer vision. In reinforcement learning, \citet{aug-all-you-need-rl} and \citet{auto-aug-rl} have shown the benefit of adding domain information via pixel-level data augmentation in continuous control. In natural language processing, \citet{ssmba} demonstrate the effectiveness of domain-specific augmentation by using a pretrained denoising autoencoder to build a robust classifier.
A variational autoencoder (VAE) implements a latent variable model using a composition of two neural networks. A neural net decoder maps a latent variable configuration to an observation, and a neural net encoder approximately infers the latent variable configuration given the observation \citep{vae_kingma2013}. It is often trained to maximize the variational lowerbound or its variant \citep{vae_kingma2013,beta-vae}. Careful inspection of this learning objective shows two parts: autoencoding and latent-space regularization. Autoencoding ensures that there is an approximately one-to-one mapping between individual inputs and internal representations. This prevents the collapse of internal representations onto a single point, similar to what negative examples in contrastive learning and regularization of batch-level statistics in non-contrastive learning do. Latent-space regularization, on the other hand, ensures that the internal representation is arranged semantically in a compact subset of the space. It is often done by minimizing the KL divergence \citep{kl-divergence} from the approximate posterior, returned by the encoder, to the prior distribution and adding noise to the representation during training (i.e., sampling from the approximate posterior). This performs a role similar to that of data augmentation in contrastive and non-contrastive approaches but is different in that it is agnostic to the input domain.
Based on these observations: (1) the importance of data augmentations and (2) variational autoencoders for representation learning, we propose a third family of self-supervised learning algorithms in which we augment variational autoencoders with data augmentation. We refer to this family of models as Augmentation-Augmented Stochastic Autoencoders (AASAE). In AASAEs, we replace the usual KL-divergence \citep{kl-divergence} term in ELBO \citep{vae_kingma2013} with a denoising criterion \citep{denoising-ae,stacked-denoising-ae} based on domain-specific data augmentation. We hypothesize that this new approach allows the representations learned by AASAEs to encode domain-specific data invariances and equivariances. The resulting model offers a few advantages over the existing contrastive and non-contrastive methods. First, the loss function is not dependent on the batch-level statistics, which we suspect enables us to use smaller minibatches. Second, the AASAE does not necessitate an arbitrary choice of negative sampling strategy.
We pretrain AASAEs on image datasets: CIFAR-10 \citep{cifar-10}, STL-10 \citep{stl-10} and Imagenet \citep{imagenet-dataset}, and as is the norm with other recently proposed approaches \citep{scaling-benchmarking-ssl,simclr,swav}, we evaluate them on classification tasks corresponding to the dataset using a single linear layer without propagating gradients back to the encoder. We find that our autoencoding-based method gives a downstream classification performance comparable to the current state-of-the-art SSL methods, with $87.14\%$ accuracy on CIFAR-10 and $84.72\%$ on STL-10. On Imagenet, the AASAE outperforms the carefully crafted pretext tasks for SSL, such as Colorization \citep{colorization}, Jigsaw \citep{jigsaw} and Rotation \citep{rotation-prediction}, demonstrating that designing such complex pretext tasks is unnecessary. As anticipated from our formulation, representation learned by the AASAE is robust to the choice of hyperparameters, including minibatch size, latent space dimension, and the network architecture of the decoder. Our observations strongly suggest that autoencoding is a viable third family of self-supervised learning approach in addition to contrastive and non-contrastive learning.
\section{Self-Supervised Learning}
Self-supervised learning (SSL) aims to derive training signal from the implicit structure present within data \citep{self-supervised-original}. This enables SSL methods to leverage large unlabeled datasets to learn representations \citep{seer} which can then be used to solve downstream tasks, such as classification and segmentation, for which it is often expensive to collect a large number of annotations. Here, we summarize quite a few variations of this approach proposed over the last few years.
\subsection{Pretext tasks}
Pretext tasks are designed to train a neural network to predict a non-trivial but easily applicable transformation applied to the input. For example, \citet{rotation-prediction} randomly rotate an input image by $0^{\circ}$, $90^{\circ}$, $180^{\circ}$, or $270^{\circ}$ and train a network to predict the angle of rotation. The colorization pretext task \citep{colorization} creates a training signal by converting RGB images to grayscale and training a network to restore the removed color channels. Image inpainting \citep{inpainting} learns representations by training an encoder-decoder network to fill in artificially-occluded parts of an image. Both jigsaw \citep{jigsaw} and relative patch prediction \citep{relative-patch-preds} tasks divide an input image into patches. The jigsaw task \citep{jigsaw} shuffles the spatial ordering of these patches and trains a network to predict the correct order. In contrast, relative patch prediction \citep{relative-patch-preds} selects two patches of an image and asks the network to predict their relative spatial positions. More recently, \citet{multi-task-ssl} combined various pretext tasks into a single method. \citet{scaling-benchmarking-ssl} have, however, shown that training neural network backbones using pretext tasks often does not capture representations invariant to pixel-space perturbations. Consequently, these representations perform poorly on downstream tasks while they solve the original pretext task well.
\subsection{Contrastive learning}
Between the two major families of state-of-the-art methods for self-supervised learning, we discuss the one based on the so-called contrastive loss function \citep{contrastive-loss}. The contrastive loss is defined such that when minimized, the representations of similar input points are pulled towards each other, while those of dissimilar input points are pushed away from each other. The contrastive loss has its roots in linear discriminant analysis \citep{lda} and is closely related to the triplet loss \citep{triplet-loss}. Recent approaches in contrastive learning are characterized by the InfoNCE loss proposed by \citet{cpc}. CPC uses InfoNCE as a lower bound of mutual information (MI) and maximizes this lowerbound, by using negative examples. Deep InfoMax \citep{dim} similarly proposes to use the idea of maximizing MI while considering global and local representations of an image. \citet{dim} tested three bounds on MI: Donsker-Varadhan \citep{dv-mi}, Jensen-Shannon \citep{jsd-mi}, and InfoNCE \citep{cpc}, and found that the InfoNCE objective resulted in the best downstream classification accuracies. Since then, several more advances in contrastive self-supervised learning have happened, such as AMDIM \citep{amdim} and CMC \citep{cmc}, both of which focus on using multiple views of each image. \citet{cpcv2} extend CPC with an image patch prediction task, and YADIM \citep{yadim} combines these ideas of augmentation and InfoNCE loss from both CPCv2 \citep{cpcv2} and AMDIM \citep{amdim} under a single framework.
The success of contrastive learning comes from using a large number of negative examples. \citet{pirl} empirically demonstrate with PIRL the benefits of using a large number of negative examples for downstream task performance. PIRL uses a momentum-updated memory bank \citep{memory-bank} to provide this large cache of negatives. Memory bank models \citep{memory-bank,pirl} need to store and update representations for each data point and hence cannot be scaled up efficiently. To remove the dependence on memory bank, MoCo \citep{moco, mocov2} instead introduces a momentum-updated encoder and a comparatively smaller queue of representations to set up positive and negative pairs for contrastive learning. SimCLR \citep{simclr} removes memory banks and momentum-updated encoders and scales up the batch size to provide a large number of negatives from within each mini-batch. The necessity of a large quantity of negatives for the contrastive loss function to work well proves to be a major challenge in scaling up these methods.
\subsection{Non-contrastive approaches}
The second family consists of non-contrastive learning algorithms that aim to learn good representations without negative samples by relying on data-level or batch-level statistics. These algorithms can be classified into two groups: clustering-based \citep{deep-cluster,sela,swav} and distillation-based \citep{byol,sim-siam,obow,dino} approaches. A more recently proposed method Barlow Twins \citep{barlow-twins} does not fall under either group.
Clustering-based methods, such as DeepCluster \citep{deep-cluster}, generate pseudo-labels for training examples by grouping them in the latent space of a neural network. The pseudo-labels are then used to train the neural network. These two steps are repeated several times. Like any classical clustering algorithm, such as $k$-means, this approach exhibits degenerate solutions and requires additional regularization to avoid these solutions. One such degenerate solution is to put all examples into a single cluster. SeLA \citep{sela} regularizes the clustering process with the Sinkhorn-Knopp algorithm \citep{sinkhorn-knopp}, encouraging training examples to be equally distributed across the clusters. \citet{swav} extend this approach to use data augmentations and online soft assignments of training examples.
Instead of clustering examples, distillation-based approaches \citep{mean-teacher} rely on having a separate neural network called a teacher network to provide a student network with a target class for each training example. Similar to clustering-based approaches above, this strategy also exhibits trivial solutions, such as the teacher and student networks being constant functions without proper regularization. BYOL \citep{byol,byol-no-batch-norm,understanding-byol}, and its simpler variant called SimSIAM \citep{sim-siam}, rely on asymmetry in the network architecture between the teacher and student to avoid such degeneracy. To simplify things, SimSIAM \citep{sim-siam} goes one step further than BYOL \citep{byol} and removes the momentum-based updates for the teacher network. On the other hand, DINO \citep{dino} retains the momentum-based updates for the teacher network, replaces the architectural asymmetry with centering of representations of examples within each minibatch, and demonstrates that these techniques combined with a tempered softmax are sufficient regularizers to avoid degeneracy.
Barlow Twins \citep{barlow-twins} stands out as an alternative to these two families of approaches. It mixes three principles; (1) batch-level statistics, (2) data augmentation, and (3) whitening (redundancy reduction). At each update, Barlow Twins \citep{barlow-twins} normalizes the representations of the training examples within each minibatch to have zero-mean and unit-variance along each dimension. It then tries to maximize the cosine similarity between the representation vectors coming out of a pair of samples drawn from a stochastic data augmentation pipeline applied over a single training example. Finally, Barlow Twins \citep{barlow-twins} minimizes the cross-correlation between different coordinates of these vector representations, which amounts to reducing redundancy at the second-order moment. A similar approach has also been proposed by \citet{vicreg}.
\begin{figure}[!t]
\centering
\includegraphics[width=0.8\textwidth]{images/model.png}
\caption{AASAE: The input to the model is an augmented view of $x^+ \sim A(x)$, the target is the original input $x$. The loss is the reconstruction term of the ELBO (Eq. \ref{eq:final-aasae-loss}) without the KL-divergence.}
\label{fig:model}
\end{figure}
\section{Augmentation-Augmented Stochastic Autoencoders}
Here we revive the idea of autoencoding as a third paradigm for self-supervised learning, in addition to contrastive and non-contrastive learning, which are described in the previous section. In particular, we start from variational autoencoders (VAEs) \citep{vae_kingma2013} to build a new self-supervised learning algorithm for representation learning. There are three mechanisms by which a VAE captures good representations of data; (1) autoencoding, (2) sampling at the intermediate layer, and (3) minimizing KL divergence \citep{kl-divergence} from the approximate posterior to the prior distribution, all of which are largely domain-agnostic. We thus introduce domain-specific knowledge by replacing the first mechanism (autoencoding) with denoising \citep{denoising-ae,stacked-denoising-ae} via data augmentation. Furthermore, we remove the third mechanism as we expect KL divergence minimization to be redundant in representation learning. In this section, we explain the original VAE and then carefully describe our proposal of augmentation-augmented stochastic autoencoder.
\subsection{Training a VAE with the evidence lowerbound (ELBO)}
We describe algorithms in this section with the assumption that we are working with images, as has been often done with recent work in self-supervised learning \citep{cpc,dim,simclr}. Hence, let the input $x$ be an image, where $x \in \mathbb{R}^{c \times h \times w}$ with $c$ color channels of height $h$ and width $w$. The VAE then uses a continuous latent variable $z \in \mathbb{R}^d$ to map the high dimensional input distribution, as $p(x) = \int_z p(x|z) p(z) \mathrm{d}z$.
It is however intractable to marginalize $z$ in general, and instead we use a tractable lowerbound to the average log-probability of the training examples. Let $q_{\phi}(z|x)$ be an approximate posterior distribution to the intractable distribution $p(z|x)$, parametrized by the output of the encoder $E_{\phi}(x)$. $p_{\psi}(x|z)$ is a probability distribution over the input $x$, parametrized by the output of the decoder $D_{\psi}(z)$. The \textit{variational lowerbound} (ELBO) \citep{vae_kingma2013} to the log-marginal probability $\log p(x)$ is
\begin{align}
\label{eq:variational-lowerbound}
\log p(x)
\geq
\tilde{\mathcal{L}}(x)
=
\mathbb{E}_{z \sim q_{\phi}(z|x)}
\left[
\log p_{\psi}(x | z) + \beta \left( \log p(z) - \log q_{\phi}(z|x)\right)
\right].
\end{align}
The VAE is then trained by minimizing
\begin{align}
\label{eq:vae-loss-2}
J_{\text{VAE}}(\phi, \psi)
=&
-\frac{1}{N} \sum_{n=1}^N \tilde{\mathcal{L}}(x^n),
\end{align}
where $x^n$ is the $n$-th training example.
The first term in Eq.~\ref{eq:variational-lowerbound} serves two purposes. First, it minimizes the reconstruction error, which encourages the intermediate representation of the VAE to be more or less unique for each observation. In other words, it ensures that the internal representations of the inputs do not collapse onto each other. The second purpose, expressed as the expectation over the approximate posterior, is to make the representation space smooth by ensuring a small perturbation to the representation does not alter the decoded observation dramatically.
The second term, the KL divergence \citep{kl-divergence} from the approximate posterior to the prior, serves a single purpose. It ensures that the representation of any observation under the data distribution is highly likely under the prior distribution. The prior distribution is often constructed to be a standard Normal, implying that the probability mass is highly concentrated near the origin (though not necessarily on the origin). This ensures that the representations from observations are tightly arranged according to their semantics, without relying on any domain knowledge.
\subsection{Augmentation-augmented stochastic autoencoder}
The AASAE removes the KL divergence \citep{kl-divergence} from the formulation because it does not embed domain-specific information and replaces it in favor of an augmented view of the original example. Mathematically, this proposed replacement results in the following loss function:
\begin{align}
\label{eq:final-aasae-loss}
J_{\text{AASAE}}(\phi, \psi)
=&
\frac{1}{N} \sum_{n=1}^N
\textcolor{red}{\mathbb{E}_{x^+_n \sim A(x_n)}}
\textcolor{red}{[}
\mathbb{E}_{z \sim q_{\phi}(z_n|\textcolor{red}{x^+_n})}
\left[
\log p_{\psi}(x_n | z_n)
\right]
\textcolor{red}{]},
\end{align}
where $A = \left(a_1, a_2, \dots, a_n \right)$ is a stochastic process that applies a sequence of stochastic input transformations $a_n$. $A$ transforms any input $x$ to generate a view $x^+ \sim A(x)$, while preserving the major semantic characteristics of $x$.
The proposed replacement effectively works by forcing the encoder of the AASAE to put representations of different views of each example close to each other since the original example must be reconstructed from all of them. This is unlike the original KL divergence term, which packs the representations globally into the prior. In other words, we replace this global packing with the local packing, where the domain-specific transformations define the local neighborhood. Furthermore, domain-aware transformations have the effect of filling in the gaps between training examples, which indirectly achieves the goal of global packing.
\paragraph{Comparison to existing approaches}
Compared to the existing approaches, both contrastive and non-contrastive ones, the AASAE has a unique advantage. AASAE's loss function is decomposed over the examples, which avoids the need of approximating data-level statistics and computing its gradient for learning. This is advantageous, because we know precisely what we are computing when we use a small minibatch to approximate the gradient of the whole loss function. Generally, this is not the case with algorithms where we need to approximate the gradient of data-level statistics using a small mini-batch. Based on this observation, we expect our approach to be robust to the minibatch size, which we later confirm experimentally in the paper.
A relatively minor but related advantage of the proposed approach over contrastive learning is that there is no need to design a strategy for selecting negatives for each training example. Considering a flurry of recent work reporting on the importance of mining better negative examples \citep{good-views-contrastive-learning,debiased-contrastive-learning,contrastive-learning-hard-negatives}, our approach based on autoencoding greatly simplifies self-supervised learning by entirely eliminating negative examples.
\section{Experiments}
\subsection{Setup}
\paragraph{Architecture}
The encoder $\phi$ in our experiments is composed of a residual network backbone \citep{resnet} followed by a projection layer similar to the one described in \citep{simclr}. The decoder $\psi$ is an inverted version of residual backbone with its batch normalization \citep{batch-norm} layers removed. We use Resnet-50 as a default option for both the encoder and decoder, but later experiment with varying the decoder architecture.
\paragraph{Datasets}
We test the proposed AASAE and other more conventional autoencoder models by pretraining them on three datasets: CIFAR-10 \citep{cifar-10}, STL-10 \citep{stl-10} and Imagenet \citep{imagenet-dataset}. CIFAR-10 consists of 50,000 32x32 images in the training set and 10,000 images in the test set. These images are equally divided across 10 labeled classes. For pretraining we use 45,000 images from the training set while 5,000 images are kept for validation. The STL-10 dataset consists of 100,000 unlabelled images resized to 96x96 which are split into 95,000 images for self-supervised pretraining and 5,000 for validation. It further consists of 5,000 training images and 8,000 test images that are labelled across 10 classes. We split the 5,000 training images into 4,500 images for training the downstream classification task and the remaining 500 are kept for validation. Imagenet consists of approximately 1.2 million images in the training split and 50,000 images in the validation split, spread across 1000 classes. We separate 5,000 images from the training set to create our own validation set for finetuning the hyperparameters. We report the final results on the official validation set of Imagenet.
\paragraph{Augmentation pipeline}
As mentioned in the paragraph above, we choose image datasets for our experiments with AASAEs, and hence set up the denoising criterion with an appropriate domain-specific data augmentation pipeline. We define a sequence of common image augmentations $A = \{a_1, a_2, \dots, a_n\}$ such as random flip, random channel drop. We also define $a_c$ as a special transform that applies a random resize and crop to an input $x$. Formally, $a_c$ maps $x: \mathbb{R}^{c \times h \times w} \longrightarrow \mathbb{R}^{c \times g \times u}$ where $g \leq h$ and $u \leq w$. For every input $x$ to an AASAE we define $x^+ \sim A(a_c(x))$ as a view of $x$. The augmentation pipeline defined here is kept the same as that of SimCLR \citep{simclr}, for a fair comparison with other self-supervised learning approaches.
\paragraph{Optimization and Hyperparameters}
We use Adam optimizer \citep{kingma2014adam} during pretraining. We use a linear warmup schedule for the learning rate, which is held fixed after the initial warmup. For all our ablation experiments, we keep the weight decay coefficient fixed at 0. When studying the effect of minibatch size, we follow \citep{imagenet-1-hour} and linearly scale the learning rate and the warmup epoch count with minibatch size. For the hyperparameter sensitivity ablations on CIFAR-10, we vary a particular hyperparameter while keeping the others fixed to their default values. By default, we use a learning rate of $2.5 \times 10^{-4}$, warmup the learning rate until 10 epochs, and keep the batch size at 256. For STL-10 experiments, we set the learning rate at $5 \times 10^{-4}$, warmup epochs count at 10, and keep the batch size at 512. For Imagenet pretraining, we set the total batch size at 512 across 4 GPUs, the learning rate at $5 \times 10^{-4}$, warmup epochs count at 10 and run the pretraining for all autoencoder models until 5 million training iterations.
\paragraph{Finetuning}
Downstream classification accuracy via finetuning has become a widely-used proxy for measuring representation quality. We follow the finetuning protocol put forward by \citet{simclr}. After pretraining without any labels, we add and train a linear layer on the pretrained encoder (representation), without updating the encoder. We train the linear layer for 90 epochs with a learning rate defined by: $0.1 \times \text{BatchSize} / 256$, using SGD with Nesterov momentum.
\paragraph{Semi-supervised learning evaluation}
We run semi-supervised classification task on our models that have been pretrained on the Imagenet dataset. We follow the evaluation process mentioned in previous works \citep{swav, barlow-twins}, and train the model on $1\%$ and $10\%$ labeled splits of Imagenet. The training is carried out for 20 epochs with a batch size of 256, using an SGD optimizer with a momentum of 0.9 and no weight decay. Since this is a semi-supervised learning setup with a certain percentage of labels available from the dataset, the backbone is unfrozen during the training process and is trained at a learning rate of 0.01 for the $10\%$ labeled split and at 0.02 for the $1\%$ labeled split. The linear layer appended on top of the backbone is trained at a learning rate of 0.2 for the $10\%$ labeled split and at a rate of 0.5 for the $1\%$ labeled split.
\paragraph{Transfer learning tasks}
For the linear classification transfer learning task we use Places205 dataset with the commonly used evaluation protocol \citep{barlow-twins, swav}. We train a single linear layer on top of our model for 14 epochs with an SGD optimizer with a learning rate of 0.01, momentum of 0.9 and a weight decay of 5e-4. The learning rate is multiplied by a factor of 0.1 at equally spaced intervals during the training.
For the object detection transfer learning task, we use the VOC07$+$12 \emph{trainval} set for training and VOC07 test set for evaluation as previously done by \citet{barlow-twins}. Faster R-CNN with a C4 backbone is used for this downstream task. We train with a batch size of 16 across 8 GPUs for 24000 iterations with a base learning rate of 0.01. We use detectron2 \citep{detectron2} library to perform this evaluation.
\paragraph{Pretraining duration}
As we demonstrate in the paper, the proposed AASAE benefits from being trained as long as it is feasible. We report the downstream accuracies measured at different points of pretraining. More specifically, we run linear evaluation on our encoder after 400, 800, 1600, and 3200 epochs for the CIFAR-10 experiments. For STL-10, we pretrain our models until 3200 epochs. For Imagenet, we train up to 5 million training steps, which is approximately 2100 epochs.
\paragraph{Compute and Framework}
All CIFAR-10 \citep{cifar-10} experiments are done on a single GPU with a memory size of at least 16GB. All STL-10 experiments are done using two GPUs in the same category. We select GPUs from a mix of NVIDIA RTX 3090s and V100s for CIFAR-10 and STL-10 experiments. Imagenet experiments and downstream evaluations are carried out on 4 A100s. Our codebase uses PyTorch Lightning \citep{lightning}.
\subsection{Quality of representation: downstream classification accuracies}
\begin{table}[!b]
\centering
\caption{Classification performance of Resnet-50 \citep{resnet} backbone on CIFAR-10 \citep{cifar-10}, STL-10 \citep{stl-10} and Imagenet \citep{imagenet-dataset} across different methods. All models were pretrained on the corresponding dataset without labels and finetuned using the protocol described in SimCLR \citep{simclr}. The autoencoder trained with our denoising criterion (AASAE) outperforms the baseline VAE by 30\% on CIFAR-10, 40\% on STL-10 and 45\% on Imagenet. Methods marked with \ding{68} either use a different backbone than Resnet-50 or a different (non-linear) evaluation strategy.}
\label{tab:main-results}
\hfill
\begin{minipage}{0.69\textwidth}
\centering
\begin{tabular}{llll}
\bf{Method} & \bf{CIFAR-10} & \bf{STL-10} & \bf{Imagenet} \\
\hline
\ding{68} CPC (large) & - & - & 48.7 \\
\ding{68} CPCv2 & 84.52 & 78.36 & 63.8 \\
\ding{68} AMDIM (small) & 92.10 & 91.50 & 63.5 \\
\ding{68} YADIM & 91.30 & 92.15 & 59.19 \\
SIMCLR & 94.00 & 92.36 & 69.3 \\
\hline
AE & 56.34 & 42.26 & 0.89 \\
AAAE & 50.62 & 41.94 & 1.29 \\
VAE & 57.16 & 44.15 & 4.58 \\
\bf{AASAE} & \bf{87.14} & \bf{84.72} & \bf{51.0} \\
\end{tabular}
\end{minipage}
\begin{minipage}{0.29\textwidth}
\begin{tabular}{ll}
\bf{Method} & \bf{Imagenet} \\
\hline
Colorization & 39.6 \\
Rotation & 48.9 \\
Jigsaw & 45.7 \\
BigBiGAN & 56.6 \\
NPID & 54.0 \\
\hline
MoCo & 60.6 \\
SwAV & 75.3 \\
BYOL & 74.3 \\
Barlow Twins & 73.2 \\
\end{tabular}
\end{minipage}
\end{table}
First, we look at the accuracies from variants of autoencoders, the family to which the proposed AASAE belongs, presented in the bottom half of Table \ref{tab:main-results} (left). We consider the vanilla autoencoder (AE), augmentation-augmented autoencoder (AAAE), and the variational autoencoder (VAE) as baselines. Our first observation is that there is a significant gap between the proposed AASAE and all the baselines, with up to 30\% points on CIFAR-10, 40\% points on STL-10, and 45\% points on Imagenet. This demonstrates the importance of data augmentation and noise in the intermediate representation space in making autoencoding a competitive alternative for self-supervised learning. When we add only one of these components, augmentation in the case of AAAEs or sampling in the case of VAEs, we see a big performance degradation from AASAE. The gap between VAE and AASAE exposes the inadequacy of KL-divergence as a regularizer for the latent space.
We then put the performance of the proposed AASAE in the context of existing self-supervised learning algorithms presented in the top half of Table~\ref{tab:main-results} (left), and Table~\ref{tab:main-results} (right). We confirm once again what others have observed as to why autoencoding fell out of interest in recent years. All three autoencoder baselines (AE, AAAE, and VAE) severely lag behind the other state-of-the-art self-supervised learning approaches. However, the proposed modification that led to AASAE significantly narrows this gap on CIFAR-10 and STL-10. On Imagenet, the AASAE lags behind the current crop of state-of-the-art methods; however, it performs better than any existing pretext task designed for SSL. These results suggest that autoencoding is a viable alternative to contrastive and non-contrastive learning algorithms when designed and equipped appropriately and developed further on from here.
\subsection{Representational quality does not deteriorate}
\label{sec:convergence}
\begin{figure}[!b]
\centering
\makebox[\textwidth][c]{\includegraphics[width=\textwidth]{images/convergence-plots.png}}
\caption{The AASAE uses a Gaussian likelihood on pixels for the reconstruction loss with a specified width of the distribution (logscale). In (a), we let the decoder learn the logscale and observe the illusion of overfitting as mentioned in \citet{parsimony-gap}. In (b), we fix the logscale parameter to an arbitrary scalar by sampling uniformly between [-5, 2]. In both cases, we fail to observe any correlation between the quality of density estimation and learned representation. Plots shown for CIFAR-10 \citep{cifar-10} dataset.}
\label{fig:convergence}
\end{figure}
A major downside of the proposed strategy of replacing the KL divergence term in the original loss with data augmentation is that we lose the interpretation of the negative loss as the lowerbound to the log probability of an observation. However, we find it less concerning as the quality of representation is not necessarily equivalent to the quality of density estimation. Furthermore, we make a strong conjecture that the representation quality, which largely depends on the encoder, does not suffer from overfitting (in terms of downstream classification accuracy), even when the quality of density estimation does. Our conjecture comes from the observations that the representation output of the encoder must cope with multiple copies of the same input and noise added in the process of sampling. On the other hand, the decoder can arbitrarily shrink the width of the output distribution per latent configuration, resulting in overfitting to training examples. This conjecture is important since it implies that we should train the AASAE as long as the computational budget allows, rather than introducing a sophisticated early stopping criterion. More importantly, this would also imply that we do not need to assume the availability of downstream tasks at the time of pretraining.
We test two setups. First, we let the decoder determine the width (in terms of the diagonal covariance of Gaussian) on its own. In this case, we expect the model to overfit the training examples severely, as was observed and argued by \citet{parsimony-gap}, while the representation quality never deteriorates. In the second setup, we fix the width to an arbitrary but reasonable scalar, which would prevent overfitting in the context of density estimation as long as it is chosen to be reasonably large.
As presented in Fig.~\ref{fig:convergence}, in both cases, we observe that the quality of representation, measured in terms of the downstream accuracy, does not deteriorate. Furthermore, as anticipated, we observe that the quality of density estimation quickly overfits in learning the width of output distribution (Figure \ref{fig:convergence} (a)). Fixing the width to a scalar did not necessarily help avoid the issue of overfitting (Figure \ref{fig:convergence} (b)). Still, more importantly, we fail to observe any clear relationship between the qualities of density estimation and learned representation.
This finding suggests the need for further study to define and measure the quality of representation distinct from both density estimation quality and downstream accuracy. The former will not only help us measure the learning progress in pretraining time, but will also shed light on what we mean by representation and representation learning. The latter will be needed for future downstream tasks, as the main promise of pretraining is that it results in representations that are useful in the unknown.
\subsection{Combining VAE and AASAE}
\begin{figure}[!t]
\centering
\makebox[\textwidth][c]{\includegraphics[width=0.4\textwidth]{images/beta-coeff.png}}
\caption{Downstream classification accuracy on CIFAR-10 \citep{cifar-10} when we add back KL divergence based regularization with a $\beta$-coefficient \citep{beta-vae} to the loss function of AASAE defined in Eq. \ref{eq:final-aasae-loss}. We observe a negligible change in the quality of representations, as measured by the classification task, when the KL-term is weighted with a $\beta \ll 1$. For values of $\beta \geq 1$, the quality of representation starts deteriorating, as is seen by the decrease in classification accuracy.}
\label{fig:beta-coeff}
\end{figure}
Although we designed AASAE by {\it replacing} the KL divergence based regularization with data augmentation based denoising, these two may well be used together. Earlier, \citet{denoising-vae} studied this combination with a simple corruption distribution that is agnostic to the input domain in the context of density estimation. Here, we investigate this combination, with domain-specific transformations, in the context of representation quality.
While keeping the data augmentation based perturbation scheme intact, we vary the coefficient $\beta$ of the KL divergence term. When $\beta=0$, it is equivalent to the proposed AASAE. We present the downstream classification accuracies on CIFAR-10 in Figure \ref{fig:beta-coeff}.
We first observe that the KL divergence term has negligible impact when the coefficient is small, i.e., $\beta \ll 1$. However, as $\beta$ grows, we notice a significant drop in the downstream classification accuracy, which we view as a proxy to the representation quality. We attribute this behavior to the tension, or balance, between domain-aware and domain-agnostic regularization of the representation space. As $\beta \to \infty$, the domain-agnostic regularization overtakes and results in the arrangement of the representations that does not reflect the domain-specific structures, leading to worse downstream classification accuracy.
From this experiment, we conclude that for self-supervised pretraining, the proposed approach of data augmentation is a better way to shape the representation space than the domain-agnostic KL divergence based regularization.
\subsection{Hyperparameter sensitivity}
\begin{figure}[!t]
\centering
\makebox[\textwidth][c]{\includegraphics[width=\textwidth]{images/invar-bars.png}}
\caption{On CIFAR-10 \citep{cifar-10}, we demonstrate AASAE's insensitivity to hyperparameters: (a) batch size, (b) latent space dimension, (c) decoder architecture, and (d) logscale parameter (width of the Gaussian likelihood). We vary one specific hyperparameter while keeping the rest fixed for these insensitivity ablations. We select the minibatch size between 128-1024, the dimensionality of the latent space between 64-512, the decoder architecture from decoders that mirror \{resnet18, resnet34 or resnet50\} encoders, and sample the logscale values from a uniform distribution between [-5, 2].}
\label{fig:invar-bars}
\end{figure}
The proposed AASAE, or even the original VAE, sets itself apart from the recently proposed self-supervised learning methods in that its loss function is decomposed over the training examples (within each minibatch). Thus, we believe that training the AASAE is less sensitive to minibatch size, as even with a single-example minibatch, our estimate of the gradient is unbiased. This is often not guaranteed for a loss function that is not decomposed over the training examples. We test this hypothesis by running experiments with varying sizes of minibatches.
As shown in Fig. \ref{fig:invar-bars} (a), we observe almost no difference across different minibatch sizes, spanning from 128 to 1024. This is true for both the downstream accuracy (representation quality) and the speed of learning. This is contrary to recent findings from self-supervised learning algorithms, where large minibatches have been identified as an important ingredient \citep{simclr,understanding-byol}.
This insensitivity to the minibatch size raises a question about other hyperparameters, such as the dimensionality of latent space (Fig. \ref{fig:invar-bars} (b)), the decoder architecture (Fig. \ref{fig:invar-bars} (c)) and the logscale or width of the output distribution (Fig. \ref{fig:invar-bars} (d)). We test the sensitivity of the proposed AASAE to each of these hyperparameters. We find that the quality of representation, measured by the downstream classification accuracy, is largely constant to the change in these hyperparameters. Together with the insensitivity to the minibatch size, this finding further supports our claim that autoencoding-based approaches form a valuable addition to self-supervised learning.
\subsection{Semi-supervised learning}
\begin{table}[!b]
\centering
\caption{Semi-supervised evaluation of Resnet-50 encoder with $1\%$ and $10\%$ labels on Imagenet. Entries marked with $*$ performed equivalent to chance on Imagenet.}
\label{table:semi-sup}
\begin{tabular}{lll}
& \multicolumn{2}{c}{\textbf{Imagenet}} \\
\cline{2-3}
\textbf{Method} & 1\% & 10\% \\
\hline
Supervised & 25.4 & 56.4 \\
\hline
SimCLR & 48.3 & 65.6 \\
Barlow Twins & 55.0 & 69.7 \\
BYOL & 53.2 & 68.8 \\
\hline
AE & $0.1^*$ & $0.1^*$ \\
AAAE & $0.1^*$ & 0.31 \\
VAE & $0.1^*$ & 0.98 \\
\textbf{AASAE} & \textbf{21.37} & \textbf{39.85} \\
\end{tabular}
\end{table}
We finetune the Resnet-50 \citep{resnet} backbone pretrained by AASAEs on specified labeled subsets of Imagenet. The two subsets used contain $1\%$ and $10\%$ labeled images of the total number present in the dataset. Table \ref{table:semi-sup} shows the results for the baseline autoencoder models and our proposed AASAE. The baseline autoencoders perform poorly on this semi-supervised evaluation task. In some instances, their performance is $0.1\%$ accuracy on Imagenet, which is equivalent to chance. The AASAE outperforms the remaining autoencoders considerably on this task with $21.37\%$ accuracy on the $1\%$ labeled subset and a $39.85\%$ accuracy on the $10\%$ labeled subset. However, this is still quite behind when compared against the supervised results or results from other current SSL methods.
\subsection{Transfer learning to other tasks}
For transfer learning to classification tasks, we finetune a linear layer on top of the frozen Resnet-50 backbone pretrained by VAE and AASAE on Places205 dataset for scene classification. The finetuning protocol is kept the same as the previous works of \citet{barlow-twins, pirl}. Table \ref{table:downstream} shows the results for this downstream evaluation. For comparison, we also include results on Places205 from pretext tasks of Jigsaw \citep{jigsaw} and Rotation \citep{rotation-prediction}, while at the same time including results from one of the current high performers on this evaluation, namely, Barlow Twins \citep{barlow-twins}.
The finetuning process of object detection transfer task is done on the VOC07$+$12 \emph{trainval} dataset and the task is evaluated on VOC07 test set. The results are shown in Table \ref{table:downstream}. Even though the AASAE performed comparable to the Jigsaw and Rotation pretext tasks on Places205 classification, its performance is greatly affected on the VOC07 detection task. It is far behind the results of these pretext tasks. This result raises the question of whether reconstruction-based SSL techniques are a good fit for transferring representations for object detection tasks. This is something that can be explored in future work.
\begin{table}[!t]
\centering
\caption{Transfer performance of Imagenet pretrained Resnet-50 backbones on classification and object detection tasks. Places205 dataset is used for classification transfer task with the table reporting classification accuracy. For object detection, we use VOC07$+$12 dataset with Faster R-CNN algorithm and C4 backbone.}
\label{table:downstream}
\begin{tabular}{lllll}
& \multicolumn{1}{c}{\textbf{Places205}} & \multicolumn{3}{c}{\textbf{VOC07+12}} \\
\cline{2-5}
\textbf{Method} & $Acc.$ & $AP_{all}$ & $AP_{50}$ & $AP_{75}$ \\
\hline
Supervised & 51.1 & 53.5 & 81.3 & 58.8 \\
\hline
Jigsaw & 41.2 & 48.9 & 75.1 & 52.9 \\
Rotation & 41.4 & 46.3 & 72.5 & 49.3 \\
Barlow Twins & 54.1 & 56.8 & 82.6 & 63.4 \\
\hline
VAE & 6.78 & 2.45 & 6.49 & 1.67 \\
\textbf{AASAE} & \textbf{41.45} & \textbf{15.22} & \textbf{35.69} & \textbf{10.09} \\
\end{tabular}
\end{table}
\subsection{Direct inspection of representation}
\begin{figure}[!b]
\centering
\includegraphics[width=\textwidth]{images/cos-sim.png}
\caption{Part (a) shows cosine similarity matrices between pairs of vectors produced by views of a particular example and between pairs of vectors produced by views of different examples. We observe a posterior collapse in the case of VAEs in (a)(i). For AASAEs in (a)(ii), we see strong alignment between views of the same example while the views of different examples are far apart from each other in the representation space. In (b), we show images from the STL-10 dataset \citep{stl-10} and their corresponding perturbed versions that generate the cosine similarity matrices in (a).}
\label{fig:cos-sim}
\end{figure}
A major motivation behind our proposal was to use domain-specific data augmentation to encourage representations to encode domain-specific invariances. If AASAEs indeed reflect such invariances, we expect vector representations coming out of domain-specific perturbations of an individual example to be highly aligned with each other. We test whether this property holds with the AASAE more strongly than the original VAE by inspecting cosine similarities between pairs of perturbed inputs produced by the same example and between pairs of perturbed inputs produced by different examples. When the former is higher than the latter, we can say the representation encodes domain-specific invariances induced by data augmentation.
In Fig. \ref{fig:cos-sim} (a)(i), we make two observations. First, the representation vectors are all extremely aligned for the original VAE. We can interpret this from two perspectives. The first perspective is the so-called posterior collapse \citep{beta-vae,skip-vae}, in which all the approximate posterior distributions, i.e., the representation vectors, are detached from the input and collapse onto each other. The second perspective is the lack of domain-specific invariance, which is evident from the lack of any clusters. Either way, it is obvious that the representations extracted by the original VAE do not reflect the underlying structure of the data well.
On the other hand, with the proposed AASAE, we see clear patterns of clustering in Fig. \ref{fig:cos-sim} (a)(ii). The vectors produced from one example are highly aligned with each other, while the vectors produced from two different examples are less aligned. In other words, the representations capture domain-specific invariances, induced by data augmentation, and the AASAE does not suffer from posterior collapse. Both these things were well anticipated from the design of our algorithm.
\section{Conclusion}
In this paper, we attempt to revive the idea of autoencoding for self-supervised learning of representations. We start by observing that data augmentation is at the core of all recently successful self-supervised learning algorithms, including both contrastive and non-contrastive approaches. We then identify the KL divergence in variational autoencoders (VAE) as a domain-agnostic way of shaping the representation space and hypothesize that this makes it inadequate for representation learning. Based on these two observations: the importance of data augmentations and KL divergence's inadequacy, we propose replacing the KL divergence regularizer with a denoising criterion and domain-specific data augmentations in the VAE and call this variant an augmentation-augmented stochastic autoencoder (AASAE).
Our experiments reveal that the AASAE learns substantially better data representation than the original VAE or any other conventional variant, including the vanilla autoencoder and the augmentation-augmented denoising autoencoder. We use downstream classification accuracy from finetuning a linear layer as the metric to measure representation quality and observe more than a 30\% improvement on all datasets over the VAE. This result is better than any pretext task for SSL and one of the earlier versions of contrastive learning, CPC. Although the AASAE still lags behind the more recent methods for SSL, this gap is significantly narrower with the AASAE than with any other autoencoding variant.
One consequence of autoencoding is that the loss function of AASAE is decomposed over the examples within each minibatch, unlike contrastive learning (with negative examples from the same minibatch) and non-contrastive learning (which often relies on minibatch statistics). We anticipated that this makes AASAE learning less sensitive to various hyperparameters, especially the minibatch size. Our experiments reveal that the AASAE is indeed insensitive to the minibatch size, latent space dimension, and decoder architecture.
Although the proposed AASAE has failed to outperform or perform comparably to the existing families of self-supervised learning algorithms, our experiments indicate the potential for the third category of self-supervised learning algorithm based on autoencoding. The quality of representations can be significantly pushed beyond that of the vanilla autoencoder and variational autoencoder by making them encode domain specific invariances. Furthermore, autoencoding-based methods, represented by the AASAE, are robust to the choice of hyperparameters. Based on these observations, we advocate for further research in the direction of autoencoding-based self-supervised learning.
\begin{ack}
Ananya Harsh thanks Margaret Li, Tushar Jain, Jiri Borovec, Thomas Chaton and Marc Ferradou for helpful discussions on ideas, experiments and the paper draft. William thanks Yann LeCun, Philip Bachman, Carl Doersch, Cinjon Resnick, Tullie Murrell for helpful discussions.
We are grateful to the PyTorch Lightning team for their support of this project and Grid AI for providing compute resources and cloud credits needed to run our research workloads at scale. We thank the PyTorch team and the PyTorch Lightning community for their contributions to PyTorch, Lightning and Bolts which made the code base for this project possible.
KC was partly supported by NSF Award 1922658 NRT--HDR: FUTURE Foundations, Translation, and Responsibility for Data Science.
\end{ack}
\newpage
\section{Introduction}
Developing non-perturbative approaches to strongly interacting quantum field theories is undoubtedly an important subject in theoretical physics. Among various approaches attempted over the years, the one which is gaining renewed interest and has been producing numerous breathtaking results in recent years is the application of the {\it bootstrap} method. The basic idea of bootstrap is to start with a minimal set of assumptions and constrain the observables by imposing the self-consistency conditions. It was proposed initially in the study of scattering amplitudes in the late 60's \cite{Eden:1966dnq} in order to describe strong interaction. Although the idea was abandoned to a large extent soon after the advent of quantum chromodynamics, it was revived decades later in a different guise in the study of two-dimensional conformal field theories \cite{Belavin:1984vu}. Rather recently the approach was also successfully applied to conformal field theories in higher dimensions \cite{Rattazzi:2008pe}, most notably to the three-dimensional Ising model \cite{ElShowk:2012ht}, with the help of numerical implementation and has been a subject of active research since then. Motivated by this success, the original idea of the S-matrix bootstrap was also revisited, resulting in several interesting outcomes \cite{Caron-Huot:2016icg,Paulos:2016but,Paulos:2017fhb, Doroud:2018szp, He:2018uxa,Cordova:2018uop,Paulos:2018fym}.
Another, perhaps more elementary approach to non-perturbative physics is the resummation of diagrams\footnote{When combined with other techniques such as Borel resummation and conformal mapping, the standard perturbation theory can also be a powerful tool for studying nonperturbative physics. This was recently demonstrated for $\lambda \phi^{4}$ theory in two dimensions in an impressive work \cite{Serone:2018gjo}.} based on the Schwinger-Dyson equation. Although the method is tractable only in specific situations such as the large-$N$ limit, it has an advantage that it allows us to compute various observables explicitly as functions of the coupling constants and study the renormalization group flows and the critical points analytically.
The main goal of this article is to shed new light on the conformal and the S-matrix bootstraps and the relation between them, by analyzing large-$N$ field theories using a combination of Schwinger-Dyson techniques and the idea of bootstrap. More specifically, we consider the $O(N)$ vector model and the Gross-Neveu model in $(d+1)$-dimensional anti-de-Sitter space (AdS$_{d+1}$).
There are mainly three motivations for studying these theories. Firstly it helps to make connections between the correlation functions in conformal field theories (CFTs) and the S-matrix in flat space: On the one hand, thanks to the isometries of the AdS spacetime, the observables in quantum field theories in AdS$_{d+1}$ are constrained by the $d$-dimensional conformal group SO$(d,2)$ even in the presence of mass deformation. One can therefore study them using the standard techniques of conformal field theories such as the operator product expansion and crossing symmetry. On the other hand, by taking an appropriate limit, one can extract from them the flat-space S-matrix.
\begin{figure}[t]
\centering
\begin{minipage}{0.3\hsize}
\centering
\includegraphics[clip,height=5cm]{flow1.pdf}\\
(a)
\end{minipage}
\begin{minipage}{0.3\hsize}
\centering
\includegraphics[clip,height=5cm]{flow2.pdf}\\
(b)
\end{minipage}
\begin{minipage}{0.35\hsize}
\centering
\includegraphics[clip,height=5cm]{flow3.pdf}\\
(c)
\end{minipage}
\caption{Schematic pictures of RG flows of massive QFTs in flat space and AdS. (a) The RG flow in flat space. In flat space, the theory starts from the UV fixed point and flows either to a gapped phase or to a fixed point governed by CFT. (b) The RG flow in AdS for $\Lambda_{\rm AdS}\gg \Lambda_{\rm QFT}$. In this case, the theory starts seeing the AdS curvature as soon as it flows away from the UV fixed point, and simply flows to the gapped phase in AdS. (c) The RG flow in AdS for $\Lambda_{\rm AdS}\ll \Lambda_{\rm QFT}$. When $\Lambda_{\rm AdS}$ is small enough, the theory does not see the AdS curvature until it reaches the deep IR. Therefore there is a wide range of scales in which the physics can be well-approximated by the massive QFT in flat space.}\label{fig:flowsflat}
\end{figure}
To appreciate this point further, it is helpful to briefly discuss the physics of a massive quantum field theory in AdS (see also figure \ref{fig:flowsflat}). In flat space, the theory starts from the UV fixed point and flows either to a gapped phase or to an IR fixed point. Also in AdS, the UV limit is described by the same flat-space UV fixed point since the AdS curvature becomes negligible in the UV limit. However, the details of how it flows to the IR depend on the relative magnitude of the AdS scale $\Lambda_{\rm AdS}=1/L_{\rm AdS}$ and the mass scale of the theory $\Lambda_{\rm QFT}$. If the AdS scale is much larger than the mass scale of the theory ($\Lambda_{\rm AdS}\gg\Lambda_{\rm QFT}$), the theory starts seeing the AdS curvature as soon as it flows away from the UV fixed point. Therefore there is no scale in which the theory is governed by nontrivial flat-space physics and it flows simply to a gapped phase in AdS. In asymptotically-free theories, this also implies that the physics is controlled by perturbation theory in AdS since the effective coupling constant at the AdS scale is small. This advantage was first pointed out by Callan and Wilczek \cite{Callan:1989em} and further discussions on the case of Yang-Mills theory in AdS were given in \cite{Aharony:2012jf}\footnote{Other recent studies of quantum field theory on rigid AdS background are \cite{Aharony:2010ay,Aharony:2015zea, Aharony:2015hix,Doyon:2004fv}.}.
On the other hand, if the AdS scale is much smaller $(\Lambda_{\rm AdS}\ll \Lambda_{\rm QFT})$, the theory does not see the AdS curvature until it reaches the deep IR and therefore it can be well-described by a massive QFT in flat space for a wide range of scales\footnote{We can see all these features explicitly in the $O(N)$ vector model. See section \ref{sec:Corr}.}. Thus by considering a theory in AdS and sending $\Lambda_{\rm AdS}$ to zero, one can compute the observables in flat space, in particular the S-matrix, from the observables in AdS. This allows us to analyze the physics of massive quantum field theories in flat space using the powerful techniques of conformal field theories. Such an idea was already employed in \cite{Paulos:2016fap} in which they studied the S-matrix in flat space by analyzing the conformal bootstrap numerically and taking the flat-space limit\footnote{Some of the relations between the S-matrix bootstrap in flat space and the conformal bootstrap were clarified recently with the help of the analytic functional bootstrap \cite{Mazac:2016qev,Mazac:2018mdx}.}. In this paper, we will see another use of this idea which is more analytical; namely we compute resummed loop diagrams by imposing the consistency of the operator product expansion in the boundary conformal theory and take the flat-space limit to reproduce the S-matrix. For details, see section \ref{sec:Corr}. Besides being an efficient computational tool, the connection between the flat-space S-matrix and the conformal correlators may also be used as a way to understand general analytic properties of the nonperturbative S-matrix in flat space. Unlike the correlators in conformal field theories whose analytic properties are well under control thanks to the conformal block expansion, not much is known about the analytic properties of the S-matrix in flat space at the non-perturbative level. 
Given such a situation, it would be interesting to see what the correlators in AdS, which are constrained by the conformal symmetry, can tell us about the properties of the S-matrix upon taking the flat-space limit. Although we are still quite far from satisfactory understanding of the analyticity of the flat-space S-matrix, we will see in this paper that several important features of the flat-space S-matrix, namely resonances and bound states, can be reproduced from the correlators of large-$N$ vector models in AdS.
The second reason for studying these theories is to better understand non-perturbative solutions to the crossing equation in CFT. As a result of intensive studies in the last couple of years, various results for CFTs in higher dimensions were obtained not just numerically but also analytically. However, most of the analytical results so far concern infinitesimal corrections to generalized-free-field CFTs\footnote{In generalized-free-field CFTs, correlation functions are simply given by a product of two-point functions and the operator spectrum consists of ``multi-trace operators''.}. For instance, it was shown in \cite{Komargodski:2012ek,Fitzpatrick:2012yx} that every CFT contains a universal large-spin sector in which the operator spectrum asymptotes to that of the generalized-free-field CFT, and infinitesimal corrections to it are governed by the low-twist operators of the theory\footnote{There have been remarkable developments in the large spin expansion in the last few years. See for instance \cite{Alday:2015eya,Alday:2015ewa,Alday:2016njk,Simmons-Duffin:2016wlq,Caron-Huot:2017vep,Lemos:2017vnx} for other important developments.}. Although it is quite remarkable that one can make such a universal statement about general CFTs, it is indispensable to develop better understanding of the CFT data away from such a universal sector since the operator spectrum, or even the number of operators, in a general CFT is quite different from that of generalized free fields. From this perspective, it would be desirable to have examples of solutions to the crossing equation which exhibit a non-perturbative reorganization of the spectrum. The large-$N$ vector models that we study in this paper precisely provide such an example: Both in the O($N$) vector model and the Gross-Neveu model, we show that the dimensions of the operators receive finite shifts from those of the generalized free field. 
Furthermore, in the Gross-Neveu model we show the existence of an extra operator which corresponds to a bound state in flat space, while in the $O(N)$ vector model in the symmetry-breaking phase we find a distinctive pattern of the anomalous dimensions of the operators which can be thought of as the AdS analogue of the resonance phenomenon (see figure \ref{fig:phaseshiftano}). Both of these phenomena can only be seen after the resummation of diagrams in AdS and cannot be deduced from the existing analytical results in the literature.
Thirdly, it encompasses the study of conformal boundary conditions in flat space. When the theory in AdS is tuned to be at a critical point, we obtain a conformal field theory in AdS. In such a special case, one can view the theory as describing a conformal field theory with a boundary (BCFT) since the AdS spacetime is conformally equivalent to the flat half-space ($\mathbb{R}_{+}\times \mathbb{R}^{d}$). Using this correspondence, one can extract BCFT data directly from the observables in AdS. We will show this explicitly by reproducing the existing results in the literature and also finding new ones about the conformal boundary conditions of the critical $O(N)$ and Gross-Neveu models.
The rest of the paper is organized as follows: In section \ref{sec:generalities}, we review the generalities of the $O(N)$ model, including the Hubbard-Stratonovich trick and the large-$N$ effective action. We then discuss in section \ref{sec:Phases} the phases of the $O(N)$ model in AdS$_2$ and AdS$_3$ by analyzing the effective potential, and show the existence of the symmetry-breaking vacua and of the gapped vacuum. In particular, we discuss the parameter regions in which the two phases coexist owing to the Breitenlohner-Freedman bound. In section \ref{sec:Corr}, we compute the two-point functions of $\sigma$ and the AdS 4-particle scattering amplitude of $O(N)$ vector fields $\phi^i$ in the gapped phase. We do so by first resumming Witten diagrams and expressing the final result in terms of a single unknown function. We then bootstrap the unknown function by requiring the consistency of the OPE of the boundary conformal theory. We also provide results in the symmetry-breaking phase, and in that context we describe the AdS analogue of a resonance in flat-space. We furthermore propose a relation between symmetry breaking in AdS and the existence of a conformal manifold in the boundary conformal theory.
In section \ref{sec:Critical}, we discuss in detail the case when the bulk theory becomes conformal. We first analyze the structure of the OPE expansion in the bulk and propose a diagnosis for bulk conformality in AdS background. We then apply this idea to find the critical point of the O($N$) model, and extract BCFT data from the previously-computed correlators. In section \ref{sec:fermion}, we perform similar analyses in the Gross-Neveu model. In particular, we show the existence of a bound state in AdS, we analyze the critical point and we compute some data of the associated BCFT. Finally we conclude and comment on future directions in section \ref{sec:conclusion}.
\section{Generalities of the $O(N)$ model}\label{sec:generalities}
\subsection{Review of the $O(N)$ Model}
The Lagrangian of the $O(N)$ model is
\begin{equation}
\mathcal{L} = \frac12 (\partial \phi^i)^2 + \frac{m^2}{2} (\phi^i)^2 + \frac{\lambda}{2 N} ((\phi^i)^2)^2~,
\end{equation}
where $i = 1, \dots, N$ and summation over $i$ is implicit.
The model admits a $1/N$ expansion with $\lambda$ fixed \cite{Coleman:1974jh, Moshe:2003xn}, as we will now review. A convenient way to obtain the large-$N$ expansion is by introducing a Hubbard-Stratonovich (HS) auxiliary field $\sigma$
\begin{equation}\label{eq:HSLag}
\mathcal{L} = \frac12 (\partial \phi^i)^2 + \frac{m^2}{2} (\phi^i)^2 - \frac{1}{2 \lambda} \sigma^2 + \frac{1}{\sqrt{N}} \,\sigma (\phi^i)^2 ~.
\end{equation}
The integration contour for $\sigma$ runs on the imaginary axis. Note that the equation of motion simply sets
\begin{equation}
\sigma = \frac{\lambda}{\sqrt{N}}(\phi^i)^2~,
\end{equation}
hence $\sigma$ is identified with the composite operator $(\phi^i)^2$ inside correlation functions.
At large $N$ and fixed $\lambda$ there are still loop corrections coming from the Lagrangian \eqref{eq:HSLag} that are not suppressed by inverse powers of $N$. To see this, consider the 1PI 1-point and 2-point correlation functions of $\sigma$ at 1 loop (see figure \ref{fig:notsuppressed}): they are given by a closed loop of the $\phi^i$ field with either 1 or 2 external lines of $\sigma$, giving a contribution of order $\mathcal{O}(\sqrt{N})$ and $\mathcal{O}(1)$, respectively. As a consequence, higher-loop connected diagrams built out of these 1PI diagrams will also fail to be suppressed by inverse powers of $N$. In order to take these contributions into account, there are two standard ways to proceed: one is to write the Schwinger-Dyson equation and explicitly resum the diagrams. The other is to consider the path integral of $\sigma$ and $\phi^{i}$, determine the saddle point and compute the fluctuations around it. Both approaches yield the same integral equation in the end and are physically equivalent. In this paper, we adopt the second approach since it is more algorithmic and helps to consider different phases of the theory.
\begin{figure}
\centering
\includegraphics[clip, height=2cm]{nonsuppressedbubble.pdf}
\caption{Examples of 1PI diagrams for the one- and two-point functions of $\sigma$ which are not suppressed by powers of $N$. The black lines are the propagators of $\phi^{i}$, and we also depicted how O($N$) indices are contracted.\label{fig:notsuppressed}}
\end{figure}
In practice, the path integral calculation amounts to considering a modified Lagrangian, obtained by adding to \eqref{eq:HSLag} the generating functional of all 1-loop 1PI correlators of $\sigma$. The latter is simply computed by summing the bubbles of $\phi^i$ with an arbitrary number of insertions of the $\sigma$ field, giving
\begin{equation}\label{eq:Gamma}
\Gamma[\sigma]_{\rm{1\,loop}} = \frac{N}{2} \, \mathrm{tr\,} \log\left(-\square + m^2 + \frac{2}{\sqrt{N}}\sigma \right)~.
\end{equation}
Hence the modified, non-local Lagrangian is
\begin{equation}
\mathcal{L}_{\rm eff} = \frac12 (\partial \phi^i)^2 + \frac{m^2}{2} (\phi^i)^2 - \frac{1}{2 \lambda} \sigma^2 + \frac{1}{\sqrt{N}} \,\sigma (\phi^i)^2 + \frac{N}{2} \, \mathrm{tr\,} \log\left(-\square + m^2 + \frac{2}{\sqrt{N}}\sigma \right)~.\label{eq:LagEff}
\end{equation}
Using this Lagrangian \eqref{eq:LagEff}, we can derive the following results:
\begin{itemize}
\item[(i)]{The $\mathcal{O}(\sqrt{N})$ 1-point function of $\sigma$ is determined simply by minimizing the potential. Expanding the fields around constant values
\begin{align}
\sigma & = \sqrt{N} \Sigma + \delta\sigma~, \label{eq:expS}\\
\phi^i & = \sqrt{N} \Phi^i + \delta\phi^i~, \label{eq:expC}
\end{align}
the potential is given by the constant term in the Lagrangian, namely
\begin{equation}
V(M^2, \Phi^i) = N\left(- \frac{(M^2 - m^2)^2}{8\lambda} + \frac{M^2}{2} (\Phi^i)^2 + \frac12 \mathrm{tr\,}\log\left(-\square + M^2\right)\right)~,\label{eq:effpot}
\end{equation}
where we are using the shifted variable $M^2 = m^2 + 2\Sigma$. We find that the equation for the vacuum at leading order at large $N$ are
\begin{align}
0 & = \frac{\partial V}{\partial \Phi^i} =N M^2 \Phi^i ~, \label{eq:vacC} \\
0 & = \frac{\partial V}{\partial M^2}=\frac{N}{2}\left( \frac{m^2 - M^2}{2 \lambda} + (\Phi^i)^2 + \mathrm{tr\,} \frac{1}{-\square + M^2} \right)~; \label{eq:vacS}
\end{align}
}
\item[(ii)]{Expanding to second order in $\delta\sigma$ around the minimum, we can determine the full $\mathcal{O}(1)$ propagator, which resums the $\phi^i$ bubble diagrams. We can formally write it as follows: denote with $B(x,y)$ the bubble diagram, i.e.
\begin{equation}
B(x,y) = \left[\left(\frac{1}{-\square + M^2}\right)(x,y)\right]^2~,
\end{equation}
where $M^2$ is obtained by solving the equations for the vacuum \eqref{eq:vacC}-\eqref{eq:vacS} above. Then we have
\begin{equation}
\langle \delta\sigma(x) \delta\sigma(y)\rangle = -\left[\frac{1}{\lambda}\mathds{1} + 2 \, B \right]^{-1}(x, y)~.\label{eq:Sprop}
\end{equation}
Here the symbol $\mathds{1}$ is the identity operator and the inverse $[\ast]^{-1}$ is the operator inverse. In other words we should view the two-point function and the bubble as integral operators acting on functions of spacetime $f(x)$ through convolution, e.g. $B[f](x)\equiv \int d^{d+1}y\sqrt{g(y)}B(x,y)f(y)$ ;
}
\item[(iii)]{We can compute observables in the large-$N$ expansion using ordinary Feynman diagrams, with the Feynman rules induced by \eqref{eq:LagEff}, namely: the propagator for $\delta\sigma$ is the resummed one in eq. \eqref{eq:Sprop}, the propagator for $\delta\phi^i$ is just a free-field massive propagator with mass-squared $M^2$, there is a cubic vertex between $\delta\sigma$ and two $\delta\phi^i$'s of order $1/\sqrt{N}$, and there are self-interactions of $\delta\sigma$ induced by the 1-loop 1PI effective action. In this perturbation theory we do not include diagrams containing as a subdiagram any 1PI 1-loop n-point function of $\delta\sigma$, because those are already accounted for by the full propagator and self-interactions of $\delta\sigma$.}
\end{itemize}
The approach described here is valid also on any curved background, if we interpret $\square$ as the scalar Laplacian on the background. In this general case we include a possible quadratic coupling to curvature in the definition of $m^2$. Depending on the number $d+1$ of spacetime dimensions, some couplings can have UV divergences and will have to be appropriately renormalized. In the following we will work in $2\leq d+1 < 4$, where the theory is super-renormalizable and at most a renormalization of the vacuum energy and of the parameter $m^2$ is needed. An important modification to the vacuum equation arises if the Euclidean spacetime has a finite volume \cite{Hartnoll:2005yc}, but we do not need to consider it because Euclidean AdS has infinite volume. On the other hand, in AdS we have to specify boundary conditions.
\subsection{Boundary Conditions on AdS Background}
We need to prescribe boundary conditions for the fields $\phi^i$ at the boundary of AdS. Working at large $N$ and finite $\lambda$, the theory is not a free-field theory in the usual sense, and therefore one seems to face the hard problem of understanding boundary conditions for interacting fields. Luckily, the virtue of the HS trick is that at leading order at large $N$ the interaction is just encoded in the non-trivial propagator of the auxiliary field $\sigma$, whereas the $\phi^i$ are decoupled from $\sigma$ and free. They only start interacting via the exchange of $\sigma$ at subleading order in $1/N$, and that interaction can be treated perturbatively, so it does not require us to understand interacting boundary conditions.
More precisely, what we just wrote applies to the fluctuations $\delta\phi^i$ and $\delta\sigma$ around the vacuum configuration. However, while it is true that the path integral for the fluctuations $\delta\phi^i$ is gaussian at leading order at large $N$, even at this order the coupling $\lambda$ enters through the effective mass-squared $M^2$, which is determined dynamically by the VEV of $\sigma$.
Let us briefly remind what are the possible boundary conditions on AdS$_{d+1}$ for a given mass-squared $M^2$ and AdS radius $L$, as discussed in \cite{Klebanov:1999tb}. Define $\Delta_+$, $\Delta_-$, with $\Delta_+ = d -\Delta_- \geq \Delta_-$, to be the two solutions to the quadratic equation
\begin{equation}
\Delta(\Delta-d) = L^2M^2~,
\end{equation}
for the variable $\Delta$. Choosing Poincar\'e coordinates $(z, \vec{x})$, where $z > 0$ and $\vec{x}\in \mathbb{R}^d$, with metric
\begin{equation}
d s^2 = L^2\frac{dz^2 + (d\vec{x})^2}{z^2}
\end{equation}
the solution to the Klein-Gordon equation behaves near the boundary at $z = 0$ as
\begin{equation}
\delta\phi^i(z, \vec{x}) \underset{z\to 0}{\longrightarrow} z^{\Delta_+}(A_+^i(\vec{x})+\mathcal{O}(z^2) ) + z^{\Delta_-}(A_-^i(\vec{x})+\mathcal{O}(z^2) )~.
\end{equation}
We restrict the functions $\delta\phi^i$ in the path integral to have the same $z$-dependence near the boundary as either the $+$ or $-$ mode. This defines a boundary condition that preserves the isometries of AdS and the $O(N)$ symmetry. In addition we need to require that the Euclidean action is a finite function of the $\delta\phi^i$'s, i.e. that the allowed modes are normalizable. For $L^2M^2 \geq -\frac{d^2}{4}+1$ only the $+$ mode is normalizable and gives a valid boundary condition, while for $- \frac{d^2}{4} \leq L^2M^2 < -\frac{d^2}{4}+1$ both the $+$ and $-$ modes are normalizable, hence there are two possible boundary conditions. If the boundary condition $\pm$ is chosen, the boundary conformal theory contains an operator in the vector representation of $O(N)$ of scaling dimension $\Delta_\pm$.
Note that the case with $+$ boundary condition is continuously connected to the $O(N)$ model in flat space, via the flat-space limit $L \to \infty$ with $M^2$ and $\lambda$ fixed. On the other hand, the case with $-$ boundary condition is an intrinsically curved-space regime. In the following we will concentrate on the case of $+$ boundary condition.
\section{Phases of the $O(N)$ Model on AdS}\label{sec:Phases}
We now move on to the study of phases of the $O(N)$ model on AdS. As was pointed out initially by Callan and Wilczek, the AdS spacetime acts as a symmetry-preserving IR regulator\footnote{See also \cite{Kiritsis:1994yv} for the application of a similar idea to superstring theory.}. This fact leads to several important differences from the flat-space analysis, such as the coexistence of the gapped vacuum and the symmetry-preserving vacuum, and the existence of the symmetry breaking vacua even in two dimensions evading the famous Coleman-Mermin-Wagner theorem in flat space \cite{Mermin:1966fe, Coleman:1973ci}.
\subsection{Effective Potential on AdS$_{d+1}$}
To obtain the effective potential we need to compute the trace in eq. \eqref{eq:effpot}. Calculations of functional determinants in AdS have appeared in \cite{Burgess:1984ti, Inami:1985wu, Camporesi:1993mz, Gubser:2002zh, Hartman:2006dy, Giombi:2013fka}. Here we will employ the spectral representation of the bulk-to-bulk propagator, reviewed in the appendix \ref{app:SpRep}. With the $+$ boundary condition
\begin{equation}
\left(\frac{1}{-\square + M^2}\right)(x,y) = \frac{1}{L^{d-1}}\int_{-\infty}^{+\infty} d\nu \,\frac{1}{\nu^2 + \left(\Delta -\frac{d}{2}\right)^2} \, \Omega_\nu(x, y)~,\label{eq:srprop}
\end{equation}
where the conformal dimension $\Delta$ is related to the mass $M$ by $\Delta=\Delta_{+}=\tfrac{d}{2}+\sqrt{\tfrac{d^2}{4}+L^2M^2}$ and $\Omega_{\nu}$ is the harmonic function in AdS
\begin{align}
-\square_{x} \Omega_{\nu}(x,y)=\left(\frac{d^2}{4}+\nu^2\right)\Omega_{\nu}(x,y)\,.
\end{align}
Using the result \eqref{eq:coincpt} for the coincident point limit of the harmonic function, we obtain\footnote{Here the $\mathrm{tr\,}$ is normalized dividing by the volume of the Euclidean spacetime.}
\begin{equation}
\mathrm{tr\,}\frac{1}{-\square + M^2} = \frac{ \Gamma\left(\frac{d}{2}\right)}{4 \pi^{\frac{d+2}{2}}\Gamma(d)L^{d-1}}\int_{-\infty}^{\infty} d\nu\frac{\Gamma\left(\frac{d}{2} \pm i \nu\right)}{(\nu^2 + \frac{d^2}{4} + L^2 M^2)\Gamma(\pm i \nu)}~,\label{eq:scaltadpole}
\end{equation}
where $\Gamma(z\pm a)\equiv\Gamma(z+a)\Gamma(z-a)$.
This integral is UV divergent for $d\geq1$. The UV divergence can be reabsorbed in a renormalization of the parameter $m^2/\lambda$, which in fact by power counting is expected to be UV divergent in $d\geq 1$. We can evaluate the integral using dimensional regularization, i.e. taking $d < 1$ and then analytically continuing the result in $d$. For $d<1$ the integral can be evaluated by closing the contour at infinity in the complex $\nu$ plane, giving
\begin{equation}
\mathrm{tr\,}\frac{1}{-\square + M^2} = \frac{\Gamma\left(\frac{d}{2}\pm \sqrt{\frac{d^2}{4} + L^2 M^2}\right)\sin\left(\pi \left(\frac{d}{2} - \sqrt{\frac{d^2}{4} + L^2 M^2}\right)\right)}{(4\pi)^{\frac{d+1}{2}}\Gamma(\frac{d+1}{2})\cos(\frac{\pi d}{2}) L^{d-1}}~.\label{eq:trprop}
\end{equation}
This dimensionally-regularized result is finite for integer $d\geq 1$ and even, corresponding to power-law divergences in a cutoff regularization, while it has poles for integer $d\geq 1$ and odd, corresponding to logarithmic divergences.
The trace just computed determines the derivatives of the effective potential w.r.t. $M^2$, see eq. \eqref{eq:vacS}. Knowing the derivative is sufficient to determine the vacuum. The constant in the integration over $M^2$ is a UV divergent vacuum energy.
To ensure stability, we will only consider $\lambda > 0$, while the mass-squared parameter $m^2$ can take either sign. It is convenient to work with dimensionless quantities by setting $L = 1$ and measuring the other parameters $m^2$ and $\lambda$, as well as the variables $M^2$ and $\Phi^i$ that the effective potential depends on, in units of $L$. In these conventions, the flat-space limit is obtained by sending all the quantities with positive mass dimension to infinity, at a relative rate fixed by their units, e.g. $m^2 /\lambda^{\frac{2}{3-d}}$ and $m^2/M^2$ are kept fixed in the limit.
\subsection{AdS$_3$}
Plugging $d=2$ in eq. \eqref{eq:trprop} the expression simplifies to
\begin{equation}
\mathrm{tr\,}\frac{1}{-\square + M^2} =-\frac{\sqrt{1+ M^2}}{4\pi}~.
\end{equation}
Hence the effective potential, up to a constant, is
\begin{equation}
\frac{V(M^2,\Phi^i)}{N} = -\frac{(M^2-m^2)^2}{8\lambda} +\frac12 M^2 (\Phi^i)^2 -\frac{(1+ M^2)^\frac32}{12 \pi}~,
\end{equation}
and the equations for the vacuum are
\begin{align}
0 &= \frac{2}{N}\,\partial_{M^2} V = \frac{m^2-M^2}{2 \lambda} + (\Phi^i)^2 - \frac{\sqrt{1 + M^2}}{4\pi} \label{eq:minSAdS} \\
0 &= \frac{1}{N}\partial_{\Phi^i} V = \Phi^i \,M^2 \label{eq:minCAdS}~.
\end{align}
The solution of eq. \eqref{eq:minSAdS} is
\begin{equation}
M^2_{\Phi^i} = - 1+\frac{\lambda^2}{16\pi^2}\left(-1 + \sqrt{1+\frac{16\pi^2}{\lambda^2}\left(m^2 +2 \lambda (\Phi^i)^2 +1\right)}\right)^2~.\label{eq:TofrAdS}
\end{equation}
Plugging this solution back in the effective potential, we obtain a function $V(\Phi^i) \equiv V(M^2_{\Phi^i},\Phi^i)$ of a single variable $|\Phi|\equiv \sqrt{(\Phi^i)^2}$, with $\partial_{\Phi^i} V(\Phi^i) = \Phi^i M^2_{\Phi^i}$. In fig. \ref{fig:potd2} we show the plot of this function for $\lambda =1$ and various values of $m^2$. Dots represents the position of stable vacua. We stress that $m^2$ is a renormalized mass-squared parameter, whose value depends on the scheme, and we recall that we are adopting dimensional regularization as explained in the previous subsection.
\begin{figure}[t]
\centering
\begin{subfigure}{.45\textwidth}
\centering
\includegraphics[width=1\linewidth]{Pot2d1.pdf}
\label{fig:sub1}
\end{subfigure}%
\begin{subfigure}{.45\textwidth}
\centering
\includegraphics[width=1\linewidth]{Pot2d2.pdf}
\label{fig:sub2}
\end{subfigure}
\begin{subfigure}{.45\textwidth}
\centering
\includegraphics[width=1\linewidth]{Pot2d3.pdf}
\label{fig:sub3}
\end{subfigure}
\begin{subfigure}{.45\textwidth}
\centering
\includegraphics[width=1\linewidth]{Pot2d4.pdf}
\label{fig:sub4}
\end{subfigure}
\caption{The large-$N$ effective potential $\frac{1}{N \lambda^3}V(\Phi^i)$ as a function of $|\Phi|$ in $d=2$ (i.e. in AdS$_3$), for $\lambda =1$ and various values of $m^2$. Dimensionful quantities are expressed in units of the AdS radius $L$. The position of the symmetry-preserving (-breaking) vacuum, when it exists, is indicated with a red (blue) dot. The line interrupts when there is no real solution to eq. \eqref{eq:minSAdS}, i.e. when $\frac{m^2+1}{2} + \lambda(\Phi^i)^2 < 0$.}
\label{fig:potd2}
\end{figure}
For $m^2 > - 1$ there is a symmetry-preserving vacuum at $\Phi^i =0$. This vacuum is stable because the effective mass-squared of the fluctuations is $M^2 =M^2_{\Phi^i = 0}>- 1$, above the Breitenlohner-Freedman (BF) bound \cite{Breitenlohner:1982jf}. This fact---the mass squared can be slightly negative without affecting the stability of the vacuum---is in marked contrast to the analysis in flat space, and leads to the coexistence of the vacua which we discuss in the next paragraph. Note from eq. \eqref{eq:TofrAdS} that $M^2_{\Phi^i = 0}$ is real and above the BF bound also in the range $ - 1 - \frac{\lambda^2}{16\pi^2}\leq m^2 < - 1$, but in this range $\sqrt{1 + M^2_{\Phi^i = 0}}$ would be negative, therefore the solution is not acceptable.
For $m^2< \frac{\lambda}{2 \pi}$ the points at
\begin{equation}
(\Phi^i)^2 =\frac{1}{2\lambda} \left(\frac{\lambda}{2 \pi} -m^2\right)\equiv |\Phi|^2 > 0~,
\end{equation}
are symmetry-breaking vacua. At any of these points $M^2_{\Phi^i}= 0$, giving $N-1$ Goldstone bosons. The radial mode in the classical limit $\lambda \ll 1$ has a non-zero mass-squared $m^2_\rho=4 |\Phi|^2 \lambda$, while at the quantum level it mixes with $\delta \sigma$, as we will see in more detail in section \ref{subsec:corrsymbreak}.
The symmetry-breaking vacua and the symmetry-preserving vacuum coexist in the region of parameters
\begin{equation}
-1 < m^2 \leq \frac{\lambda}{2\pi}~,\label{eq:coex}
\end{equation}
and the two solutions coincide at $m^2=\frac{\lambda}{2 \pi}$.\footnote{While the values at the extrema of this window are scheme-dependent, their difference is not.} The symmetry-breaking vacuum is always energetically favored in this range. A similar situation happens in flat space when there is a first-order phase transition. In such a case, the vacuum with larger energy is meta-stable meaning that it can decay to the true vacuum via a bubble nucleation. There are a few important differences in AdS background:\footnote{Since here we are considering a theory without dynamical gravity, the classic result of \cite{Coleman:1980aw} cannot be applied directly.} there is an additional potential due to the curvature that can hinder the expansion of the bubble towards the boundary, and the evolution of the bubble is periodic in real time. It is therefore possible that both of these two vacua can play the role of the ``true vacuum'' of the theory in this parameter range. Further studies are needed to clarify this point.
\subsection{AdS$_2$}\label{eq:AdS2}
Expanding eq. \eqref{eq:trprop} around $d=1$ we find
\begin{align}
\mathrm{tr\,}\frac{1}{-\square + M^2} &= \frac{1}{4\pi}\left(-\frac{2}{d-1} + \log(4\pi )-\gamma\right)\nonumber \\& -\frac{1}{2\pi}\left( \psi\left(\tfrac12 + \sqrt{\tfrac 14 + M^2 }\right)\right) + \mathcal{O}(d-1)~,
\end{align}
where $\gamma$ is the Euler-Mascheroni constant, and $\psi (x)=d\log \Gamma(x)/dx$ is the digamma function. We reabsorb the pole, together with the $M^2$-independent terms $\frac{1}{4\pi}(\log(4\pi)-\gamma)$, in a renormalization of the parameter $m^2/\lambda$, namely
\begin{equation}
\left(\frac{m^2}{\lambda}\right)_{\rm bare} = \mu^{1-d} \left(\frac{m^2}{\lambda}\right)_{\rm ren}\left(1-\frac{1}{2\pi}\left(-\frac{2}{d-1} + \log(4\pi )-\gamma\right)+ \mathcal{O}(d-1)\right)~.
\end{equation}
Here $\mu$ is the scale introduced by dimensional regularization. Therefore we have
\begin{equation}
\mathrm{tr\,}\frac{1}{-\square + M^2}|_{\rm ren} = -\frac{1}{2\pi} \psi\left(\tfrac12 + \sqrt{\tfrac 14 + M^2 }\right) + \frac{1}{4\pi}\log(\mu^2)~,
\end{equation}
which gives the following effective potential up to a constant
\begin{align}
\frac{V(M^2,\Phi^i)}{N} & = -\frac{(M^2-m^2)^2}{8\lambda} +\frac12 M^2 (\Phi^i)^2 \nonumber\\ & -\frac{1}{4\pi}\int_0^{M^2} dz \, \psi\left(\tfrac12 + \sqrt{\tfrac 14 + z }\right) + \frac{M^2}{8\pi}\log(\mu^2)~.
\end{align}
The vacuum equations are
\begin{align}
0 &= \frac{2}{N}\,\partial_{M^2} V = \frac{m^2-M^2}{2 \lambda} + (\Phi^i)^2 -\frac{1}{2\pi} \psi\left(\tfrac12 + \sqrt{\tfrac 14 + M^2 }\right) + \frac{1}{4\pi}\log(\mu^2)~, \label{eq:minSAdSd1} \\
0 &= \frac{1}{N}\partial_{\Phi^i} V = \Phi^i \,M^2 \label{eq:minCAdSd1}~.
\end{align}
Here we see an important difference from the flat-space case. On $\mathbb{R}^2$, the digamma function in eq. \eqref{eq:minSAdSd1} is replaced by its flat-space limit, giving $-\frac{1}{4\pi} \log(M^2/\mu^2)$. Therefore, there is no solution of the vacuum equations with $M^2 = 0$ and $\Phi^i \neq 0$, i.e. there is no symmetry breaking \cite{Coleman:1974jh}. On the other hand, on AdS$_2$ there is no singularity as $M^2$ goes to $0$ and in fact we can find symmetry-breaking solutions. In the corresponding vacua we have $N-1$ fields with $M^2=0$, i.e. the Goldstone bosons. In the scheme we are adopting the symmetry-breaking solutions exist for
\begin{equation}
\frac{-m^2}{\lambda} \geq \frac{1}{\pi} \gamma +\log\mu\,.
\end{equation}
The existence of symmetry-breaking vacua for the $O(N)$ model at large $N$ on AdS$_2$ background was first observed in \cite{Inami:1985dj}.
The educated reader might object to our assertion of the existence of the Goldstone phenomenon in two dimensions. The Coleman-Mermin-Wagner theorem \cite{Mermin:1966fe, Coleman:1973ci} ---which states that a continuous symmetry cannot be spontaneously broken in two dimensions--- is evaded here because the curvature of the background cures the IR singularity in the propagator of the massless scalars. Note that we recover the absence of symmetry breaking in the flat-space limit, because $\mu $ goes to $\infty$ and the lower bound on $|m^2|/\lambda$ goes to $+\infty$. It is also worth stressing that while the large-$N$ limit sometimes gives rise to symmetry breaking and phase transitions even in situations in which they are impossible at finite $N$ (e.g. the spontaneous breaking of the continuous axial symmetry in the Nambu-Jona-Lasinio (NJL) model in two dimensions \cite{Gross:1974jv}, or the confinement-deconfinement transition for gauge theories on compact spatial manifolds \cite{Witten:1998zw, Aharony:2003sx}), this is not what is happening here, as we clearly see from the fact that the same large-$N$ theory in flat space does not have symmetry breaking. To understand this better, it is useful to compare the large-$N$ $O(N)$ model in $\mathbb{R}^2$ with the large-$N$ NJL model in $\mathbb{R}^2$. In the first example the spontaneous breaking of the symmetry would give rise to a number of Goldstone bosons that grows with $N$, hence taking the large-$N$ limit is not helpful for taming the IR singularity in the massless propagator. By contrast, in the second example there is only one Goldstone boson, whose loops are suppressed at large $N$, hence the IR singularity is only visible at subleading orders in $1/N$. Having understood the reason for the absence of symmetry breaking in flat space, we conclude that it is really the curvature of the background that is responsible for the existence of symmetry breaking in the $O(N)$ model on AdS$_2$.
\begin{figure}[t]
\centering
\begin{subfigure}{.45\textwidth}
\centering
\includegraphics[width=1\linewidth]{Pot1d1.pdf}
\label{fig:sub1}
\end{subfigure}
\begin{subfigure}{.45\textwidth}
\centering
\includegraphics[width=1\linewidth]{Pot1d2.pdf}
\label{fig:sub2}
\end{subfigure}
\begin{subfigure}{.45\textwidth}
\centering
\includegraphics[width=1\linewidth]{Pot1d3.pdf}
\label{fig:sub3}
\end{subfigure}
\begin{subfigure}{.45\textwidth}
\centering
\includegraphics[width=1\linewidth]{Pot1d4.pdf}
\label{fig:sub4}
\end{subfigure}
\caption{The large-$N$ effective potential $\frac{1}{N \lambda}V(\Phi^i)$ as a function of $|\Phi|$ in $d=1$ (i.e. in AdS$_2$), for $\lambda = 0.5$ and various values of $m^2$. Dimensionful quantities are expressed in units of the AdS radius $L$. The position of the symmetry-preserving (-breaking) vacuum, when it exists, is indicated with a red (blue) dot. The line is interrupted when there is no solution to eq. \eqref{eq:minSAdSd1}, i.e. when $\frac{m^2+\frac14}{2} + \lambda(\Phi^i)^2 +\frac{\lambda}{2\pi}(\gamma + \log(4\mu))< 0$.}
\label{fig:potd1}
\end{figure}
Let us next discuss the symmetry-preserving vacuum. Note that $M^2 + \frac{\lambda}{\pi} \psi\left(\tfrac12 + \sqrt{\tfrac 14 + M^2 }\right)$ increases monotonically to $+\infty$ in the range $M^2 > -\frac{1}{4 }$ above the BF bound. Hence we can always find a stable symmetry-preserving solution for $M^2$ by setting $\Phi^i = 0$ and solving \eqref{eq:minSAdSd1}, as long as $m^2 > - \frac{1}{4} - \frac{\lambda}{\pi}(\gamma + \log(4\mu)) $.
Similarly to the AdS$_3$ case, the symmetry-breaking and symmetry-preserving vacua coexist in the range
\begin{equation}
-\frac{1}{4 } - \frac{\lambda}{\pi}(\gamma + \log(4 \mu )) < m^2 \leq -\frac{\lambda}{\pi}(\gamma + \log(\mu))~.
\end{equation}
In fig. \ref{fig:potd1} we show the plot of the effective potential, re-expressed as a function of a single variable $|\Phi|$ by plugging the (numerical) solution of eq. \eqref{eq:minSAdSd1} for $M^2$, for $\lambda = 0.5$ and various values of $m^2$.
Let us finally point out an interesting analogy with Yang-Mills theory in AdS$_4$, which was studied in \cite{Aharony:2012jf}. At sufficiently low energies, the $O(N)$ model in the symmetry-breaking vacua can be well-described by a large-$N$ non-linear sigma model, which on $\mathbb{R}^2$ is asymptotically free and generates a mass dynamically, consistently with Coleman's theorem. On the other hand, in AdS$_2$ the symmetry-breaking vacuum is stable in the IR and the theory contains massless Goldstone bosons if the coupling $\lambda/ |m^2|$ is sufficiently weak (or equivalently if the AdS radius is sufficiently small), as was shown in the analysis of this section. Similarly, the Yang-Mills theory in $\mathbb{R}^4$ is asymptotically free and gapped due to confinement, while in AdS$_4$ it can also be in the deconfined phase and contain light, weakly-coupled gluons if the AdS radius is sufficiently small \cite{Aharony:2012jf}. Therefore, from the point of view of the non-linear sigma model, the symmetry-breaking phase of the $O(N)$ model can be thought of as an analogue of the deconfined phase, and the phase transition between the gapped phase and the symmetry-breaking phase is in analogy with the confinement-deconfinement transition of the Yang-Mills theory on AdS$_4$. It would be interesting to study this transition of the large-$N$ non-linear sigma model in more detail, and we hope to return to this problem in the near future.
\section{Correlators of the $O(N)$ Model in AdS}\label{sec:Corr}
We now compute correlation functions of the $O(N)$ vector model in AdS. The computation proceeds in three steps: As a first step, we derive the spectral representation for the bulk two-point functions of the $\sigma$ field. The result is expressed in terms of a single unknown function which physically describes the bubble integral in AdS. Then in the second step, we contract this two-point function with bulk-boundary propagators and construct the boundary four-point functions of the fundamental fields $\phi^{i}$. Lastly in the third step, we require the consistency of the OPE expansion of the boundary conformal theory---in particular the absence of the double-trace operators---and then determine the unknown function. The idea of determining the correlator by imposing the absence of the double-trace operators is similar to the so-called Mellin/Polyakov bootstrap \cite{Gopakumar:2016wkt,Gopakumar:2016cpb,Gopakumar:2018xqi}, but in our case the large-$N$ limit (in the bulk) helps to simplify the analysis greatly and allows us to obtain more powerful results.
After determining the correlator, we analyze it in several parameter regimes and discuss the physics it describes. Our primary focus is on the correlators in the gapped phase, but we also provide several interesting results for the symmetry-breaking phase. In particular, in the symmetry-breaking phase we find a distinctive pattern of the anomalous dimensions of the double-trace operators which can be interpreted as the AdS analogue of a particle resonance in flat space.
\subsection{``Bootstrapping'' Correlators in the Gapped Phase}\label{sec:Corr41}
Let us perform the computation following the strategy outlined above.
\subsubsection{Computation at large $N$}
\paragraph{Step 1: Spectral representation of the bulk two-point function} The first step is to compute the bulk two-point function of $\sigma$, namely $\langle\delta \sigma(x) \delta \sigma(y)\rangle$. Its formal expression can be straightforwardly derived from the quadratic fluctuations in the effective action \eqref{eq:LagEff} as discussed in section \ref{sec:generalities}. The result reads
\begin{align}\label{eq:delsigxy}
\langle \delta\sigma(x) \delta\sigma(y)\rangle = -\left[\frac{1}{\lambda}\mathds{1} + 2 \, B \right]^{-1}(x, y)~,
\end{align}
where $B(x,y)$ is a product of two bulk-to-bulk propagators
\begin{align}\label{eq:productofbtob}
B(x,y) = \left[\left(\frac{1}{-\square + M^2}\right)(x,y)\right]^2~.
\end{align}
Physically the function $B(x,y)$ describes the bubble diagram in the bulk and the expression \eqref{eq:delsigxy} is the sum of the geometric series of bubble diagrams (see figure \ref{fig:geometricseries}),
\begin{align}
-\left[\frac{1}{\lambda}\mathds{1} + 2 \, B \right]^{-1}(x, y) =-\left[\lambda -2 \lambda^2 B +4\lambda^{3} B\star B -8\lambda^{4}B\star B\star B+\cdots \right]\,,
\end{align}
where $B\star B$ denotes the convolution integral $\int d^{d+1}z\sqrt{g (z)} B(x,z) B(z,y)$. To perform this sum explicitly, we need to express $B(x,y)$ in the basis in which its action is diagonal. In flat space, this can be achieved simply by the Fourier-transformation since $B(x,y)$ in flat space depends only on the difference of the coordinates.
\begin{figure}
\centering
\includegraphics[clip,height=1cm]{geometricseries.pdf}
\caption{The resummation of the two-point function of $\delta\sigma$. The thick black lines are the bare propagators of $\sigma$ while the red curves are propagators of $\phi^{i}$. \label{fig:geometricseries}}
\end{figure}
The analogue of the Fourier transform in AdS is called the {\it spectral representation}, which we review in Appendix \ref{app:SpRep}. We already employed the spectral representation of the propagator for the calculation of the effective potential in section \ref{sec:Phases}. Here we will use that more generally it can be defined for any bi-local function $F(x,y)$ in AdS, that only depends on the distance between the two points. Namely, any such function can be expanded in the basis of harmonic functions
\begin{equation}
F(x,y) = \int^{\infty}_{-\infty}d\nu \tilde{F}(\nu)\Omega_{\nu}(x,y)~.\label{eq:specrecall}
\end{equation}
The spectral representation shares one important property with the standard Fourier transform: It converts convolutions into products. Namely we have
\begin{align}\label{eq:convolutiontoproduct}
F\star G(x,y)=\int d\nu\, \tilde{F}(\nu) \tilde{G}(\nu) \Omega_{\nu} (x,y)
\end{align}
where $\tilde{F}(\nu)$ and $\tilde{G}(\nu)$ are the spectral representations of $F(x,y)$ and $G(x,y)$.
Now, unlike a single bulk-to-bulk propagator, the spectral representation of the product of two bulk-to-bulk propagators \eqref{eq:productofbtob} takes a complicated form in general. For the time being, we do not need its explicit form and we will just treat it as an unknown function $\tilde{B} (\nu)$,
\begin{align}
B(x,y) = \left[\left(\frac{1}{-\square + M^2}\right)(x,y)\right]^2=\int_{-\infty}^{\infty} d\nu \,\tilde{B}(\nu )\Omega_{\nu}(x,y)\,.
\end{align}
From this expression, we can immediately derive the expression for the two-point function of $\sigma$ using the property \eqref{eq:convolutiontoproduct} as
\begin{align}\label{eq:spectralforsigma}
\langle\delta \sigma (x)\delta\sigma (y)\rangle=-\int_{-\infty}^{\infty} d\nu \,\frac{1}{\lambda^{-1}+2 \tilde{B} (\nu)}\Omega_{\nu}(x,y)\,.
\end{align}
\paragraph{Step 2: Computing the four-point functions of $\phi^{i}$}
The next step is to construct the four-point function of $\phi^{i}$. For this purpose, it is convenient to introduce the embedding coordinates of AdS and of the boundary CFT. The embedding coordinates of a point on the boundary of AdS are given by
\begin{align}
P=\left(\frac{1+\vec{x}^{\,2}}{2},\frac{1-\vec{x}^{\,2}}{2},\vec{x}\right)\,,
\end{align}
where $P$ satisfies $P^{I}P_{I}=0$ and the signature\footnote{Throughout this section, we consider Euclidean AdS. If one wants to obtain the result for the original (Lorentzian) AdS, one can simply analytically continue the final results.} is $(-,+,+,\cdots)$. The inner product of two different $P$'s is related to the distance between two points,
\begin{align}
P_{12}\equiv -2 P_1\cdot P_2 = |\vec{x}_1-\vec{x}_2|^2\,.
\end{align}
On the other hand, the embedding coordinates for a point in the bulk are given by
\begin{align}
X=\left(\frac{1}{2}\left(z+\frac{1}{z}+\frac{\vec{x}^{\,2}}{z}\right),\frac{1}{2}\left(-z +\frac{1}{z}-\frac{\vec{x}^{\,2}}{z}\right), \frac{\vec{x}}{z}\right)\,,
\end{align}
where $z$ and $\vec{x}$ are the Poincar\'e coordinates of AdS,
\begin{align}
ds^2=\frac{dz^2 +(d\vec{x})^2}{z^2}\,.
\end{align}
Using both $P$ and $X$, one can express the bulk-to-boundary propagator of the scalar field (with dimension $\Delta$) in the following simple way,
\begin{align}
K_{\Delta}(P,X)=\frac{\sqrt{\mathcal{C}_{\Delta}}}{(-2 P\cdot X)^{\Delta}}\,,
\end{align}
with
\begin{align}\label{eq:CDeltaDef}
\mathcal{C}_{\Delta}=\frac{\Gamma (\Delta)}{2\pi ^{d/2}\Gamma (\Delta-\frac{d}{2}+1)}\,.
\end{align}
Using the embedding coordinates, one can express the four-point function of $\phi^{i}$ at the leading order in the $1/N$ expansion, which is simply given by mean-field theory
\begin{align}
\langle \phi^{i}(P_1)\phi^{j}(P_2)\phi^{k}(P_3)\phi^{l}(P_4)\rangle|_{\mathcal{O}(1)}= \frac{\delta^{ij}\delta^{kl}}{(P_{12})^{\Delta}(P_{34})^{\Delta}}+\frac{\delta^{ik}\delta^{jl}}{(P_{13})^{\Delta}(P_{24})^{\Delta}}+\frac{\delta^{il}\delta^{jk}}{(P_{14})^{\Delta}(P_{23})^{\Delta}}\,.\nonumber
\end{align}
At the next order, the four-point function can be computed by contracting the bulk two-point function of $\sigma$ with the bulk-to-boundary propagators using the vertex $\sigma (\phi^{i})^2/\sqrt{N}$ in the effective action \eqref{eq:LagEff}. The result reads
\begin{equation}
\begin{aligned}
\langle \phi^{i}(P_1)\phi^{j}(P_2)\phi^{k}(P_3)\phi^{l}(P_4)\rangle|_{\mathcal{O}(1/N)}= &\frac{\delta^{ij}\delta^{kl} g_{12|34}+\delta^{ik}\delta^{jl} g_{13|24}+\delta^{il}\delta^{jk} g_{14|23}}{N}\,,\label{eq:scalarfourpfun}
\end{aligned}
\end{equation}
with\footnote{The prefactor $4$ is a standard combinatorial factor for the Feynman diagram.}
\begin{align}
\begin{aligned}
g_{12|34}& =4\int d X_1 dX_2 \,\,\langle\delta\sigma (X_1)\delta\sigma(X_2)\rangle\\
&\qquad \times K_{\Delta}(P_1,X_1)K_{\Delta}(P_2,X_1) K_{\Delta}(P_3,X_2)K_{\Delta}(P_4,X_2)\,.
\end{aligned}
\end{align}
To evaluate this integral, we use the spectral representation for $\langle\delta\sigma (X_1)\delta\sigma(X_2)\rangle$ in eq. \eqref{eq:spectralforsigma} and the following formula for the integral of the harmonic function, which we prove in Appendix \ref{ap:split}:
\begin{align}\label{eq:integralweneedtocompute}
\begin{aligned}
\int dX_1 dX_2\, \Omega_{\nu}(X_1,X_2)K_{\Delta}(P_1,X_1)K_{\Delta}(P_2,X_1)K_{\Delta}(P_3,X_2)K_{\Delta}(P_4,X_2)=\\
\frac{1}{(P_{12})^{\Delta}(P_{34})^{\Delta}}\frac{\Gamma_{\Delta-\frac{d}{4}-\frac{i\nu}{2}}^{2}\Gamma_{\Delta-\frac{d}{4}+\frac{i\nu}{2}}^{2}}{64\pi^{\frac{d}{2}+1}\Gamma_{\Delta}^2\Gamma_{1-\frac{d}{2}+\Delta}^2}\left[\frac{\Gamma_{\frac{d}{4}+\frac{i\nu}{2}}^{4}\mathcal{K}_{\frac{d}{2}+i\nu}(z,\bar{z})}{\Gamma_{\frac{d}{2}+i\nu}\Gamma_{i\nu}}+\frac{\Gamma_{\frac{d}{4}-\frac{i\nu}{2}}^{4}\mathcal{K}_{\frac{d}{2}-i\nu}(z,\bar{z})}{\Gamma_{\frac{d}{2}-i\nu}\Gamma_{-i\nu}}\right]\,.\end{aligned}
\end{align}
Here $z$ and $\bar{z}$ are the conformal cross ratios of the boundary points defined by
\begin{align}\label{eq:crossratiodef}
\frac{P_{12}P_{34}}{P_{13}P_{24}}=z\bar{z}\,,\qquad \frac{P_{14}P_{23}}{P_{13}P_{24}}=(1-z)(1-\bar{z})\,,
\end{align}
and $\mathcal{K}_{\Delta}$ is the scalar conformal block in $d$ dimensions.
As a result, we obtain\footnote{To arrive at the formula, we used the invariance of the integrand under $\nu\to-\nu$ and combined the contributions from two terms in the second line of \eqref{eq:integralweneedtocompute} into one.}
\begin{align}\label{eq:g1234explicit}
g_{12|34}=-\frac{1}{(P_{12})^{\Delta}(P_{34})^{\Delta}} \int \frac{d\nu}{2\pi} \frac{1}{\lambda^{-1}+2\tilde{B}(\nu)}\frac{\Gamma_{\Delta-\frac{d+2i\nu}{4}}^2\Gamma_{\Delta-\frac{d-2i\nu}{4}}^2\Gamma_{\frac{d+2i\nu}{4}}^4}{4\pi^{\frac{d}{2}}\Gamma_{\Delta}^2\Gamma_{1-\frac{d}{2}+\Delta}^2\Gamma_{i\nu}\Gamma_{\frac{d}{2}+i\nu}}\mathcal{K}_{\frac{d}{2}+i\nu}(z,\bar{z}) \,,
\end{align}
where we used the abbreviations $\Gamma_{x}\equiv \Gamma (x)$. To generate the OPE expansion, one simply needs to close the contour on the lower-half plane and read off the residues at the poles.
\paragraph{Step 3: Bootstrapping the bubble function}
Now that we derived the expression for $g_{12|34}$ \eqref{eq:g1234explicit}, the remaining task is to determine the unknown function $\tilde{B}(\nu)$. As mentioned above, the function $\tilde{B}(\nu)$ comes from a scalar bubble diagram in AdS and one can in principle compute it directly using Witten diagrams. For AdS$_3$, this was carried out explicitly in \cite{Giombi:2017hpr} by using the split representation\footnote{For recent developments on the computation of loop diagrams using the split representation, see \cite{Yuan:2017vgp,Yuan:2018qva}. See also alternative approaches \cite{Cardona:2017tsw,Bertan:2018khc,Bertan:2018afl} which do not rely on the split representation.} of the bulk-to-bulk propagators. However the computation is rather involved and it seems especially hard to obtain an explicit expression for even-dimensional AdS, such as AdS$_2$. Below, we present an alternative method which does not involve the evaluation of the diagram at all. Instead, we just impose the consistency of the OPE expansion of the boundary conformal theory and ``bootstrap'' the bubble function. By doing so, we succeed in obtaining an explicit expression valid in any dimensions\footnote{The expression for the bubble diagram in any dimension as a sum over double-trace propagators was derived using the orthogonality of bulk-to-bulk propagators in \cite{Fitzpatrick:2010zm}, and extended to the case of different masses in the two propagators in \cite{Fitzpatrick:2011hu}. Using harmonic analysis in AdS, similar expressions in Mellin space were derived in \cite{Penedones:2010ue,Fitzpatrick:2011dm}. For odd-dimensional AdS, this result was reproduced from the analytic conformal bootstrap in \cite{Aharony:2016dwx}. We also make use of harmonic analysis and the bootstrap idea but our method seems much simpler than these analyses, and can be readily generalized to other cases, for instance to fermions. See subsection \ref{subsec:GNcompcorr}.}.
To see this, let us project the four-point function of $\phi^i$ to the $O(N)$ singlet sector in the s-channel (namely $12\to 34$ channel). This can be achieved by contracting the correlator against a tensor $\delta_{ij}\delta_{kl}/N^2$ and the result reads
\begin{align}
\begin{aligned}
&\frac{1}{N^2}\langle \phi^{i}(P_1)\phi^{i}(P_2)\phi^{k}(P_3)\phi^{k}(P_4)\rangle=\\
&\frac{1}{(P_{12})^{\Delta}(P_{34})^{\Delta}}+\frac{1}{N}\left[\frac{1}{(P_{13})^{\Delta}(P_{24})^{\Delta}}+\frac{1}{(P_{14})^{\Delta}(P_{23})^{\Delta}}+g_{12|34}\right] +\mathcal{O}(1/N^2)\,.
\end{aligned}
\end{align}
As shown above, the projection to the singlet sector suppresses the contribution of the $t$- and $u$-channel diagrams and therefore diagrams which naively come from different orders of the $1/N$ expansion contribute at the same order in the large $N$ expansion. This fact has an important bearing on the OPE expansion of this correlator as we see below.
In terms of the s-channel OPE, the leading $\mathcal{O}(1)$ term simply represents a contribution of the identity operator. At $\mathcal{O}(1/N)$, there are two kinds of contributions: The first contribution comes from the generalized-free-field correlators (the first two terms inside the square bracket), and can be decomposed into a sum of the double-trace conformal blocks, namely
\begin{align}\label{eq:GFFOPE}
\frac{1}{(P_{13})^{\Delta}(P_{24})^{\Delta}}+\frac{1}{(P_{14})^{\Delta}(P_{23})^{\Delta}}=\frac{1}{(P_{12})^{\Delta}(P_{34})^{\Delta}} \sum_{\substack{\ell,n\\\ell:\text{ even}}}2c_{n,\ell}^2\mathcal{K}_{2\Delta+2n+\ell,\ell}(z,\bar{z})\,,
\end{align}
with\footnote{For the derivation of the OPE coefficients for the double-trace operators, see \cite{Fitzpatrick:2011dm}.}
\begin{align}
\begin{aligned}\label{eq:GFF3ptcoef}
c_{n,\ell}^{2}=\frac{(-1)^{\ell}\left[(\Delta-\frac{d}{2}+1)_n(\Delta)_{\ell+n}\right]^2}{\ell ! n! (\ell+\frac{d}{2})_n(2\Delta+n-d+1)_{n}(2\Delta+2n+\ell-1)_{\ell}(2\Delta+n+\ell-\frac{d}{2})_n}\,,
\end{aligned}
\end{align}
where $(a)_b$ is the Pochhammer symbol, $(a)_b=\Gamma(a+b)/\Gamma(a)$. The OPE expansion of the second contribution $g_{12|34}$ can be read off from the spectral parameter integral \eqref{eq:g1234explicit} by closing the contour on the lower-half plane. As is clear from the structure of the integrand, there are two sets of poles:
\begin{enumerate}
\item The poles at $\frac{d}{2}+i\nu=2\Delta +2n$ $(n\in \mathbb{N}_{\geq 0})$, which come from the factor $\Gamma_{\Delta-\frac{d+2i\nu}{4}}^2$.
\item The poles coming from
\begin{align}
\lambda^{-1}+2\tilde{B}(\nu)=0\,.
\end{align}
\end{enumerate}
The first set of poles is precisely at the position of the scalar double-trace primaries $\phi^{i}\square^{n}\phi^i$ while the second set of poles are at generic positions which depend on the coupling constant $\lambda$.
When the coupling constant $\lambda$ vanishes, we do not have the interacting diagram (namely $g_{12|34}=0$) and therefore the OPE expansion only yields the double-trace primaries \eqref{eq:GFFOPE}, which correspond to freely moving two-particle states. Now, once we turn on the coupling, the two-particle states are no longer free and we expect that (at least some of) their energies get slightly shifted\footnote{If we just consider a few perturbative Witten diagrams, they typically yield infinitesimal corrections to the dimensions of the operators which manifest themselves as logarithmic terms in the OPE expansion. By contrast, here we are {\it resumming} the diagrams and therefore we expect that the dimensions of operators receive finite shifts.}. However, as we saw above, even at finite $\lambda$ there are poles precisely at the locations of the non-interacting double-trace primaries. The only way to make it consistent with the physical intuition is to require that {\it the first set of poles in $g_{12|34}$ precisely cancels the corresponding generalized-free-field contribution \eqref{eq:GFFOPE}}. This leads to the relation
\begin{align}
\frac{1}{\lambda^{-1}+2\tilde{B}(\nu)}\frac{\Gamma_{\Delta-\frac{d+2i\nu}{4}}^2\Gamma_{\Delta-\frac{d-2i\nu}{4}}^2\Gamma_{\frac{d+2i\nu}{4}}^4}{4\pi^{\frac{d}{2}}\Gamma_{\Delta}^2\Gamma_{1-\frac{d}{2}+\Delta}^2\Gamma_{i\nu}\Gamma_{\frac{d}{2}+i\nu}}\quad \overset{\frac{d}{2}+i\nu\, \sim \,2\Delta+2n}{\sim} \quad \frac{-2c_{n,0}^2}{\frac{d}{2}+i\nu -(2\Delta+2n)}\,.
\end{align}
Now, on the left-hand side of this relation, we have $\Gamma_{\Delta-\frac{d+2i\nu}{4}}^2$ which produces double poles at $\frac{d}{2}+i\nu=2\Delta+2n$ while the right-hand side is just a simple pole. This means that the function $\tilde{B}(\nu)$ must have simple poles\footnote{Note that simple poles in $\tilde{B}(\nu)$ correspond to simple zeros of $1/(\lambda^{-1}+2\tilde{B}(\nu))$.} with appropriate residues at these points. Working out the residues, we conclude that the singularity of $2\tilde{B}(\nu)$ is given by
\begin{align}
2\tilde{B}(\nu)\overset{\frac{d}{2}+i\nu\, \sim \,2\Delta+2n}{\sim}-\frac{1}{\frac{d}{2}+i\nu-(2\Delta+2n)}\frac{(\frac{d}{2})_n\Gamma_{\Delta+n}\Gamma_{\Delta+n-\frac{d}{2}+\frac{1}{2}}\Gamma_{2\Delta+n-\frac{d}{2}}}{(4\pi)^{\frac{d}{2}}\Gamma_{n+1}\Gamma_{\Delta+n+\frac{1}{2}}\Gamma_{\Delta+n-\frac{d}{2}+1}\Gamma_{2\Delta-d+n+1}}\,.
\end{align}
Since the bubble function $\tilde{B}(\nu)$ must be symmetric under the shadow transform $\nu\to -\nu$, there are also poles on the upper half plane with the same residues.
Now, to fully determine $\tilde{B}(\nu)$, we need two further inputs: the knowledge about the existence of other poles and the behavior at infinity.
Let us first discuss the existence of other poles. If there were other poles, they would contribute already to the $\mathcal{O}(\lambda)$ correction of the four-point function (namely a simple one-loop diagram in AdS) and predict the existence of new operators. This would mean that the operator spectrum changes discontinuously once we turn on the coupling. However, on general grounds, we do not expect that to happen perturbatively.\footnote{We expect that such a change of spectrum happens only when there appears a bound state. However, a bound state is not something that one can see perturbatively; it can only be seen once one resums diagrams (as we will see in section \ref{sec:fermion}).} Therefore we conclude that there are no other poles. Let us next discuss the behavior at infinity. Physically the limit $\nu\to \infty$ corresponds to the high energy limit. Since the curvature of AdS becomes negligible in the high energy limit, one can determine the asymptotics of $\tilde{B}(\nu)$ from the high energy limit of the flat-space scattering amplitude. This leads to the following asymptotics of $\tilde{B}(\nu)$ in AdS$_{d+1}$:
\begin{align}
\tilde{B}(\nu) \sim 1/\nu^{3-d} \qquad (\nu\to \infty)\,.
\end{align}
Using these inputs, we can determine $\tilde{B}(\nu)$ uniquely to be\footnote{Note that we rewrote the expression into a manifestly shadow-symmetric form.}
\begin{align}
\tilde{B}(\nu)=-\sum_{n=0}^{\infty}\frac{2\Delta+2n-\frac{d}{2}}{\nu^2+(2\Delta+2n-\frac{d}{2})^2}\frac{(\frac{d}{2})_n\Gamma_{\Delta+n}\Gamma_{\Delta+n-\frac{d}{2}+\frac{1}{2}}\Gamma_{2\Delta+n-\frac{d}{2}}}{(4\pi)^{\frac{d}{2}}\Gamma_{n+1}\Gamma_{\Delta+n+\frac{1}{2}}\Gamma_{\Delta+n-\frac{d}{2}+1}\Gamma_{2\Delta-d+n+1}}\,. \label{eq:bubblesum}
\end{align}
We find perfect agreement with the expression for the bubble as a sum of double-trace exchanges found in position space in \cite{Fitzpatrick:2010zm}, upon translating it to the spectral representation. Moreover, given the simplicity of the propagator in the spectral representation, the sum in \eqref{eq:bubblesum} can be performed explicitly (with the help of Mathematica) and we finally get
\begin{align}\label{eq:finalBnu}
\begin{aligned}
&\tilde{B}(\nu)=\frac{\Gamma_{\Delta}\Gamma_{\Delta-\frac{d}{2}+\frac{1}{2}}\Gamma_{2\Delta-\frac{d}{2}}}{4(4\pi)^{\frac{d}{2}}}\\
&\times \left(\Gamma_{\Delta-\frac{d+2i\nu}{4}}\,{}_5\tilde{F}_{4}\left[\begin{array}{c}\{\frac{d}{2},\Delta,\Delta-\frac{d}{2}+\frac{1}{2},\Delta-\frac{d+2i\nu}{4},2\Delta-\frac{d}{2}\}\\\{\Delta+\frac{1}{2},\Delta-\frac{d}{2}+1,\Delta-\frac{d+2i\nu}{4}+1,2\Delta-d+1\}\end{array};1\right]\right.\\
&\left.\quad+ \Gamma_{\Delta-\frac{d-2i\nu}{4}}\,{}_5\tilde{F}_{4}\left[\begin{array}{c}\{\frac{d}{2},\Delta,\Delta-\frac{d}{2}+\frac{1}{2},\Delta-\frac{d-2i\nu}{4},2\Delta-\frac{d}{2}\}\\\{\Delta+\frac{1}{2},\Delta-\frac{d}{2}+1,\Delta-\frac{d-2i\nu}{4}+1,2\Delta-d+1\}\end{array};1\right]\right)\,,
\end{aligned}
\end{align}
where ${}_5\tilde{F}_{4}$ is the {\it regularized} generalized hypergeometric function. The result is valid in any dimensions, but the series that defines the hypergeometric function is divergent for $d+1\geq 4$. This is due to the fact that the bubble diagram has a UV divergence in this range of $d$\footnote{This is the same as what we have in flat space.}.
For $d=2$, i.e. on AdS$_3$, the expression simplifies to
\begin{align}\label{eq:ads3Bnuexplicit}
\tilde{B}(\nu)\quad \overset{d=2}{=} \quad \frac{i\left[\psi(\Delta-\frac{1+i\nu}{2})-\psi(\Delta-\frac{1-i\nu}{2})\right]}{8\pi \nu}\,.
\end{align}
This precisely matches the result in \cite{Giombi:2017hpr}.
\subsubsection{Generalization to $1/N$ corrections}\label{subsubsec:corrections}
The idea explained above can in principle be applied also to $1/N$ corrections. At the next order in the $1/N$ expansion, the four-point function in the singlet sector reads
\begin{align}
\begin{aligned}
&\frac{1}{N^2}\langle \phi^{i}(P_1)\phi^{i}(P_2)\phi^{k}(P_3)\phi^{k}(P_4)\rangle=\frac{1}{(P_{12})^{\Delta}(P_{34})^{\Delta}}\\
&+\frac{1}{N}\left[\frac{1}{(P_{13})^{\Delta}(P_{24})^{\Delta}}+\frac{1}{(P_{14})^{\Delta}(P_{23})^{\Delta}}+g_{12|34}\right] +\frac{1}{N^2}\left[g_{13|24}+g_{14|23}+g'_{12|34}\right]\,,
\end{aligned}
\end{align}
where $g'_{12|34}$ is the s-channel diagram for the $1/N^2$ correction to the four-point function. Since we already determined $g_{12|34}$, we also know $g_{13|24}$ and $g_{14|23}$. The OPE expansion of these two terms will again yield a collection of scalar double-trace operators without any shifts of the conformal dimension. However, for the same reason that we provided above, we do not expect such operators to exist in the full OPE expansion of the boundary conformal theory. This means that those double-trace contributions must be killed by the last term $g'_{12|34}$. This constrains the form of $g'_{12|34}$ and, with a few additional assumptions, it is likely that we can determine $g'_{12|34}$ without performing explicit diagrammatic computations. If successful, this would provide a recursive way to bootstrap $1/N$ corrections in this theory. It would be interesting to carry this out explicitly but we will leave it for future investigations.
It is worth pointing out that the bootstrap analysis performed in this subsection relies crucially on the fact that the correlators are meromorphic functions of the spectral parameter. By contrast, the scattering amplitude in flat space contains branch cuts, which make it difficult to perform analogous analysis. In this sense, the results in this subsection provide evidence that studying a theory on AdS rather than in flat space is not just extra complication but has real advantages.
\subsection{Analyzing the Correlators}
We now study the properties of the correlator that we derived above.
The simplest physical information that can be extracted from the correlator is the spectrum of the boundary conformal theory. As explained in the previous subsection, at $\mathcal{O}(1/N)$ the dimensions $h$ of the scalar double trace operators in the $O(N)$ singlet sector get finite shifts, which can be read off from the equation
\begin{align}
\lambda^{-1}+2 \tilde{B}\left(-i(h -\tfrac{d}{2})\right)=0\,.
\end{align}
As shown in figure \ref{fig:readingoffdimensions}, the dimensions determined by this equation start from the generalized-free-field spectrum and increase as we crank up the coupling $\lambda$, eventually receiving $\mathcal{O}(1)$ anomalous dimensions at strong coupling. Below we study several limits of this equation and discuss its physical consequences. For simplicity, we only present the results for AdS$_3$, but the qualitative features are the same\footnote{The only exception is the analysis on the critical point, which does not exist in AdS$_2$.} also for AdS$_2$.
\begin{figure}[t]
\centering
\includegraphics[clip,height=7cm]{readingoffdimensions.pdf}
\caption{The bubble function $\tilde{B}(\nu)$ for $d=2$ (AdS$_3$) and $\Delta=2$. The black curves denote the values of the bubble function $\tilde{B}(-i (h-d/2))$ and the horizontal axis is $h-d/2$ where $h$ is a conformal dimension. The spectrum of the operator can be read off from the intersection points of the black curve and the red dashed lines at $-\lambda^{-1}/2$. As shown in the figure, the red dashed line moves upward as we increase the coupling and eventually coincides with the horizontal axis.}\label{fig:readingoffdimensions}
\end{figure}
\paragraph{Large Conformal Dimension}
Let us first analyze the large-$h$ behavior of the operator spectrum. In the limit $h\to \infty$, the bubble function $\tilde{B}(\nu)$ for AdS$_3$ can be expanded as
\begin{align}
\tilde{B}(-i (h-1))=\frac{\cot \left(\pi (\Delta-\tfrac{h}{2})\right)}{8h}\left[1+\mathcal{O}(1/h^2)\right]
\end{align}
Therefore, the leading asymptotic operator spectrum is determined by the equation
\begin{align}
\lambda^{-1}+\frac{\cot \left(\pi (\Delta-\tfrac{h}{2})\right)}{4h}=0\,.
\end{align}
Incidentally the form of the equation resembles the one obtained for the SYK model with a quartic interaction ($q=4$) \cite{Maldacena:2016hyu, Polchinski:2016xgd}.
When the coupling is turned off, the solution to this equation is
\begin{align}
h=2\Delta+2n \qquad n\in \mathbb{N}_{\geq 0}\qquad (\lambda=0)
\end{align}
and coincides with the scalar double-trace spectrum. On the other hand, in the infinite coupling limit, they are shifted by $1$, namely
\begin{align}
h=2\Delta+2n +1 \qquad n\in \mathbb{N}_{\geq 0}\qquad (\lambda=\infty)\,,
\end{align}
while in the intermediate range of the coupling, the operators receive anomalous dimensions $0\leq \delta h\leq 1$.
\paragraph{Conformal Limit}
As we will discuss in detail in section \ref{sec:Critical}, the bulk theory becomes conformal when $\Delta=1$ and $\lambda=\infty$. The operator spectrum in this limit is determined by the equation
\begin{align}
\tilde{B}(-i (h-1))=\frac{\cot \tfrac{\pi h}{2}}{8-8h}=0\,,
\end{align}
whose solution is given by
\begin{align}
h=2\Delta+2n+1 =2n+3 \qquad n\in \mathbb{N}_{\geq 0}\,.\label{eq:sigmaconfspec}
\end{align}
This in particular contains the dimension $3$ operator which can be interpreted as the displacement operator.
Note also that this spectrum is identical\footnote{This fact seems to be related to the bulk conformal symmetry: From the bulk point of view, the conformal dimension (or more precisely the spectral parameter $\nu$) parametrizes the scale and the large dimension limit corresponds to the UV limit in the bulk. If the theory is at criticality, we expect that the observables do not qualitatively change under change of scales, which is what we found here. However, at the time of writing, we do not know how to make this argument more rigorous. For a related discussion on the consequence of the bulk conformality, see section \ref{sec:Critical}.} to the asymptotic spectrum that we derived above. In section \ref{sec:Critical}, we study the correlator at the critical point in more detail and compare with the known results.
\paragraph{Flat-Space Limit}
Let us next consider the flat-space limit. For this purpose, it is useful to reinstate the dependence on the AdS radius,\footnote{We use conventions where no factors of $L$ appear in the definition \eqref{eq:specrecall} of the spectral representation, hence $\tilde{F}$ carries the same dimension as $F$. This implies that when we reinstate $L$ the convolution identity becomes $\widetilde{F\star G} = L^{-d-1}\tilde{F}\tilde{G}$. In particular, the spectral transform of the delta function is $L^{-d-1}$.}
\begin{align}
\begin{aligned}\label{eq:totakeflat}
\langle \delta \sigma (x)\delta \sigma (y)\rangle&=\int^{\infty}_{-\infty}d\nu\, \tilde{F}_{\delta\sigma\delta\sigma}(\nu)\,\Omega_{\nu}(x,y)\,,\\
\tilde{F}_{\delta\sigma\delta\sigma}(\nu)&=-\frac{1}{L^4}\frac{1}{(\lambda L)^{-1}+2 L^2\tilde{B}(\nu)}\,,
\end{aligned}
\end{align}
where $L^2 \tilde{B}(\nu)$ is given by \eqref{eq:ads3Bnuexplicit}.
\begin{figure}[t]
\centering
\includegraphics[clip,height=6cm]{plot0.pdf}
\caption{The behavior of the bubble function $\tilde{B}(\nu)$ in AdS$_3$ for $\Delta=10$. As depicted in the figure, it has a sequence of poles (starting from $-i\nu= 2\Delta-1$) along the imaginary axis which condense into a two-particle branch cut upon taking the flat-space limit.\label{fig:sequenceofpoles}}
\end{figure}
To take the flat-space limit, we need to send the AdS radius $L$ to be infinite while keeping the parameters in the Lagrangian ($\lambda$ and $M^2$) fixed.
We can then identify the spectral representation with the Fourier transformation in the radial direction as we show rigorously in Appendix \ref{sec:FslSpRep}:
\begin{align}
L^{d+1} \,\tilde{F}_{OO}(\nu = L |p|) \underset{L\to\infty}{\longrightarrow} \tilde{F}^{\rm flat}_{OO}(|p|)~.
\end{align}
Here $\tilde{F}_{OO}$ on the left-hand side is the spectral representation of the bulk two-point functions $\langle OO\rangle$ and $\tilde{F}^{\rm flat}_{OO}$ is the radial Fourier transformation of the two-point function in flat space. If we perform the same rescaling of the spectral parameter to the function $\tilde{B}(\nu)$, we obtain
\begin{align}
L^3 \, \tilde{B}(L|p|)\underset{L\to\infty}{\longrightarrow} \tilde{B}^{\rm flat}(|p|)=\frac{{\rm arctan}(\tfrac{p}{2M})}{4\pi |p|}\,.\label{eq:fsscalar}
\end{align}
As expected, the quantity on the right hand side coincides with the bubble diagram in flat space $\mathbb{R}^3$:
\begin{align}
\tilde{B}^{\rm flat}(|p|)=\frac{{\rm arctan}(\tfrac{p}{2M})}{4\pi |p|}&=\int \frac{d^3q}{(2\pi)^3} \frac{1}{(q^2+M^2)((p+q)^2+M^2)}\,.
\end{align}
Thus the limit of the full two-point function correctly reproduces the result in flat space:
\begin{align}
\lim_{L\to \infty}L^{3} \,\tilde{F}_{\delta\sigma\delta\sigma}(\nu = L |p|)=-\frac{1}{\lambda^{-1}+2 \tilde{B}^{\rm flat}(|p|)}\,.
\end{align}
Note that the flat-space limit of the bubble function $\tilde{B}^{\rm flat}(|p|)$ has a branch cut if we analytically continue the momentum to the imaginary value:
\begin{align}\label{eq:branchcutBflat}
\tilde{B}^{\rm flat}(-i p)=\frac{{\rm arctanh}(\tfrac{p}{2M})}{4\pi p}\,.
\end{align}
This branch cut, which starts from $p=2M$, is a familiar two-particle threshold in flat space. On the other hand, the bubble function $\tilde{B}(\nu)$ in AdS contains a collection of poles on the (negative) imaginary $\nu$ axis:
\begin{align}
L^2\tilde{B}(-i\nu)=\frac{\psi (\Delta-\tfrac{1-\nu}{2})-\psi (\Delta-\tfrac{1+\nu}{2})}{8\pi\nu} =\infty \quad \text{at }\nu=2\Delta+2n-1 \quad (n\in \mathbb{N}_{\geq 0})\,.
\end{align}
As shown in figure \ref{fig:sequenceofpoles}, these poles come close to each other and reproduce the branch cut in \eqref{eq:branchcutBflat} upon taking the flat-space limit.
\paragraph{Scale Dependence of the Correlator}
\begin{figure}
\centering
\begin{minipage}{0.49\hsize}
\centering
\includegraphics[clip,height=4.5cm]{plot1.pdf}\\
(a) $\Lambda_{\rm AdS}=0.01$
\end{minipage}
\begin{minipage}{0.49\hsize}
\centering
\includegraphics[clip,height=4.5cm]{plot2.pdf}\\
(b) $\Lambda_{\rm AdS}=10$
\end{minipage}
\caption{Comparison of the two-point functions in AdS and in flat space. In both figures, $\Lambda_{m}=1$ and the blue curve denotes the function in AdS ($\tilde{F}_{\delta\sigma\delta\sigma}(\nu)$) while the red curve denotes the function in flat space ($\tilde{F}^{\rm flat}_{\delta\sigma\delta\sigma}(\nu)$). (a) The plot for $\Lambda_{\rm AdS}=0.01$. Since $\Lambda_{\rm AdS}$ is small, the theory does not see the AdS curvature until it reaches the deep IR. Therefore the two functions stay close for a wide range of parameters. (b) The plot for $\Lambda_{\rm AdS}=10$. In this case, as soon as the functions deviate from the UV value (denoted by a gray dashed line), they start to differ significantly.\label{fig:comparewithflat}}
\end{figure}
Let us now analyze the behavior of the two-point function in a generic parameter regime. As discussed above, the spectral parameter plays the role of the radial momentum in flat space. Therefore, by analyzing the behavior of the correlator as a function of the spectral parameter, one can gain some information about the renormalization-group flow and the scale dependence of the theory. For this purpose, it is convenient to introduce the following dimensionless parameters:
\begin{align}
\Lambda_{m}= \frac{M}{\lambda}\,,\qquad \Lambda_{\rm AdS}=\frac{1}{\lambda L}\,.
\end{align}
Roughly speaking, $\Lambda_{m}$ parametrizes the mass scale of the theory while $\Lambda_{\rm AdS}$ is the scale which governs the ``finite-size correction'' coming from the AdS radius. In terms of these quantities, $\tilde{B}(\nu)$ and the spectral transform of the two-point function are given by
\begin{align}
\begin{aligned}
\tilde{B}(\nu)=&\frac{i\left[\psi \left(\tfrac{1-i\nu}{2}+\sqrt{1+\tfrac{\Lambda_{m}^2}{\Lambda_{\rm AdS}^2}}\right)-\psi \left(\tfrac{1+i\nu}{2}+\sqrt{1+\tfrac{\Lambda_{m}^2}{\Lambda_{\rm AdS}^2}}\right)\right]}{8\pi \nu}\,,\\
\tilde{F}_{\delta \sigma \delta\sigma}(\nu)&=\frac{1}{\Lambda_{\rm AdS}+2 \tilde{B}(\nu)}\,.
\end{aligned}
\end{align}
Unless the theory is fine-tuned to be at the critical point, the UV behavior of the theory is governed by flat-space physics because the AdS curvature becomes negligible in the extreme UV. Therefore, the two-point function approaches the flat-space counterpart
\begin{align}\label{eq:flatspacecounterpart}
\tilde{F}_{\delta \sigma \delta\sigma}^{\rm flat}(\nu)=\left(\Lambda_{\rm AdS}+\frac{{\rm arctan}\left[\tfrac{\Lambda_{\rm AdS}\nu}{2\Lambda_{m}}\right]}{2\pi \nu}\right)^{-1}\,,
\end{align}
when $\nu$ is sufficiently large. Let us first consider the situation where $\Lambda_{\rm AdS}\ll 1$ and $\Lambda_{m}$ is finite. When $\Lambda_{\rm AdS}$ is small, we expect that the theory does not see the effect of the AdS curvature until we reach the deep IR regime and therefore the two-point function stays close to \eqref{eq:flatspacecounterpart} for a wide range of the spectral parameter. This is indeed the case as shown in figure \ref{fig:comparewithflat}-(a).
Only when the spectral parameter becomes of order $\Lambda_{\rm AdS}$ does the theory start seeing the effect of the AdS radius, and from there the two functions start to differ.
On the other hand, if $\Lambda_{\rm AdS}$ is much larger than $1$, there is basically no regime in which the flat-space approximation is valid. Therefore, as soon as the two-point function deviates from the UV value, the two functions start differing significantly (see figure \ref{fig:comparewithflat}-(b)).
Finally, when both $\Lambda_{\rm AdS}$ and $\Lambda_{m}$ satisfy $\Lambda_{\rm AdS}, \Lambda_{m}\ll 1$, the theory does not see any scale until deep in the IR, and therefore flows close to the critical point in flat space. This can also be verified explicitly using our result as shown in figure \ref{fig:comparewithconformal}.
\begin{figure}[t]
\centering
\begin{minipage}{0.49\hsize}
\centering
\includegraphics[clip,height=4.5cm]{plot3.pdf}\\
(a) $\Lambda_m=\Lambda_{\rm AdS}=0.0001$
\end{minipage}
\begin{minipage}{0.49\hsize}
\centering
\includegraphics[clip,height=4.5cm]{plot4.pdf}\\
(b) $\Lambda_m=\Lambda_{\rm AdS}=0.001$
\end{minipage}
\caption{Flow to the flat-space critical point. In both figures, the blue and red curves denote the two-point functions in AdS ($F_{\delta\sigma\delta\sigma}(\nu)$) and in flat space ($F^{\rm flat}_{\delta\sigma\delta\sigma}(\nu)$) respectively while the black dashed line denotes the two-point function at the critical point in flat space ($F^{\text{flat, conformal}}_{\delta\sigma\delta\sigma}(\nu)=4\nu$). When both $\Lambda_{m}$ and $\Lambda_{\rm AdS}$ are small, the theory does not see any scale until deep in the IR. Therefore, the two-point function exhibits conformal behavior in flat space in some range of the spectral parameter. (a) The plot for $\Lambda_m=\Lambda_{\rm AdS}=0.0001$. As shown in the figure, the three curves are close to each other for a wide range of the spectral parameter. (b) If we increase $\Lambda_m$ and $\Lambda_{\rm AdS}$, the curves start to deviate from each other (although there is still a small range of the spectral parameter in which they coincide.)\label{fig:comparewithconformal}}
\end{figure}
It would be interesting to study in more detail the scale dependence of the theory by formulating a renormalization-group equation for quantum field theories in AdS. In particular, it would be nice to understand the differences and the similarities between the usual finite-size scaling \cite{Fisher:1972zza,Brezin:1981gm} and the corrections induced by the AdS radius.
\subsection{Correlators in the Symmetry-Breaking Phase}\label{subsec:corrsymbreak}
We now consider the symmetry-breaking phase with $m^2<m_0^2$ and non-zero VEV\footnote{The quantities $m^2/\lambda$ and $m_0^2/\lambda$ are UV divergent and depend on the choice of regularization scheme, but the VEV $|\Phi|^2$ is physical and independent of such choices.}
\begin{align}
|\Phi|^2 = \frac{m_0^2-m^2}{2\lambda} ~,\\
\frac{m_0^2}{\lambda}\equiv\mathrm{tr\,}\left(-\frac{1}{\square}\right)~.
\end{align}
We will see some interesting new phenomena in this phase. In particular, using the correlators computed above, we will provide an example of the AdS analogue of a resonance in a scattering amplitude. We will also comment on the implication of the existence of the bulk Goldstone bosons for the boundary conformal theory.
Without loss of generality we take the VEV along the $N$-th component $\Phi^i = \delta^{iN} |\Phi|$. We decompose the $O(N)$ vector in the radial mode $\rho$ and the Goldstone bosons $\pi^i$
\begin{align}
\rho & \equiv \delta{\phi}^N = \phi^N - \sqrt{N} |\Phi|~,\\
\pi^i & \equiv \phi^i~,~~i=1,\dots,N-1~.
\end{align}
In this phase $M^2=0$, which implies a non-zero VEV $\Sigma = -\frac{m^2}{2} $ and
\begin{equation}
\delta \sigma = \sigma + \sqrt{N}\frac{m^2}{2}~.
\end{equation}
The effective Lagrangian in these variables is, up to a constant
\begin{align}
\mathcal{L}_{\rm eff} & = \frac12 (\partial \rho)^2 +\frac12 (\partial \pi^i)^2 - \frac{1}{2\lambda} (\delta\sigma)^2 + 2 |\Phi| \, \delta\sigma \rho+ \frac{1}{\sqrt{N}} \,\delta\sigma (\pi^i)^2 \nonumber \\ & + \sqrt{N}\,\left(\frac{m^2}{2} + \lambda |\Phi|^2\right)\delta\sigma + \frac{N}{2} \, \mathrm{tr\,} \log\left(-\square + \frac{2}{\sqrt{N}}\delta\sigma \right)~.\label{eq:expsymmact}
\end{align}
Note that the terms linear in $\delta\sigma$ cancel, as they should.
\subsubsection{Resonance in AdS}
In eq. \eqref{eq:expsymmact} we see that the symmetry-breaking VEV induces a mixing between $\rho$ and $\delta\sigma$. As a consequence, even at infinite $N$ there is an $\mathcal{O}(1)$ interaction between $\rho$ and two pions. Before we start the analysis in AdS, let us remind the reader that in flat space this implies that the $\rho$ particle, which at tree-level has mass $m^2_\rho = 4\lambda |\Phi|^2$, becomes unstable at the quantum level. The associated pole gets an imaginary part, and it manifests as a resonance in the 2 to 2 amplitude of the pions. This is discussed in the original paper \cite{Coleman:1974jh}, and we will see it emerge from the flat-space limit of our result.
The quadratic terms in the action involving $\rho$ and $\sigma$ can be written in matrix notation as follows
\begin{align}
&\int_x \int_y \,\frac 12 \left( \delta\sigma(x) ~\rho(x) \right) K(x,y)
\begin{pmatrix} \delta\sigma(y) \\ \rho(y)\end{pmatrix}~,\\
& K(x,y) \equiv \begin{pmatrix}
-\frac{1}{\lambda}\delta^{d+1}(x,y)- 2B(x,y) & 2 |\Phi| \delta^{d+1}(x,y) \\
2 |\Phi|\delta^{d+1}(x,y) & -\square_y \delta^{d+1}(x,y) \\
\end{pmatrix}
\end{align}
where $\int_x \equiv \int_{AdS_{d+1}} d^{d+1} x \sqrt{g(x)}$ and similarly for $y$, and $\delta^{d+1}(x,y)$ is the delta function on AdS. The bubble function $B(x,y)$ here is evaluated at $M^2 =0$, i.e. with $\Delta =2$. Inverting the 2-by-2 kernel $K(x,y)$ above one obtains the corresponding matrix of two-point functions, at leading order at large $N$. This computation becomes algebraic if we employ the spectral representation.
Defining as usual
\begin{equation}
K(x,y) = \int_{-\infty}^\infty d\nu \, \tilde{K}(\nu) \Omega_\nu(x,y)~,
\end{equation}
we have
\begin{equation}
\tilde{K}(\nu) = \begin{pmatrix}
-\frac{1}{\lambda}- 2 \tilde{B}(\nu) & 2 |\Phi| \\
2 |\Phi| & \nu^2+\frac{d^2}{4} \\
\end{pmatrix}~.
\end{equation}
Therefore, the matrix of two-point function can be simply expressed in terms of the function $\tilde{B}(\nu)$ as follows
\begin{align}
& \langle \begin{pmatrix} \delta\sigma \\ \rho \end{pmatrix} \left( \delta\sigma ~\rho\right) \rangle(\nu) = (\tilde{K}(\nu))^{-1} = \frac{1}{\det \tilde{K}(\nu)}\begin{pmatrix}
\nu^2+\frac{d^2}{4} & -2 |\Phi| \\
-2 |\Phi| & -\frac{1}{\lambda}- 2 \tilde{B}(\nu)\\
\end{pmatrix} ~.\label{eq:matrixcorr}
\end{align}
Instead of diagonalizing the matrix, if we are interested in the spectrum of boundary operators contributing to either of the two correlators, we can simply look at the zeroes of the determinant
\begin{align}
\det \tilde{K}(\nu) &= \left(\nu^2+\frac{d^2}{4}\right)\left(-\frac{1}{\lambda}- 2 \tilde{B}(\nu)\right) - 4|\Phi|^2 = -\frac{1}{\lambda}\left[f(\nu)+ 2(m^2_0-m^2)\right] \label{eq:detres}\\ f(\nu) & \equiv \left(\nu^2+\frac{d^2}{4}\right)\left(1+ 2\lambda \tilde{B}(\nu)\right)~, \label{eq:deff}
\end{align}
along the imaginary $\nu$ axis. The scaling dimensions $h$ of these operators are related to the location of the zeroes by $-i \nu = \pm (h - \tfrac{d}{2})$.
By attaching bulk-to-boundary propagators of four external pions to the bulk two-point function of $\delta \sigma$, similarly to what we did in the previous section in the massive phase, we obtain a boundary four-point correlator that is the AdS version of the 2 to 2 scattering amplitude of pions. The poles of the two-point function of $\delta \sigma$ ---the upper diagonal entry of \eqref{eq:matrixcorr}--- then correspond to the dimensions of operators exchanged in this four-point function. By inspection of \eqref{eq:matrixcorr} we see that these poles are again simply given by the zeroes of $\det \tilde{K}(\nu)$.
In the limit $\lambda \to 0$ we recover classical physics in AdS, and indeed the equation $\det \tilde{K}(\nu) = 0$ reduces to
\begin{equation}
\nu^2+\frac{d^2}{4} + 2(m^2_0-m^2) = 0~,\label{eq:freerho}
\end{equation}
that corresponds to a $\rho$ particle of mass $m^2_\rho = 4 \lambda |\Phi|^2 = 2(m^2_0-m^2)$.
\begin{figure}
\hspace{2.1cm}
\begin{subfigure}{0.5\textwidth}
\centering
\includegraphics[width=1.51\linewidth]{ResonancePlot1.pdf}
\label{fig:sub1}
\end{subfigure}
\newline
\phantom{1}\hspace{2cm}
\begin{subfigure}{0.5\textwidth}
\centering
\includegraphics[width=1.51\linewidth]{ResonancePlot2.pdf}
\label{fig:sub3}
\end{subfigure}
\caption{In black, the function $f(-i(h-\tfrac{d}{2}))$ defined in eq. \eqref{eq:deff} as a function of $h-\tfrac{d}{2}$, for $d=2$, i.e. AdS$_3$. The red dashed line is the constant $-2(m_0^2 - m^2)$ which we fixed to $-40$. Its intersections with the black curve determine the values $h$ of the scaling dimensions of boundary operators contributing to the AdS amplitude of pions. The gray vertical dashed lines are the dimensions of double-trace operators in the free theory. The orange curve is the limit $\lambda \to 0$ of the black curve, and its intersection with the red line corresponds to the $\rho$ particle at tree-level. The dotted intersection between the black and the red curve is the continuation of the $\rho$ particle to finite $\lambda$. The arrows denote the sign of the double-trace anomalous dimensions.}
\label{fig:Resonance}
\end{figure}
Turning on $\lambda$, we generate an additional infinite set of poles associated to the field $\delta\sigma$, that correspond to the finite-coupling version of the double-trace operators / two-pion states. These poles are analogous to the ones that we discussed in the massive phase, see fig. \ref{fig:readingoffdimensions}. For small $\lambda$, the spectrum is illustrated in the upper plot in fig. \ref{fig:Resonance}: there is a pole very close to the free-particle pole of eq. \eqref{eq:freerho}. Moreover, all the ``double-trace'' poles are close to their values in the free theory, and there is a distinctive feature in the pattern of their anomalous dimensions, namely they flip sign when they cross the $\rho$-particle pole. Note that this is different from the situation in the massive phase, depicted in fig. \ref{fig:readingoffdimensions}, in which all the anomalous dimensions have the same sign. As we crank up $\lambda$, the distinction between the $\rho$-particle pole and the double-trace poles becomes obscured, but the qualitative features of the pattern of the anomalous dimensions persist as shown in the lower figure of fig. \ref{fig:Resonance}.
\begin{figure}[t]
\centering
\begin{minipage}{0.49\hsize}
\centering
\includegraphics[clip, height=5cm]{phaseshift1.pdf}\\
(a) $\lambda=4$, $x=8.99$, $y=0.35$
\end{minipage}
\begin{minipage}{0.49\hsize}
\centering
\includegraphics[clip, height=5cm]{phaseshift2.pdf}\\
(b) $\lambda=30$, $x=9.03$, $y=2.01$
\end{minipage}
\caption{The anomalous dimensions of the double-trace operators $\Delta_n-(2\Delta+2n)$. In both figures, we considered AdS$_3$ and set $\Delta=2$ (i.e. $M^2$ = 0) and $2(m_0^2 - m^2) = 400$. The black dots are the actual values of the anomalous dimensions while the dashed curve is the phase of the Breit-Wigner pole, $\arg \left(\dfrac{-1}{n-x+iy}\right)$. The values of $x$ and $y$ are determined by fitting the result for the anomalous dimensions. (a) When the coupling is small, the anomalous dimension undergoes a quick shift by $\pi$ around the $\rho$ pole. Correspondingly the imaginary part of the Breit-Wigner pole $y$ is small. (b) As we increase $\lambda$, the slope becomes less steep, which implies that the resonance becomes broad.}\label{fig:phaseshiftano}
\end{figure}
This sign-flip is reminiscent of one of the signatures of a resonance in a 2-to-2 scattering amplitude in flat space, namely that the phase shift gets shifted by $\pi$ across the resonance. The connection can be made more explicit using the relation\footnote{Roughly speaking, the exponential on the right hand side measures the AdS analogue of the phase shift, namely the relative phase shift over one period of the AdS time between the free propagation of the two particles in AdS and the actual particle/operator that appears in the OPE of four-point function. For a more precise definition and derivation, see the original paper \cite{Paulos:2016fap}.
} proposed in \cite{Paulos:2016fap},
\begin{align}
e^{2i\delta_{l}(s)}=\lim_{\Delta_{i}\to \infty} \langle e^{-i\pi (\Delta-\Delta_1-\Delta_2-l)}\rangle\,,
\end{align}
where $\Delta_i$ are the dimensions of the external operators while $\langle \ast\rangle$ denotes the average over the operators with dimension $\Delta\simeq\sqrt{s}$ weighted by the structure constants squared. If we neglect the weights coming from the structure constants and assume that the dimensions of the operators are close to those of the double-trace operators in the free theory, this simplifies to the following relation between the anomalous dimensions of the double-trace operators and the phase shift $\delta_l(s)$,
\begin{align}\label{eq:simplerelanomalousphase}
\delta_l (s)\sim \frac{\pi}{2}(\Delta_1+\Delta_2+2n+l-\Delta_n^{(l)})\qquad \sqrt{s}\sim \Delta_1+\Delta_2+2n+l\,,
\end{align}
where $\Delta_n^{(l)}$ is the $n$-th lightest operator with spin $l$. As we show in fig. \ref{fig:phaseshiftano}, the plot of the anomalous dimensions\footnote{To apply the formula \eqref{eq:simplerelanomalousphase}, we should regard the $\rho$-pole as one of the double-trace poles. This is physically reasonable since there is no clear distinction between $\rho$ particle and the double-trace states at finite $\lambda$.} of our result neatly reproduces the behavior of the phase shift coming from a simple pole of the Breit-Wigner type, $\frac{-1}{n-x+iy}$. In particular, the slope becomes less steep as we increase the coupling constant (see fig. \ref{fig:phaseshiftano}-(b)), in line with the flat-space intuition that the particles decay more easily when the coupling is strong. These results provide evidence that the pattern of the anomalous dimensions that we found is tied to the existence of the resonance in flat space.\footnote{The emergence of a resonance in the flat-space limit of AdS/CFT was discussed in \cite{Fitzpatrick:2011hu} using the Mellin amplitude. Our result provides an explicit realization of such a mechanism and also shows that the resonance-like behavior can be seen already at finite AdS radius.}
Alternatively, we can check explicitly that the resonance emerges upon taking the flat-space limit of eq. \eqref{eq:detres}. For simplicity we will restrict ourselves to $d=2$, i.e. AdS$_3$, in which the bubble function for $M^2 = 0$ simplifies to
\begin{equation}
\tilde{B}(\nu)\vert_{d=2, M^2=0} = -\frac{1}{2\pi(1+\nu^2)} + \frac{\tanh\left(\frac{\pi \nu}{2}\right)}{8\nu}~.
\end{equation}
Plugging $\nu = |p|$ in \eqref{eq:detres}, and then taking $|p|$, $\lambda$ and $|\Phi|$ to $\infty$ with $\lambda/|p|$ and $|\Phi|^2/|p|$ fixed, we obtain
\begin{equation}
-\lambda \det \tilde{K}(\nu)\longrightarrow p^2\left(1+\frac{\lambda}{4 |p|}\right) +4 \lambda |\Phi|^2~.\label{eq:fspres}
\end{equation}
This expression is equivalent to the flat-space result of \cite{Coleman:1974jh} so we can simply rely on their analysis. We have a square-root branch point at $p^2 = 0$ due to the dependence on $|p| = \sqrt{p^2}$, that is produced by the condensation of the ``double-trace'' poles. This branch-cut is interpreted as the two-pion cut in the 2-to-2 scattering amplitude of massless pions. The complex $|p|$ plane thus contains both the first sheet ($\mathrm{Re} |p| > 0$) and the second sheet ($\mathrm{Re} |p| < 0$) for the variable $p^2$, with the physical region $p^2<0$ corresponding to the negative imaginary axis for $|p|$. Setting \eqref{eq:fspres} to zero and solving for $|p|$ we find the following two poles
\begin{equation}
|p| = -\frac{\lambda}{8} \pm i \sqrt{4\lambda |\Phi|^2 - \frac{\lambda^2}{64} }~,
\end{equation}
that are both on the second sheet, and approach the classical result $m_\rho^2 = 4\lambda |\Phi|^2$ for small $\lambda$. We see that indeed, as anticipated from the AdS result, the width increases with $\lambda$, at least as long as $4\lambda |\Phi|^2 - \frac{\lambda^2}{64} > 0$. On the other hand some features of the flat-space result do not have an evident counterpart in the $\nu$ space analysis of the AdS correlator above. Namely, there are actually two distinct poles on the second sheet in flat-space, while we could only detect one ``resonance-like'' feature in the anomalous dimensions. Moreover for large values of $\lambda$, such that $4\lambda |\Phi|^2 - \frac{\lambda^2}{64} < 0$, both poles lie on the real negative $|p|$ axis, and one of them gets closer and closer to the physical sheet as $\lambda \to \infty$. It would be interesting to better elucidate the relation between the (simple) analytic structure of the boundary four-point function in $\nu$ space, and the resulting (complicated) analytic structure in $p^2$ that emerges in the flat-space limit. The example presented here can provide a useful playground for further studies of this problem.
\subsubsection{Goldstone Bosons and Conformal Manifold} One of the most basic aspects of the physics of the symmetry-breaking phase is the presence of massless Goldstone bosons, namely the $N-1$ pions $\pi^i$ with mass-squared $M^2=0$. In AdS background, the boundary values of these fields define marginal operators in the boundary conformal theory. Therefore, at least for $N=\infty$ and any $\lambda > 0$, the set of boundary conformal theories defined by the $O(N)$ model at any of the symmetry-breaking vacua together form a conformal manifold, parametrized by exactly marginal couplings. Since the value of the exactly marginal couplings corresponds to the constant expectation value of the bulk fields, this conformal manifold has the same geometry as the space of vacua in the bulk, namely it is an $(N-1)$-dimensional sphere. By analogy with flat space in $d+1=3$, it seems reasonable to assume that the symmetry-breaking vacua in AdS will continue to exist also at finite $N$, suggesting that the conformal manifold should remain unlifted by $1/N$ corrections (the situation in AdS$_2$ is somewhat special, as we discussed in section \ref{eq:AdS2}).
More generally, the low-energy effective theory at a vacuum with symmetry breaking of a continuous symmetry is governed by the structure of a quotient between the broken and the unbroken group, $G/H$ \cite{Coleman:1969sm,Callan:1969sn}. When this occurs in AdS, we expect that the boundary conformal theory has a conformal manifold which coincides with the coset $G/H$, and the global symmetry group at each point in the conformal manifold is given by a residual symmetry group $H$. This quotient structure is reminiscent of the construction of conformal manifolds in superconformal field theories \cite{Green:2010da}.
Perhaps it might look surprising that such a simple construction would give rise to a conformal manifold in a non-supersymmetric theory, that is famously hard to find examples of (see \cite{Bashmakov:2017rko, Behan:2017mwi, Hollands:2017chb, Sen:2017gfr} for recent discussions of the implications of conformal manifolds in general conformal field theories). However the key point is that here we are considering a {\it conformal theory} rather than a {\it conformal field theory}, by which it is meant that the boundary theory is not local, i.e. it does not contain a stress-tensor. As recently pointed out in the context of the long-range Ising model \cite{Behan:2017emf}, relaxing the constraint of locality makes it easier to find examples of conformal manifolds.\footnote{The observation in \cite{Behan:2017emf} is weaker, namely it concerns continuous families of conformal theories, where the continuous parameter does not necessarily correspond to an exactly marginal operator. For instance, a massive scalar in AdS, or equivalently a generalized-free-field theory, comes within a family of theories parametrized by the bulk mass, or the boundary scaling dimension. However there is no local operator in the conformal theory that couples to this continuous parameter. Similarly, in the context of the $O(N)$ model changing $\lambda$ (in units of the AdS radius) gives rise to a continuous family of boundary theories, which in this case are not simple generalized free fields. On the other hand, the example that we are discussing here is stronger, because we do have exactly marginal operators in the conformal theory. Other examples of conformal manifolds in non-supersymmetric theories were recently found by relaxing the constraint of unitarity \cite{Caetano:2016ydc, Mamroud:2017uyz}.} An even simpler example of a conformal manifold without locality is just given by a free theory of massless scalar fields in AdS background, which has a shift symmetry. 
The symmetry-breaking phase in the $O(N)$ model is a more appealing example because it is an interacting theory, and it provides a dynamical mechanism that enforces the presence of massless scalar fields.
If, on the other hand, the boundary conformal theory is local, the bulk theory must contain dynamical gravity. It is generally believed that in quantum gravity all the symmetries are gauged \cite{Banks:2010zn}. In such situations, our construction of the conformal manifold would not work since the symmetry breaking in the bulk is accompanied by the Higgs phenomenon, and induces a mixing between the massless Goldstone bosons and the gauge fields that makes all the fields massive. On the boundary side, this phenomenon can be interpreted as the mixing between the current multiplet $J_{\mu}$, which is dual to the gauge field, and the marginal operators $O$. Together they recombine into a single long multiplet satisfying $\partial^{\mu}J_{\mu} \sim O$. This observation suggests that two seemingly different facts, the difficulty of finding a conformal manifold in non-supersymmetric conformal field theories and the absence of global symmetries in quantum gravity, might be related to each other.
The relation between symmetry breaking in the bulk and conformal manifolds also has interesting implications for the exact-marginality conditions: Suppose there exist at least two marginal operators in the theory, which we denote with $O$ and $O^{\prime}$. When one perturbs the theory by a marginal coupling $\int g \, O$, one can show using first-order conformal perturbation theory that the other marginal operator $O^{\prime}$ remains marginal only when the OPE coefficient $C_{OOO^{\prime}}$ vanishes. In our context, this constraint is trivially satisfied since in the low-energy effective action there is no cubic term in the pions. (More generally, any term with an odd number of pions is not allowed, with the exception of Wess-Zumino terms when the bulk is even dimensional \cite{Witten:1983tw}).
Beyond leading order, one also finds constraints on higher-point functions of the marginal operators whose exploration has started only recently. In particular the next-to-leading constraints were studied in the previously mentioned references \cite{Bashmakov:2017rko, Behan:2017mwi, Hollands:2017chb}. It would be interesting to verify beyond leading order that the dynamics of the low-energy effective action in the bulk automatically leads to boundary correlators that satisfy the marginality constraints.\footnote{A similar problem was studied in \cite{Bashmakov:2017rko}. In that paper bulk theories of massless scalar fields were considered, and it was observed that the constraint of marginality leads to a theory with derivative interactions.} Another interesting direction for the future would be to reverse the logic, and try to derive the higher order marginality constraints on correlation functions using the effective Lagrangian of the Goldstone bosons in the bulk.
\section{Critical Point}\label{sec:Critical}
In flat-space $\mathbb{R}^3$ the $O(N)$ model undergoes a second-order phase transition at a certain (scheme-dependent) value of the mass-squared parameter $m^2$, which separates the symmetry-breaking and the symmetry-preserving phases. The IR physics for that tuned value of $m^2$ is described by an interacting CFT with $O(N)$ global symmetry. This fixed point can be ---at least formally--- dimensionally continued to $\mathbb{R}^{d+1}$ with $2 < d+1 < 4$, and can be studied perturbatively in $4-\epsilon$ \cite{Wilson:1973jj} and $2+\epsilon$ \cite{Brezin:1975sq} expansion. A continuation to $4 < d+1 < 6$ has been proposed in \cite{Fei:2014yja}.
As we discussed in section \ref{sec:Phases}, on AdS background for intermediate values of the mass-squared parameter we have both symmetry-breaking vacua and a symmetry-preserving one, hence the physics is more similar to that of a first-order phase transition. Nevertheless, we will provide evidence that in the symmetry-preserving vacuum, for a particular value of $m^2$, the theory enjoys conformal symmetry in the bulk. We will refer to this case as the ``critical point''. In order to argue for its existence, we will first discuss more generally how to diagnose the presence of conformal symmetry on AdS background.
As a preliminary observation, note that, contrary to flat space, we cannot define the critical point in terms of an enhancement of the spacetime symmetry. In fact, a generic quantum field theory on (Euclidean) AdS$_{d+1}$ background enjoys a symmetry under the isometry group $SO(d+1,1)$. If it is a conformal field theory, via a Weyl transformation it can be mapped to $\mathbb{R}_+ \times \mathbb{R}^d$
\begin{equation}
\frac{dz^2 + (d\vec{x})^2}{z^2} \to dz^2 + (d\vec{x})^2~,\label{eq:Weyl}
\end{equation}
where $z > 0$ and $\vec{x} \in \mathbb{R}^d$. Hence it is equivalent to a boundary conformal field theory (BCFT) with $d+1$ dimensional bulk, which also has spacetime symmetry $SO(d+1,1)$.
On the other hand, the conformality of the bulk theory implies that among the boundary operators there must exist a scalar operator of protected dimension $d+1$, the so-called displacement operator. Moreover, even though on $\mathbb{R}_+ \times \mathbb{R}^d$ the larger bulk conformal symmetry $SO(d+2,1)$ is broken by the presence of the boundary, it still organizes the local bulk operators in conformal multiplets, and it constrains the bulk OPE. Upon Weyl rescaling, these properties are also imported to the AdS background. These considerations suggest that to detect bulk conformality, besides checking the existence of a scalar boundary operator of dimension $d+1$, we need to look at the properties of correlators when the insertion points are far away from the boundary, or equivalently very close to each other in the bulk. We will make this idea concrete in the following subsection, and then we will apply it to find the AdS critical point of the $O(N)$ model. For the reason explained above, this is equivalent to finding conformal boundary conditions for the $O(N)$ model in flat space, and in fact it will allow us to extract data of the associated BCFT.
\subsection{Conformal Symmetry in AdS: Bulk Two-point Functions}
Consider the two-point function of a scalar operator $O$ on AdS$_{d+1}$. Due to AdS isometries it can only depend on the insertion points through their distance, e.g. the chordal distance, whose square we denote with $\zeta$
\begin{equation}
\langle O(x_1) O(x_2) \rangle = F_{OO}(\zeta)~,~~\zeta \equiv \frac{(\vec{x}_1 - \vec{x}_2)^2 + (z_1-z_2)^2}{z_1 z_2}~.
\end{equation}
Alternatively, we can use the spectral representation (see appendix \ref{app:SpRep}) to view the two-point function as a function of $\nu$
\begin{equation}
\langle O(x_1) O(x_2) \rangle = \int_{-\infty}^{+\infty}d\nu \, \tilde{F}_{OO}(\nu) \Omega_\nu (x_1, x_2)~.\label{eq:nuspace}
\end{equation}
If the theory has bulk conformal symmetry, we can perform the Weyl rescaling \eqref{eq:Weyl} to $\mathbb{R}_+ \times \mathbb{R}^d$. Using the transformation law for primary operators under Weyl rescalings, and assuming the correlator is not affected by a Weyl anomaly, we obtain that
\begin{equation}
\langle O(x_1) O(x_2) \rangle_{\mathbb{R}_+ \times \mathbb{R}^d} = \frac{1}{(z_1 z_2)^\Delta}F_{OO}(\zeta)~,\label{eq:Wrescal}
\end{equation}
where $\Delta$ is the scaling dimension of $O$. The standard parametrization of bulk two-point function in BCFT is
\begin{equation}
\langle O(x_1) O(x_2) \rangle_{\mathbb{R}_+ \times \mathbb{R}^d} = \frac{1}{(4z_1z_2)^\Delta} f_{OO}(\xi)~,\label{eq:btwop}
\end{equation}
where $\xi$ is the $SO(d+1,1)$-invariant cross-ratio
\begin{equation}
\xi = \frac{(\vec{x}_1 - \vec{x}_2)^2 + (z_1-z_2)^2}{4 z_1 z_2} = \frac{\zeta}{4}~.
\end{equation}
Comparing \eqref{eq:Wrescal} and \eqref{eq:btwop} we find the relation between the two parametrizations of the two-point function
\begin{equation}
f_{OO}(\xi) = 4^\Delta F_{OO}(4 \xi)~.\label{eq:WAdS}
\end{equation}
In BCFT, the function $f_{OO}$ admits expansions in two distinct OPE channels, a boundary channel and a bulk channel. The boundary-channel expansion is around $\xi \to \infty$, which means that the insertion points approach the boundary, and it is obtained by replacing both bulk operators with their bulk-to-boundary OPE and summing the resulting boundary two-point functions. When the bulk operators are scalars only boundary scalar operators contribute. The contributions of the conformal family of a certain boundary primary $\hat{O}$ of dimension $\hat{\Delta}$ can be resummed in the boundary block \cite{McAvity:1995zd}
\begin{equation}
f_{OO}^{\rm bdy}(\hat{\Delta},\xi) = \xi^{-\hat{\Delta}} {}_2F_1\left(\hat{\Delta},\hat{\Delta} - \tfrac{d}{2}+\tfrac 12,2\hat{\Delta}-d+1,-\tfrac{1}{\xi} \right)~.\label{eq:bdycb}
\end{equation}
On the other hand, the bulk-channel expansion is around $\xi \to 0$, which means that the insertion points are approaching each other in the bulk, or equivalently they are far away from the boundary. In this case the expansion is obtained by replacing the two operators with their bulk OPE and summing the resulting bulk one-point functions. Only scalar operators can have a non-zero one-point function. Summing over the conformal family of a given bulk primary $O'$ of scaling dimension $\Delta'$, one finds the bulk block \cite{McAvity:1995zd}
\begin{equation}
f_{OO}^{\rm bulk}(\Delta',\xi) = \xi^{-\Delta + \frac{\Delta'}{2}} {}_2F_1\left(\tfrac{\Delta'}{2},\tfrac{\Delta'}{2}, \Delta' - \tfrac{d}{2}+\tfrac 12,-\xi \right)~.\label{eq:bulkcb}
\end{equation}
Given the identification \eqref{eq:WAdS}, a necessary condition for bulk conformality is that the function $F_{OO}$ should similarly admit a sensible expansion in both channels, i.e. both in boundary blocks and in bulk blocks.
However, as it turns out, the expansion in boundary blocks is not useful to diagnose bulk conformality, because an analogous expansion exists for a general quantum field theory on AdS background, even if massive.
One way to derive this boundary-channel expansion is to use that $\tilde{F}_{OO}(\nu)$ has a sequence of poles at $\nu_n = \pm i(\frac d2-\hat{\Delta}_n )$ on the imaginary $\nu$ axis, labeled by a discrete parameter $n$. Therefore, we can close the contour in eq. \eqref{eq:nuspace} and use Cauchy's theorem to rewrite the two-point correlator as a sum over $n$ of $\Omega_{\nu_n} (x_1, x_2)$. The functions $\Omega_{\nu_n} (x_1, x_2)$ coincide with the boundary conformal blocks \eqref{eq:bdycb} for $\hat{\Delta}=\hat{\Delta}_n$. The equivalence between the two sets of functions is due to the fact that they are solutions to the same eigenvalue problem: as shown in \cite{Liendo:2012hy}, the boundary block is an eigenfunction of the quadratic Casimir of the boundary conformal group $SO(d+1,1)$. Regarding $SO(d+1,1)$ as the isometries of AdS$_{d+1}$, this Casimir operator is naturally mapped to the AdS Laplacian, whose eigenfunctions are the functions $\Omega_{\nu} (x_1, x_2)$.\footnote{As an example of the boundary-channel expansion, for a free scalar $\phi$ in AdS with $m^2 = \Delta_\phi(\Delta_\phi -d)$, either conformally-coupled or not, the two-point function is the bulk-to-bulk propagator \eqref{eq:bulktobulk}, which can be seen as a single boundary block of a boundary scalar primary operator $\hat{\phi}$ of dimension $\Delta_\phi$ (which is either $\leq$ or $> \tfrac d 2$ depending on the boundary condition). In fact, in the free theory there is only one operator in the bulk-to-boundary OPE of $\phi$, the operator that in the language of holography is ``dual'' to the bulk scalar field.
Two-point functions of composites of $\phi$ also admit a boundary-channel decomposition for any value of $m^2$. In this case there is an infinite sum over boundary blocks, because the bulk-to-boundary OPE contains infinitely many multi-trace operators built out of the generalized-free-field $\hat{\phi}$. This is a way to understand the evaluation of the bubble diagram ---that can be thought of as the two point function of $\phi^2$ in a free scalar theory--- as an infinite sum of double-trace contributions, in agreement with \cite{Fitzpatrick:2010zm, Fitzpatrick:2011hu} and with our calculation of the diagram in section \ref{sec:Corr}.
The two-point function of the field $\delta \sigma$ derived in section \ref{sec:Corr} is a further example, this time an interacting one. Also in this case we see that for any value of the parameters, hence irrespectively of bulk conformality, the spectral representation has a family of isolated poles on the imaginary axis of $\nu$, and therefore the two-point function can be written as a sum of boundary blocks, with the dimension of the boundary operators determined by the position of the poles as we described.
One can wonder whether it is possible that in some cases such poles would accumulate, or that the spectral representation would have branch-cuts on the imaginary axis. Given the interpretation of a singularity in terms of the exchange of the conformal family of a boundary primary operator, we can rule out these possibilities if we assume that the boundary conformal theory has a discrete spectrum of primary operators, without accumulation points. It might be possible to prove this assumption, but we leave this for future work.}
On the other hand, the existence of the bulk-channel expansion does require bulk conformality. Indeed, the bulk blocks are eigenfunctions of the Casimir of $SO(d+2,1)$, the bulk conformal symmetry \cite{Liendo:2012hy}. Equivalently, the form of the blocks is fixed by using the conformal OPE in the bulk, and the latter is not valid when the bulk theory is massive. This leads to the idea that in order to test bulk conformality we should check whether two-point functions admit an expansion in the bulk blocks of eq. \eqref{eq:bulkcb}.
A practical way to check this, in the case of two identical operators, is to focus on the leading contributions in the limit $\xi \to 0$. If the bulk is conformal, a scalar primary $O'$ of dimension $\Delta'$ appearing in the $O\times O$ OPE contributes a power-law $\xi^{-\Delta + \frac{\Delta'}{2}}$. Its scalar descendants $\square^k O'$ contribute powers shifted by an integer $\xi^{-\Delta + \frac{\Delta'}{2} + k}$, with the sum over $k\in \mathbb{N}$ encoded in the bulk block. If the operators are identical, the leading power comes from the identity operator with $\Delta'=0$, and in this case the full block is just a single power-law $\xi^{-\Delta}$. In particular, since the identity does not have descendants, there should not be integer-shifted powers $\xi^{-\Delta + k}$. We see in examples (but did not attempt to prove in general) that in fact the AdS two-point functions admit an expansion in powers of $\zeta$ as $\zeta \to 0$, both in conformal and massive theories. However, in massive theories, denoting the leading power with the same symbol $\zeta^{-\Delta}$ (even though $\Delta$ loses its interpretation of bulk scaling dimension), it comes accompanied by a tower of ``pseudo-descendant'' terms $\zeta^{-\Delta+k}$, $k\in \mathbb{N}$. The coefficients of these terms are functions of the parameters of the theory, and the bulk-conformality constraint on the parameters therefore comes from setting these coefficients to zero. This condition is not sufficient to ensure bulk-conformality, but since the number of parameters/couplings is finite, the fact that a solution exists for all $k$'s is strong evidence that the theory indeed is conformal for those values of the parameters. We hasten to clarify that strictly speaking the condition is also not necessary, because there could be a primary scalar operator with integer scaling dimension $m$ in the $O\times O$ OPE, whose conformal family produces the integer-shifted powers with $k\geq m$.
In generic theories we do not expect such an operator to exist, but one should keep this possible exception in mind.
We can also derive a similar ---but less rigorous--- condition on the two-point function in $\nu$ space. This is important for our purposes, because we computed the two-point function of the field $\delta \sigma$ in $\nu$ space. To this end, note that the derivation of the flat-space limit in section \ref{sec:FslSpRep} in the appendix can be also understood as the statement that the bulk OPE limit $\zeta \to 0$ maps to $\mathrm{Re}(\nu)\to \infty$. More concretely, we can look at the spectral representation of a power law in AdS$_{d+1}$, which is derived in section \ref{app:power} in the appendix and reproduced here
\begin{equation}
F_\Delta(\zeta) = \left(\frac{\zeta}{4}\right)^{-\Delta} \longleftrightarrow \tilde{F}_\Delta(\nu) = c_{\Delta}\frac{\Gamma(-\tfrac d2+\Delta\pm i \nu )}{\Gamma(\tfrac 12 \pm i \nu)}~, c_{\Delta} \equiv (4\pi)^{\tfrac{d+1}{2}}\frac{\Gamma(\tfrac{d+1}{2}-\Delta)}{\Gamma(\Delta)}~.\label{eq:nupower}
\end{equation}
As explained in more detail in the appendix, the definition of the transform in this case requires an analytic continuation in the parameter $\Delta$. The transform of the power has the following asymptotics at large and real $\nu$
\begin{equation}
\tilde{F}_\Delta(\nu) \underset{\nu\to \infty}{\sim} c_{\Delta} \,\nu^{2\Delta -d-1}\left(1 +\sum_{n>1} \frac{c_n}{\nu^{2n}} \right)~,\label{eq:nupowerexpand}
\end{equation}
where $c_n$ are $\Delta$ and $d$ dependent coefficients. From this result we confirm that an expansion in powers of $\zeta$ in the OPE limit $\zeta \to 0$ maps to an expansion at large $\nu$, i.e. larger positive powers of $\zeta$ map to larger negative powers of $\nu$. Therefore, the condition that in position space the leading contribution must come from the block of the identity is mapped to the following condition in $\nu$ space: the relative coefficients of the leading powers at large $\nu$ should agree with the asymptotic expansion of the spectral transform of a power law $\tilde{F}_\Delta(\nu)$, for a certain value of $\Delta$. What makes this condition less rigorous than the position-space version is that it requires us to commute two operations on the two-point function: $(i)$ taking the spectral transform, and going to the large-$\nu$ limit, and $(ii)$ the bulk OPE expansion. It is not clear that commuting these operations is always allowed.\footnote{In flat space without a boundary, it was observed in \cite{Dymarsky:2014zja} that naively commuting the Fourier transform and the OPE sometimes leads to wrong results.} One might be especially wary of commuting with the OPE expansion if the spectral representation is strictly-speaking not well-defined for the terms that are retained, as is the case for the power-law contribution of the identity, whose spectral representation can only be defined via an analytic continuation.\footnote{We thank the anonymous referee for their comments on this point.} One step towards a more rigorous version of this argument would require computing the spectral transform of a generic bulk block,\footnote{Note that the crossing kernel in $\alpha$-space for a $d$-dimensional BCFT was computed in \cite{Hogervorst:2017kbj}. This result gives the spectral transform of a certain combination of a bulk block and its shadow, and this seems like a promising starting point for the calculation of the transform of the block. 
Moreover, in the presence of a small expansion parameter, like $1/N$ in our setup, the scaling dimensions will be expanded in the small parameter. Therefore to apply this method one also needs to understand the behavior in $\nu$ space of the derivatives of the blocks w.r.t. the dimensions. The large $\nu$ behavior of the OPE data for the four-point function in conformal field theories was analyzed rigorously in \cite{Mukhametzhanov:2018zja} by using a complex Tauberian theorem.} and study its large-$\nu$ behavior. We leave this as an interesting direction for the future. In the following we will proceed as if the leading contribution of operators other than the identity could indeed be estimated by commuting the OPE power series with the large-$\nu$ limit of the spectral transform, and we will see that this allows us to make progress in the specific examples we will consider.
\paragraph{Check: Conformally-coupled Free Scalar} Consider a free scalar $\phi$ in AdS$_{d+1}$ background. The only free parameter is the mass-squared coupling $m^2 = \Delta_\phi(\Delta_\phi-d)$, and we can ask for what value of $m^2$, or equivalently $\Delta_\phi$, the theory has conformal symmetry in the bulk. In this case we already know the answer, i.e. the conformally-coupled scalar has $m^2 = - \frac{d^2-1}{4}$,\footnote{Recall that the conformal coupling on a $d+1$ dimensional background is $\frac{1}{4}\frac{d-1}{d} R\, \phi^2$, where $R$ is the Ricci scalar. On AdS$_{d+1}$ with radius $1$, we have $R = - d(d+1)$.} and we can check that the criterion proposed above reproduces this answer.
For generic $m^2$, and either choice of boundary condition, the leading $\zeta \to 0$ behavior of the two-point function \eqref{eq:bulktobulk} is
\begin{equation}
\langle\phi(x_1)\phi(x_2) \rangle \underset{\zeta\to 0}{\sim} \frac{\Gamma(\tfrac{d-1}{2})}{4\pi^\frac{d+1}{2}} \,\zeta^{-\frac{d-1}{2}}~.\label{eq:leadfreesc}
\end{equation}
In this case, the full two-point function in position space is a power times an hypergeometric function of argument $- \frac{4}{\zeta}$, which we can rewrite as a sum of hypergeometric functions with inverted argument, so it is easily checked that for generic $\Delta_\phi$ the ``pseudo-descendant" powers $\zeta^{-\frac{d-1}{2}+k}$ resum to give
\begin{equation}
\frac{\Gamma(\tfrac{d-1}{2})}{4\pi^\frac{d+1}{2}} \,\zeta^{-\frac{d-1}{2}}\,{}_2F_1\left(\Delta_\phi-\tfrac{d-1}{2},\tfrac{d+1}{2}-\Delta_\phi,\tfrac{3-d}{2},-\tfrac{\zeta}{4}\right)~.
\end{equation}
We see that we can set to zero the coefficient of all the $k>0$ powers at once, by setting $\Delta_\phi = \frac{d\pm1}{2}$. These two values indeed correspond to a conformally-coupled scalar, with the sign fixed by the boundary condition: The case $\Delta_\phi = \frac{d-1}{2}$ corresponds to Neumann boundary conditions, with the boundary operator being just the restriction of $\phi$ to the boundary, while the case $\Delta_\phi =\frac{d+1}{2}$ corresponds to Dirichlet boundary conditions, with the boundary operator being the restriction of the normal derivative of $\phi$ to the boundary.
More interestingly, the same answer can also be obtained just from the knowledge of the spectral representation of the two-point function, i.e.
\begin{equation}
\tilde{F}_{\phi\phi}(\nu) = \frac{1}{\nu^2 + (\Delta_\phi -\tfrac d2)^2}~.\label{eq:freesp}
\end{equation}
In the limit of large $\nu$ this goes like $\nu^{-2}$, which by eq. \eqref{eq:nupowerexpand} indeed corresponds to the power-law \eqref{eq:leadfreesc} in position space. Moreover, by the argument explained above, not only the leading term, but also all the subleading powers $\nu^{-(2+2 n)}$, $n\in \mathbb{N}$ should agree with the transform of the power law \eqref{eq:leadfreesc}. Using eq. \eqref{eq:nupower}, we find that the transform of the power-law \eqref{eq:leadfreesc} is
\begin{equation}
\tilde{F}_{\Delta = \frac{d-1}{2}}(\nu) = \frac{(4\pi)^{\frac{d+1}{2}}}{\Gamma(\frac{d-1}{2})} \frac{1}{\nu^2 + \frac 14}~.\label{eq:freespleadpow}
\end{equation}
Comparing \eqref{eq:freesp} and \eqref{eq:freespleadpow}, we see that in order to match also all the subleading powers $\nu^{-(2+2 n)}$ at once, we need to set $\Delta_\phi$ to the conformal value $\Delta_\phi = \frac{d\pm1}{2}$.
It is worth noticing that in this simple example the spectral representation and the bulk OPE expansion commute. To see this, note that the full spectral representation of the two-point function is proportional to the spectral representation of the identity bulk block, i.e. the leading power-law. This implies that the contribution of the bulk block of the operator $\phi^2$, even though it is non-vanishing in position space, gives no contribution in $\nu$ space. Indeed we can confirm this by noting that the bulk block of $\phi^2$ is an infinite series of positive integer powers of $\zeta$, whose spectral representation vanishes according to \eqref{eq:nupower}. Note that more generally this means that in mean-field theory, if the spectral representation and the bulk OPE commute, the bulk blocks of double-trace operators will drop from the spectral representation of the two-point function.
\subsection{Critical Point of the $O(N)$ Model on AdS$_3$}\label{subsec:critON}
We will now apply the method to detect bulk conformality described above to the two-point function of the field $\delta \sigma$ in the $O(N)$ model, computed in sec. \ref{sec:Corr}. Differently from the rest of the paper, in this subsection we will denote the scaling dimension of the $O(N)$-vector boundary operator with $\hat{\Delta}$, in compliance with the common usage in the BCFT literature, and reserve the symbol $\Delta$ without a hat for bulk scaling dimensions. We reproduce here the result of sec. \ref{sec:Corr} for the spectral representation of the two-point function of $\delta\sigma$
\begin{align}
\tilde{F}_{\delta\sigma\delta\sigma}(\nu) & = -\frac{1}{\lambda ^{-1} + 2\tilde{B}(\nu)}~, \\
\tilde{B}(\nu)& = \tfrac{i}{8\pi\nu}(\psi(\hat{\Delta} -\tfrac 12 - i \tfrac{\nu}{2})-\psi(\hat{\Delta} -\tfrac 12 + i \tfrac{\nu}{2}))~,
\end{align}
where $\hat{\Delta} = \hat{\Delta}_+ = 1 + \sqrt{M^2 + 1}$. We want to determine for what values of the parameters $\lambda$ and $M^2$, if any, the two-point function is compatible with bulk conformal symmetry.
The leading behavior of the bubble diagram for large real $\nu$ is
\begin{equation}
\tilde{B}(\nu)\underset{\nu\to \infty}{\sim} \frac{1}{8 \nu} +\mathcal{O}(\nu^{-2})~.
\end{equation}
As a consequence, for $\lambda < \infty$, the two-point function behaves like $\mathrm{const.} + \nu^{-1}$, and by eq. \eqref{eq:nupowerexpand} the only compatible assignment of scaling dimension is $\Delta_\sigma = 1$. This is the scaling dimension of the scalar bilinear in the free UV theory, hence for $\lambda < \infty$ we cannot find any interesting critical point. We can still ask for what values of the parameters do the series of powers $\nu^{-1-2n}$, $n \in \mathbb{N}$ have coefficients which are compatible with bulk conformality. Not surprisingly, we find that the only solution is the trivial one $\lambda = 0$, which sets the two-point function to zero altogether, as we would expect in the free UV theory. There is no constraint on $M^2$, because in this limit $\delta \sigma$ is decoupled from $\delta \phi$.
In order to find a non-trivial scaling we need to set $\lambda = \infty$. In this case the two-point function behaves like $\nu$ at large $\nu$, and the compatible assignment of scaling dimension is $\Delta_\sigma = 2$. This is indeed the scaling dimension of $\sigma$ in the critical $O(N)$ model. However the parameter $M^2$, or equivalently $\hat{\Delta}$, is still free, and for generic values we do not have bulk conformal symmetry.
To determine the conformal value of $\hat{\Delta}$, we look at the large real $\nu$ expansion of the two-point function for $\lambda = \infty$
\begin{equation}
\tilde{F}_{\delta\sigma\delta\sigma}(\nu) \underset{\lambda = \infty}{=} \!-\frac{1}{2\tilde{B}(\nu)} \!\underset{\nu \to \infty}{\sim} \!\! - 4\left(\nu + \frac{4(\hat{\Delta}-1)}{\pi} + \frac{16(\hat{\Delta}-1)^2}{\pi^2\nu} + \mathcal{O}(\nu^{-2}) \right)~.\label{eq:explinf}
\end{equation}
Before going ahead and matching this expansion with the expansion of the block of the identity, we need to open a brief parenthesis to explain what are the other possible contributions from other scalar operators in the bulk OPE of two $\sigma$'s.
At leading order at large $N$, $\sigma$ is just a generalized-free-field, and besides the identity the OPE contains a tower of double-trace operators, the scalar ones being schematically $\sigma \square^n \sigma$, of dimension $4 + 2n$. Moreover, recall that $\sigma$ has a non-zero 1-point function $\sqrt{N}\Sigma$ (in the normalization in which the connected two-point function starts at $\mathcal{O}(1)$), leading to a disconnected contribution to the two-point function of order $N$, larger than the identity contribution. In the OPE expansion of the two-point function, this disconnected constant piece must be reproduced by the leading-order contribution of the scalar double-trace operators, which also get a 1-point function of order $N$. Considering the two-point function in $\nu$ space, under the assumption that we can commute it with the bulk OPE, the disconnected constant term and the leading contribution of the scalar double-traces trivially match, because they both vanish. This is due to the fact ---that was already observed in the example of the conformally-coupled free scalar--- that bulk blocks of double-trace operators only contain positive integer powers of the cross-ratio, whose spectral representation vanishes according to \eqref{eq:nupower}, and similarly for the constant disconnected term.
The connected part of the two-point function, i.e. the two-point function of $\delta \sigma$, starts at order $\mathcal{O}(1)$ at large $N$, and it potentially receives contributions in the bulk OPE from: $(i)$ the identity operator, $(ii)$ the field $\sigma$ itself, due to a compensation between the $\mathcal{O}(1/\sqrt{N})$ OPE coefficient $C_{\sigma\sigma\sigma}$ and the $\mathcal{O}(\sqrt{N})$ 1-point function, and $(iii)$ the next-to-leading double-trace contribution, namely the correction to their scaling dimension and to their (OPE coefficient $\times$ one-point function). Let us analyze what this would imply for the spectral representation of the two-point function, if we allow ourselves to naively commute the bulk OPE with the spectral transform. In $\nu$ space, the contribution $(iii)$ can only come from the anomalous dimensions of the double-trace operators, because the contribution from the correction to the (OPE coefficient $\times$ one-point function) has the same functional form in position space as the leading-order contribution, which we already argued to vanish in the spectral representation. Moreover, when $d+1 = 3$ there is a dramatic simplification in this OPE, because both the OPE coefficient $C_{\sigma\sigma\sigma}$ \cite{Petkou:1994ad} and the full set of $1/N$ anomalous dimensions of the double-trace operators \cite{Lang:1993ct} happen to vanish precisely at this value of the spacetime dimension. Hence, specifically for $d+1=3$, we are led to the conclusion that only the bulk block of the identity contributes to the two-point function in $\nu$ space at this order.
Using eq. \eqref{eq:nupower} we find that the spectral representation of the block of the identity in this case is
\begin{equation}
\tilde{F}_{\Delta = 2}(\nu) = -16 \pi^2 \nu \coth(\pi \nu)\underset{\nu \to \infty}{\sim} -16 \pi^2 \nu +\mathcal{O}(e^{-2\pi\nu})~.\label{eq:idblocksigma}
\end{equation}
Comparing with \eqref{eq:explinf}, we see that in order to make the first two subleading powers agree with the block of the identity, we necessarily have to set $\hat{\Delta} = 1$. Plugging $\hat{\Delta} = 1$ back in the two-point function, it simplifies to
\begin{equation}
\tilde{F}_{\delta\sigma\delta\sigma}(\nu) \underset{\lambda = \infty, \hat{\Delta} =1}{=} - 4 \,\nu \coth(\tfrac{\pi\nu}{2})\underset{\nu \to \infty}{\sim} -4\,\nu +\mathcal{O}(e^{-\pi\nu})~,\label{eq:conftwopsigma}
\end{equation}
and we see that actually all the subleading powers match, not just the first few shown in equation \eqref{eq:explinf}. Moreover, the spectrum of boundary operators that contribute to this two-point function was worked out in eq. \eqref{eq:sigmaconfspec}, that we reproduce here for convenience
\begin{align}
\hat{\Delta}_n =2n+3 \qquad n\in \mathbb{N}_{\geq 0}\,,\label{eq:sigmaconfspec2}
\end{align}
and we see that in particular there is a scalar operator of dimension 3, that can be the displacement operator.
These observations strongly hint that indeed for $\lambda= \infty$, $\hat{\Delta} = 1$ we have bulk conformal symmetry. On the other hand, we note that there are exponentially small contributions at large $\nu$ that do not agree with the transform of the block of the identity. At finite $\nu$, the two-point function for $\hat{\Delta} =1$ simply does not agree with the spectral representation of the block of the identity. This could indicate that actually for no values of $(\lambda, \hat{\Delta})$ the theory is conformal. More likely, this mismatch is due to the subtlety that we mentioned in the commutation between the bulk OPE and the spectral transform. Even though we fall short of rigorously proving that for $\lambda =\infty$ and $\hat{\Delta} = 1$ the theory has conformal symmetry in the bulk, in what follows we will provide additional evidence that this is the case, by successfully matching boundary conformal data extracted from the AdS correlators with the known results about the conformal boundary conditions of the $O(N)$ model computed in flat space.
\subsection{$O(N)$ BCFT Data from the AdS Correlators}\label{sec:ONBCFT}
The conformal boundary conditions for the large-$N$ critical $O(N)$ model were studied in flat space in \cite{Bray:1977tk, Ohno:1984iy, McAvity:1995zd}. These references considered generic $d+1$ between 2 and 4, and studied two distinct conformal boundary conditions, dubbed {\it ordinary transition} and {\it special transition}, that can be understood as the IR fixed point of the RG started respectively with Dirichlet or Neumann boundary conditions for the $O(N)$ vector fields. In our analysis in AdS$_3$ we focused on the $+$ boundary condition, that is the AdS version of the Dirichlet boundary condition, so we expect to match with the ordinary transition.
The first BCFT datum that we can match is the scaling dimension of the boundary $O(N)$-vector operator: Above we found $\hat{\Delta} = 1$ for $d+1=3$, and indeed the flat-space analysis gives that this scaling dimension is $d-1$ (recall that $d$ here is the boundary dimension) at the ordinary transition.
Secondly, we can look at the dimension of the boundary operators appearing in the bulk-to-boundary OPE of the field $\sigma$. The analysis in flat space at the ordinary transition finds infinitely many such operators $\hat{\sigma}_n$ labeled by a non-negative integer $n$, and their scaling dimensions are
\begin{equation}
\hat{\Delta}_n = d +1+ 2n~.
\end{equation}
Note that in particular there always exists a scalar operator with dimension $d+1$, the displacement operator. Again, we find a match with the spectrum determined from the AdS$_3$ correlator, that we showed in eq. \eqref{eq:sigmaconfspec2}. From the residue of the spectral representation of the two-point function \eqref{eq:conftwopsigma} at the poles, we can easily read-off the bulk-to-boundary OPE coefficients to be
\begin{equation}
b_{\sigma\hat{\sigma}_n}^2 = 32(n+1)~,
\end{equation}
at leading order at large $N$.
Taking as an input from the flat-space studies that for generic $d$ the conformal point is $\lambda = \infty$ and $\hat{\Delta} = d-1$, we verified more generally that for these values of $\lambda$ and $\hat{\Delta}$ the poles of the AdS$_{d+1}$ correlator are located precisely at $\nu_n =-i(\hat{\Delta}_n-\tfrac{d}{2})$. This is a non-trivial check of our general-$d$ formula \eqref{eq:finalBnu} for the bubble diagram.
With no additional work, we can also compute the connected four-point function of the $O(N)$-vector boundary operators at order $1/N$, simply by plugging $\lambda = \infty$ and $\hat{\Delta} = 1$ in the AdS result of eq.s \eqref{eq:scalarfourpfun}-\eqref{eq:g1234explicit}. Taking the residue of the $\nu$ integrand at the $\hat{\sigma}_n$ poles, we compute the leading-order boundary OPE coefficients between two boundary $O(N)$-vector operators and a $\hat{\sigma}_n$, obtaining
\begin{equation}
C_{\hat{\phi}\hat{\phi}\hat{\sigma}_n}^2 = \frac{1}{N}\frac{\pi \Gamma(\frac32+2n)^2}{2^{4n+1}(n!)^2}~.
\end{equation}
These are just some examples of BCFT observables that we could easily extract from the AdS correlators. It might be useful more generally to approach perturbative calculations in BCFT from the point of view of AdS, especially with the aid of the spectral representation. A concrete motivation to explore more this direction is that the computation of $1/N$ corrections in the $O(N)$ BCFT via Feynman diagrams on $\mathbb{R}^2\times\mathbb{R}_+$ is often a daunting task, and the ``bootstrap" approach of subsection \ref{sec:Corr41} could potentially turn out to be more effective. An alternative way would be to analyze them by using the standard conformal bootstrap; namely by studying the crossing equation for the BCFT \cite{Liendo:2012hy}. See \cite{Bissi:2018mcq} for a recent work in that direction.
\section{Correlators of the Gross-Neveu Model in AdS}\label{sec:fermion}
In this section we study correlation functions of the Gross-Neveu (GN) model in AdS$_{d+1}$. The GN model describes $N$ spin $1/2$ fermions, with a quartic interaction preserving $U(N)$ symmetry. It was introduced in \cite{Gross:1974jv} as a solvable two-dimensional model displaying asymptotic freedom and chiral symmetry breaking, and later generalized to higher dimension (see \cite{ZinnJustin:1991yn, Rosenstein:1990nm} and references therein). In $d +1 > 2$ the model is non-renormalizable, so it should be regarded as a low-energy effective theory. It is still sensible to study the scattering of low energy excitations. Moreover, at large $N$ it is possible to show the existence of a UV fixed point, whose observables can be computed in $1/N$ perturbation theory.
\subsection{Computation of the Correlators\label{subsec:GNcompcorr}}
The Lagrangian is
\begin{eqnarray}
\mathcal{L} = \bar{\Psi}^i \, \gamma\cdot \nabla \Psi^i + m \bar{\Psi}^i \Psi^i +\frac{g}{2N} (\bar{\Psi}^i \Psi^i)^2~,
\end{eqnarray}
where $\Psi^i$ are Dirac fermions, and $i= 1, \ldots , N$, and $\gamma$ are the gamma matrices in $d+1$ dimensions. In Poincar\'e coordinates $(z, \vec{x})$, we have gamma matrices with flat indices $\gamma^a = (\gamma^0,\vec{\gamma})$ and to contract with bulk vectors we use the vielbein $e^a_\mu = \frac{1}{z}\delta^a_\mu$. Introducing a HS field $\s$, we can rewrite the Lagrangian as
\begin{eqnarray}
\mathcal{L} = \bar{\Psi}^i \, \gamma\cdot \nabla \Psi^i + m \bar{\Psi}^i \Psi^i -\frac{1}{2 g}\s^2 + \frac{1}{\sqrt{N}} (\bar{\Psi}^i \Psi^i)\s~.
\end{eqnarray}
The equation of motion of $\s$ gives
\begin{eqnarray}
\s= \frac{g}{\sqrt{N}} \bar{\Psi}^i \Psi^i~.
\end{eqnarray}
The effective Lagrangian that defines the $1/N$ perturbation theory is\begin{equation}\label{eq:Leffferm}
\mathcal{L}_{\rm eff} = \bar{\Psi}^i \, \gamma\cdot \nabla \Psi^i + m \bar{\Psi}^i \Psi^i -\frac{1}{2 g}\s^2 + \frac{1}{\sqrt{N}} (\bar{\Psi}^i \Psi^i)\s- N \, \mathrm{tr\,} \log\left(\gamma\cdot \nabla + m + \frac{1}{\sqrt{N}}\sigma \right)~.
\end{equation}
The resulting effective potential can induce a VEV for the field $\sigma$, causing a shift of the mass of the fermions. We will denote the physical mass as
\begin{equation}
M = m + \Sigma~,~~\Sigma \equiv \frac{\langle \sigma\rangle}{\sqrt{N}}~.
\end{equation}
It is actually always consistent to start with $m=0$, because this choice is protected by symmetry (a discrete chiral symmetry left unbroken by the quartic interaction if $d+1$ is even, or parity --by which we mean a reflection w.r.t. one of the boundary coordinates-- if $d+1$ is odd). The VEV for $\sigma$ is then a spontaneous breaking of the symmetry that protects $m=0$. We will consider $m=0$ in the following, so that $M = \Sigma$. For future reference we note that the vacuum equation for $\Sigma$, obtained by taking a derivative of the effective potential in \eqref{eq:Leffferm}, can be written as follows
\begin{equation}
\frac{1}{g}\Sigma = -\mathrm{tr\,} \left[\frac{1}{ \gamma\cdot \nabla + \Sigma}\right] ~.\label{eq:gap}
\end{equation}
This is the familiar gap equation, straightforwardly generalized to AdS background by using the appropriate Dirac operator.
Near the boundary, a solution of the Dirac equation behaves as
\begin{equation}
\Psi^i(z, \vec{x}) \underset{z\to 0}{\longrightarrow} z^{\Delta_+}(\psi_+^i(\vec{x})+\mathcal{O}(z) ) + z^{\Delta_-}(\psi_-^i(\vec{x})+\mathcal{O}(z) )~,
\end{equation}
where $\Delta_{\pm} = \frac{d}{2}\pm M$ and the modes $\psi_\pm^i(\vec{x})$ satisfy $\gamma_0 \psi_\pm^i = \pm \psi_\pm^i$. The two possible $U(N)$-symmetric boundary conditions consist in setting to zero $\psi^i_-$ or $\psi^i_+$, and we call them $+$ and $-$ boundary condition, respectively. Choosing the $\pm$ condition, the boundary theory contains $U(N)$-vector fermionic operators of scaling dimension $\Delta_\pm$. These operators always have half of the components of their parent bulk field, and they are Weyl (Dirac) fermions on the boundary, if the bulk is odd (even, respectively) dimensional. When $d+1$ is even, the real parameter $M$ that enters the mass/dimension formula is obtained by absorbing the possible phase in a chiral rotation, and the leftover discrete chiral transformation flips the sign of $M$. When $d+1$ is odd, the two boundary conditions are exchanged by parity, that flips at the same time the sign of the bulk mass and the chirality of the boundary condition.
For the computation of the AdS correlators we will adopt the same strategy as for the scalar case of section~\ref{sec:Corr}. We will first parametrize the two-point function of the auxiliary field in terms of an unknown bubble function, and then from that we will obtain the boundary four-point function of the $U(N)$-vector operators. Finally, the consistency of the OPE of the boundary theory will allow us to compute the bubble function. To our knowledge, the fermionic bubble diagram in AdS has not been computed before.
\paragraph{``Bootstrapping" the Fermionic Bubble} The bulk two-point function of $\d \sigma \equiv \sigma - \langle\sigma\rangle$ coming from the effective Lagrangian \eqref{eq:Leffferm} is
\begin{align}\label{eq:delsigxyfermion}
\langle \delta\sigma(x) \delta\sigma(y)\rangle = -\left[\frac{1}{g}\mathds{1} - B_F \right]^{-1}(x, y)~,
\end{align}
where $B_F(x,y)$ is a product of 2 bulk-to-bulk propagators
\begin{align}\label{eq:productofbtobfermion}
B_F(x,y) = {\rm Tr} \left[\left(\frac{1}{ \gamma\cdot \nabla + M}\right)^2(x,y)\right]~,
\end{align}
and ${\rm Tr}$ denotes the trace over the fermion indices (to be distinguished from the functional trace $\mathrm{tr\,}$). We denote by $\tilde{B}_F(\nu )$ the spectral representation of the fermionic bubble
\begin{align}
B_F(x,y) =\int_{-\infty}^{\infty} d\nu \,\tilde{B}_F(\nu )\Omega_{\nu}(x,y)~.
\end{align}
Hence, we can write eq.~\eqref{eq:delsigxyfermion} as
\begin{align}\label{eq:spectralforsigmafermion}
\langle\delta \sigma (x)\delta\sigma (y)\rangle=-\int_{-\infty}^{\infty} d\nu \,\frac{1}{g^{-1}- \tilde{B}_F (\nu)}\Omega_{\nu}(x,y)\,.
\end{align}
We can then proceed to compute the boundary 4-point function $$\langle \bar{\Psi}^{i}(P_1,S_1) \Psi^{j}(P_2,S_2) \bar{\Psi}^{k}(P_3,S_3) \Psi^{l}(P_4,S_4) \rangle~. $$ We are using the embedding-space formalism, and $S$ denotes the spinor-polarization variable for the boundary operators, that keeps track of the spinor indices, i.e. $\Psi(P,S)\equiv \bar{S}\Psi(P)$. See ref.s \cite{Weinberg:2010fx, Iliesiu:2015qra, Iliesiu:2015akf, Isono:2017grm} for more details about the embedding formalism for spinors in CFT, and \cite{Nishida:2018opl} for its extension to the bulk fermions. We will use the conventions of \cite{Nishida:2018opl}. The spinor-polarization variable for the bulk fermions will be denoted by $S_b$.
The bulk-to-boundary propagators of the fermion fields with boundary scaling dimension $\Delta$, choosing the + boundary condition for definiteness, are \cite{Nishida:2018opl}
\begin{align}
K^F_{\D} (X,\bar S_b,P,S_\partial) & = \sqrt{ \mathcal{C}^{F}_{\D} }\frac{ \bar S_b \Pi_- S_\partial }{(-2X \cdot P)^{\D+\frac{1}{2}}}~,
\\
\bar{K}^F_{\D} (X, S_b,P, \bar S_\partial) & = \sqrt{ \mathcal{C}^{F}_{\D} } \frac{ \bar S_\partial \Pi_- S_b }{(-2X \cdot P)^{\D+\frac{1}{2}}}~,
\\
\text{with}~~~\mathcal{C}^{F}_{\D} & = \frac{1}{\pi^{d/2}} \frac{\G(\D+\frac{1}{2})}{\Gamma(\D+\frac{1-d}{2})}~,
\end{align}
where $\Pi_{\pm}$ are chiral projectors in embedding space. Let us define
\begin{equation}
S_{12|34} = ( \bar S_{2\partial} \Pi_- S_{1\partial} ) ( \bar S_{4\partial} \Pi_- S_{3\partial})~,~~ S_{14|32} = (\bar{S}_{4\partial} \Pi_- S_{1\partial} )( \bar S_{2\partial} \Pi_- S_{3\partial})~.
\end{equation}
At leading order at large $N$ the four-point function is just given by the mean-field theory answer
\begin{align}
&\langle \bar{\Psi}^{i}(P_1,S_1) \Psi^{j}(P_2,S_2) \bar{\Psi}^{k}(P_3,S_3) \Psi^{l}(P_4,S_4) \rangle\vert_{\mathcal{O}(1)} \nonumber\\ &~~~~~~~~~~~~~~~~~~~= \frac{\delta^{ij}\delta^{kl}S_{12|34}}{(P_{12})^{\Delta+\frac12}(P_{34})^{\Delta+\frac12}}-\frac{\delta^{il}\delta^{jk}S_{14|32}}{(P_{14})^{\Delta+\frac12}(P_{23})^{\Delta+\frac12}}~.
\end{align}
Note that the only allowed Wick contraction is between $\Psi$ and $\bar{\Psi}$, so differently from the scalar case here we only have the s-channel and u-channel contraction, but no t-channel.
At order $1/N$ the four-point function receives contributions from the exchanges of the field $\sigma$ in the bulk, both in the s-channel and the u-channel. Since this diagram is a scalar exchange, the spinor polarization structure is the same as that of the disconnected part. Therefore, we can write the $1/N$ four-point function as follows
\begin{align}
&\langle \bar{\Psi}^{i}(P_1,S_1) \Psi^{j}(P_2,S_2) \bar{\Psi}^{k}(P_3,S_3) \Psi^{l}(P_4,S_4) \rangle\vert_{\mathcal{O}(1/N)} \nonumber \\ &~~~~~~~~~~~~~~~~~~~= \frac{\delta^{ij}\delta^{kl}S_{12|34}\, g_{12|34} - \delta^{il}\delta^{jk}S_{14|32}\, g_{14|32}}{N}~,
\end{align}
where
\begin{align}
S_{12|34}\, g_{12|34} & = \int d X_1 dX_2 \langle\delta\sigma (X_1)\delta\sigma(X_2)\rangle
(\partial_{S_{1b}} \partial_{\bar{S}_{2b}}) \bar{K}^F_{\Delta}(X_1, S_{1b},P_1, \bar S_{1\partial}) K^F_{\Delta}(X_1,\bar S_{2b},P_2,S_{2\partial})
\nn\\
& \times(\partial_{S_{3b}} \partial_{\bar{S}_{4b}}) \bar{K}^F_{\Delta}(X_2, S_{3b},P_3, \bar S_{3\partial})K^F_{\Delta}(X_2,\bar S_{4b},P_4,S_{4\partial})~,\label{eq:1ovNfer}
\end{align}
and similarly for $g_{14|32}$. The derivatives w.r.t. the bulk polarizations implement the contraction of the indices at the interaction vertex. We can evaluate the expression in \eqref{eq:1ovNfer} without much effort by relating it to the calculation that we already did for the scalar. This is possible because, after the bulk spinor indices are appropriately contracted, the product of two fermionic bulk-to-boundary operators becomes proportional to the product of two scalar ones with shifted dimension, namely\footnote{The relation between an AdS scalar exchange with external fermions and a scalar exchange with external scalars, upon shifting the dimensions by $\frac12$, was noted already in \cite{Kawano:1999au}, see eq. (2.18) therein, and more recently in \cite{Faller:2017hyt}. The fact that here we are exchanging a ``composite" scalar, rather than an elementary one, does not affect the argument.}
\begin{align}
& (\partial_{S_{1b}} \partial_{\bar{S}_{2b}}) \bar{K}^F_{\Delta}(X_1, S_{1b},P_1, \bar S_{1\partial}) K^F_{\Delta}(X_1,\bar S_{2b},P_2,S_{2\partial}) \nonumber \\ & =(2\Delta+1-d) (\bar S_{1\partial} \Pi_- S_{2\partial}) K_{\Delta+\frac 12}(X_1,P_1) K_{\Delta+\frac 12}(X_1,P_2)~.
\end{align}
The $\Delta$-dependent prefactor comes from the different normalization of the scalar and fermionic propagators. Using this identity and the scalar result in eq. \eqref{eq:g1234explicit} we obtain\footnote{The scalar result is also multiplied by an additional combinatorial factor of $4$.}
\begin{align}
g_{12|34} & = -\frac{1}{(P_{12})^{\Delta+\frac12}(P_{34})^{\Delta+\frac12}} \nonumber\\ & \times\int\frac{d \nu}{2\pi}\frac{1}{g^{-1}- \tilde{B}_F (\nu)}\frac{\Gamma_{\Delta+\frac{1}{2}-\frac{d+2i\nu}{4}}^2\Gamma_{\Delta+\frac{1}{2}-\frac{d-2i\nu}{4}}^2\Gamma_{\frac{d+2i\nu}{4}}^4}{4\pi^{\frac d2} \Gamma^2_{\Delta +\frac 12}\Gamma^2_{\frac 12 -\frac{d}{2} +\Delta}\Gamma_{i \nu}\Gamma_{\frac d2 + i \nu}}\mathcal{K}_{\frac{d}{2}+i\nu}(z,\bar{z})~,\label{eq:fermg1234}
\end{align}
in terms of the cross-ratios $z,\bar{z}$, where $\mathcal{K}$ as above denotes the scalar conformal block.
Next, we project the four-point function to the $U(N)$ singlet sector in the s-channel (namely $12\to 34$ channel). This is achieved by contracting the correlator with the tensor $\frac{\delta_{ij}\delta_{kl} }{N^2} $. The result reads
\begin{align}
&\frac{1}{N^2}\langle \bar{\Psi}^{i}(P_1,S_1) \Psi^{j}(P_2,S_2) \bar{\Psi}^{k}(P_3,S_3) \Psi^{l}(P_4,S_4) \rangle =
\nn\\
& \frac{ S_{12|34}}{(P_{12})^{\Delta+\frac{1}{2}}(P_{34})^{\Delta+\frac{1}{2}}}+\frac{1}{N} \Big[- \frac{S_{14|32}}{ (P_{14})^{\Delta+\frac{1}{2}}(P_{23})^{\Delta+\frac{1}{2}}}+ S_{12|34}\,g_{12|34} \Big] +\mathcal{O}(\frac{1}{N^2})\,.\label{eq:fourpointferm}
\end{align}
Just like we saw in the scalar calculation, the projection mixes different orders in the large-$N$ expansion. At $\mathcal{O}(1/N)$ after the projection we get contribution from the u-channel of mean-field theory, and the s-channel of the $\sigma$-exchange diagram. Note that, besides the poles coming from the propagator of $\sigma$, the exchange diagram $g_{12|34}$ has spurious poles in the lower-half plane at
\begin{equation}
\frac{d}{2}+ i\nu = 2\Delta+2n+1~,~~n\in\mathbb{N}_{\geq0}
\end{equation}
with residue proportional to the structure $S_{12|34}$. By analogy with the scalar case, we expect these poles to cancel with scalar double-trace operators (products of two fermionic operators), of scaling dimension $2\Delta+2n+1$, arising from the u-channel of mean field theory
\begin{align}
\label{eq:GFFOPEfermion}
\frac{S_{14|32}}{(P_{14})^{\Delta+\frac{1}{2}}(P_{23})^{\Delta+\frac{1}{2}}}= \frac{S_{12|34} }{(P_{12})^{\Delta+\frac{1}{2}}(P_{34})^{\Delta+\frac{1}{2}}} \sum_{n}c_n^2 \,\mathcal{K}_{2\Delta+2n+1,l=0}(z, \bar z)+\dots~,
\end{align}
where $\dots$ denote other double-trace contributions that we will not need to consider. In the appendix \ref{app:fermgff} we compute the coefficients $c_n^2$ in $d=2$ and $d=1$.
Requiring the cancelation between the spurious poles in the lower-half plane and the double-trace operators we obtain the relation
\begin{align}
\frac{1}{g^{-1}-\tilde{B}_F(\nu)} & \frac{ \Gamma_{\Delta+\frac{1}{2}-\frac{d+2i\nu}{4}}^2\Gamma_{\Delta+\frac{1}{2}-\frac{d-2i\nu}{4}}^2\Gamma_{\frac{d+2i\nu}{4}}^4}{4 \pi^{\frac{d}{2}}\Gamma_{\Delta+\frac{1}{2}}^2\Gamma_{\frac{1}{2}-\frac{d}{2}+\Delta}^2\Gamma_{i\nu}\Gamma_{\frac{d}{2}+i\nu}} \nonumber\\& ~~~~~\overset{\frac{d}{2}+i\nu \sim 2\Delta+2n+1}{\sim} ~-\frac{c_n^2}{\frac{d}{2}+i\nu -(2\Delta+2n+1)}\,.
\end{align}
To compensate for the double-pole in the numerator of the l.h.s., the function $\tilde{B}_F(\nu)$ must have simple poles with appropriate residues at these points. Up to a possible constant shift, we can then identify the function $ \tilde{B}_F(\nu)$ with the resulting sum over poles. In the case of the scalar bubble diagram in $d+1<4$, we were then able to fix the constant term, by imposing that the diagram should vanish as a power-law in the large-$\nu$ limit, as prescribed by the flat-space limit. For the fermionic bubble in $d+1\geq 2$, on the other hand, we have two important differences, related to each other: first, the same diagram in flat space {\it grows} with momentum at large momentum, hence we cannot use this constraint to fix the constant shift; secondly, the resulting sum over poles is divergent (we explicitly checked this for the cases in which we computed the coefficients $c_n^2$, namely $d=1,2$). The latter is an expected UV divergence, that of course is also present for the flat-space fermionic bubble. Therefore, we can write
\begin{align}
\label{eq:polfermion}
&(\tilde{B}_F(\nu))_{\rm reg} = \frac{1}{\pi^{\frac{d}{2}}\Gamma_{\Delta+\frac{1}{2}}^2\Gamma_{\Delta-\frac{d-1}{2}}^2 } \\
&\times \left[\sum_{n=0}^\infty \frac{ \Gamma_{2\Delta+n+1-\frac{d}{2}}^2\Gamma_{\Delta+n+\frac12}^4}{ (n!)^2\Gamma_{2\Delta+2n+1-\frac{d}{2}}\Gamma_{2\Delta+2n+1}} \frac{1}{c_n^2} \frac{2(\frac{d}{2}-2\Delta-2n-1)}{(\frac{d}{2} - 2\Delta-2n-1)^2+\n^2}\right]_{\rm reg}+ C(\Delta,d)_{\rm reg} ~.\nonumber
\end{align}
Here the square bracket denotes that the sum can only be defined with some regulator, and we introduced a constant shift $C(\Delta,d)_{\rm reg}$, that depends on the choice of the regulator and cannot be fixed by the knowledge of the poles. Note that one would find an analogous ambiguity for the scalar bubble in $d+1\geq4$.
While for the application to the GN model we will actually need to fix the constant shift, if one is only interested in the $\nu$ dependence of the diagram this ambiguity is not relevant, as we will now show. We will consider the specific cases $d=2$ and $d=1$ (i.e. AdS$_3$ and AdS$_2$) for which we computed the OPE coefficients $c_n^2$ in appendix \ref{app:fermgff}.
In $d=2$, plugging eq.~\eqref{eq:ope2} in eq.~\eqref{eq:polfermion} we obtain the following sum, e.g. with a hard cutoff regulator
\begin{align}
(\tilde{B}_F(\nu))_{d=2,\text{cutoff}} & = -\frac{2}{\pi } \sum_{n=0}^{n_{\text{max}}} \frac{(n+1)(2\Delta+n-1)}{( 4(n+\Delta)^2+ \n^2 ) } \nonumber\\
& = -\frac{i( 4(\Delta-1)^2+\n^2 )}{8\pi \n } \Big( \psi(\Delta+\frac{i\n}{2})- \psi(\Delta-\frac{i\n}{2}) \Big) - \frac{n_{\text{max}}}{2\pi} \label{eq:fermBcut} \\
&~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + C(\Delta,d=2)_{\rm cutoff}+\mathcal{O}(1/n_{\text{max}})~.\nonumber
\end{align}
We can also regularize it with a ``naive'' Pauli-Villars regulator, i.e. subtracting the same diagram as if we added to the theory a bosonic Dirac field with large mass $M_{PV} >0$
\begin{align}
(\tilde{B}_F(\nu))_{d=2,\text{PV}} & = -\frac{2}{\pi } \sum_{n=0}^\infty \Big[ \frac{(n+1)(2\Delta+n-1)}{( 4(n+\Delta)^2+ \n^2 ) } - \frac{(n+1)(2(1 + M_{PV})+n-1)}{( 4(n+1 + M_{PV})^2+ \n^2 ) } \Big] \nonumber\\
& = -\frac{i( 4(\Delta-1)^2+\n^2 )}{8\pi \n } \Big( \psi(\Delta+\frac{i\n}{2})- \psi(\Delta-\frac{i\n}{2}) \Big) - \frac{M_{PV}}{2\pi} \label{eq:fermBPVregu} \\
&~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + C(\Delta,d=2)_{\rm PV}+\mathcal{O}(1/M_{PV})~.\nonumber
\end{align}
A third possible regularization consists in subtracting the same summand at $\n=0$
\begin{eqnarray}
(\tilde{B}_F(\nu))_{d=2,\text{sub}}= -\frac{2}{\pi } \sum_{n=0}^\infty \Big[ \frac{(n+1)(2\Delta+n-1)}{( 4(n+\Delta)^2+ \n^2 ) } - \frac{(n+1)(2\Delta+n-1)}{( 4(n+\Delta)^2 ) } \Big]
\nn\\
= -\frac{i ( 4(\Delta-1)^2+\n^2 )}{8\pi \n } \Big( \psi(\Delta+\frac{i\n}{2})- \psi(\Delta-\frac{i\n}{2}) \Big)+C(\Delta,d=2)_{\rm sub}~.\label{eq:fermBsub}
\end{eqnarray}
As we would expect on physical grounds, comparing the different regularizations we see that the result contains a universal $\nu$-dependent part, plus a constant scheme-dependent shift.
Similarly, we can compute the fermionic bubble diagram in $d=1$. In this case we were only able to perform the sum using a subtraction at $\nu=0$ as UV regulator. Plugging eq.~\eqref{eq:ope1} in eq.~\eqref{eq:polfermion}, and subtracting the $\n=0$ term, we obtain a sum that can be written in terms of a hypergeometric function as follows
\begin{align}\label{eq:finalBnufermion}
&\left(\tilde{B}_F(\nu)\right)_{\rm sub}= \mathcal{B}_{\text{AdS}_2} (\Delta,\nu)+C(\Delta, d=1)_{\rm sub}~,
\end{align}
with
\begin{align}
& \mathcal{B}_{\text{AdS}_2} (\Delta,\nu)=\frac{i\nu\Gamma_{\frac{1}{4}+\Delta}\Gamma_{\frac{1}{2}+\Delta}^2\Gamma_{\frac{1}{2}+2\Delta}}{8\sqrt{\pi}}\times\\
&\left(\Gamma_{\Delta+\frac{1}{4}+\frac{i\nu}{2}}\,{}_6\tilde{F}_{5}\left[\begin{array}{c}\{\frac{3}{2},\frac{1}{4}+\Delta,\frac{1}{2}+\Delta,\frac{1}{2}+\Delta,\frac{1}{2}+2\Delta,\frac{1}{4}+\Delta+\frac{i\nu}{2}\}\\\{2\Delta,1+\Delta,1+\Delta,\frac{5}{4}+\Delta,\frac{5}{4}+\Delta+\frac{i\nu}{2}\}\end{array};1\right]-(\nu \leftrightarrow -\nu)\right)~,\nn
\end{align}
where ${}_6\tilde{F}_{5}$ is the regularized generalized hypergeometric function.
Let us emphasize again that all these results are subject to a constant shift $C(\Delta,d)$ since we only determined the bubble function by specifying the locations and the residues of the poles. To unambiguously determine the shift, one needs to resort to a different physical input, which we will provide in the next subsection.
\subsection{Parity-Preserving Pauli-Villars Regularization}
Using the results in the previous subsection, one can in principle compute the two-point function of $\sigma$ as
\begin{equation}
\tilde{F}_{\delta\sigma\delta\sigma}(\nu) = -\frac{1}{g^{-1} - \tilde{B}_F(\nu)}~,
\end{equation}
where $g^{-1}$ is determined by the gap equation \eqref{eq:gap} as\footnote{Recall that $\Sigma=M=\Delta-\frac d2$.}
\begin{align}\label{eq:gapusedtodetermineg}
Mg^{-1}=-{\rm tr}\left[\frac{1}{\gamma\cdot \nabla + M}\right]\,,
\end{align}
while $\tilde{B}_F(\nu)$ is the bubble function given in \eqref{eq:polfermion}. However, both of these quantities are separately divergent and have to be regulated. Furthermore, the bubble function is subject to a constant shift $C(\Delta,d)$ as we discussed in the previous subsection. Therefore, to proceed, we need to
\begin{enumerate}
\item Choose a regularization scheme which regularizes both the gap equation and $\tilde{B}_F$ at the same time.
\item Use some additional input to determine the constant shift $C(\Delta,d)$.
\end{enumerate}
After carrying out these procedures, the difference of $g^{-1}$ and $\tilde{B}_F$ is expected to become finite and independent of the regularization scheme although each of them is separately scheme-dependent.
\paragraph{Regularization} Let us first discuss the regularization scheme. As shown in the previous subsection, there are multiple ways to regulate the bubble function $\tilde{B}_F$. However, most of the regularizations cannot be applied to the computation of the gap equation, with the exception being the ``naive'' Pauli-Villars regularization performed in \eqref{eq:fermBPVregu}. Applying the Pauli-Villars regularization, the gap equation \eqref{eq:gapusedtodetermineg} changes to
\begin{align}\label{eq:regulateddirac}
M(g^{-1})_{\rm reg}=-{\rm tr}\left[\frac{1}{\gamma\cdot \nabla + M}\right]+{\rm tr}\left[\frac{1}{\gamma\cdot \nabla + M_{\rm PV}}\right]\,.
\end{align}
The right hand side can be computed using the spectral representation of ${\rm tr}\left[\frac{1}{\gamma\cdot \nabla +M}\right]$ given in Appendix \ref{apsubsec:femionloop},
\begin{align}\label{eq:diracloop}
\begin{aligned}
&{\rm tr}\left[\frac{1}{\gamma\cdot \nabla + M}\right]=\\
&c_{d+1}\int_{-\infty}^{\infty} d\nu \left[\frac{1}{\nu^2+(M-\tfrac{1}{2})^2}-\frac{1}{\nu^2+(M+\tfrac{1}{2})^2}\right]\frac{\Gamma(\tfrac{d}{2})\Gamma(1+\tfrac{d}{2}\pm i\nu)}{4 \pi^{\frac{d}{2}+1}(d+1)\Gamma(d)\Gamma(\pm i\nu)}\,,
\end{aligned}
\end{align}
where $c_{d+1}$ is the number of components of a Dirac spinor in $d+1$ dimensions (e.g.~$c_3=2$). Now, evaluating the right hand side of \eqref{eq:regulateddirac} using \eqref{eq:diracloop}, we find that the divergence is {\it not} regulated, contrary to our naive expectation. The same problem shows up also in the analysis in flat space, and it is essentially because the naive Pauli-Villars regularization does not preserve the discrete symmetry of the original Lagrangian which flips the sign of the mass term. The resolution to this problem in flat space is explained in Appendix \ref{app:PV}: The idea is to add $N/2$ bosonic Dirac fields with mass $M_{\rm PV}$ and $N/2$ bosonic Dirac fields with mass $-M_{\rm PV}$ instead of adding $N$ bosonic Dirac fields with mass $M_{\rm PV}$. When the $\sigma $ field takes a non-zero expectation value, these masses are shifted to $M_{+}=M+M_{\rm PV}$ and $M_{-}=M-M_{\rm PV}$. With this new parity-preserving Pauli-Villars regularization, the gap equation reads
\begin{align}
\begin{aligned}
&M(g^{-1})_{\rm reg}=-{\rm tr}\left[\frac{1}{\gamma\cdot \nabla + M}\right]+\frac{1}{2}\left({\rm tr}\left[\frac{1}{\gamma\cdot \nabla + M_{+}}\right]+{\rm tr}\left[\frac{1}{\gamma\cdot \nabla + M_{-}}\right]\right)\,.
\end{aligned}
\end{align}
Using the spectral representation \eqref{eq:diracloop}, we can check that the right hand side is now convergent. The result for $d=2$ (AdS$_3$) reads
\begin{align}\label{eq:Mginverse1}
M(g^{-1})_{{\rm PV}, \,d=2}= \frac{2M^2-M_{+}^2+M_{-}^{2} -\tfrac{1}{2}}{4\pi}=\frac{M^2}{2\pi}-\frac{M M_{\rm PV}}{\pi}-\frac{1}{8\pi}\,,
\end{align}
while the result for $d=1$ (AdS$_2$) reads
\begin{align}\label{eq:Mginverse2}
\begin{aligned}
M(g^{-1})_{{\rm PV}, \,d=1}&=\frac{2M\psi(M)-M_{+}\psi (M_{+})-M_{-}\psi(-M_{-})+1}{2\pi}\\
&=\frac{2M \psi (M)-2M\log M_{\rm PV}-2M+1}{2\pi }+\mathcal{O}(1/M_{\rm PV})\,.
\end{aligned}
\end{align}
For the bubble function $\tilde{B}_{F}$, this regularization gives the same result as the naive Pauli-Villars regularization \eqref{eq:fermBPVregu} and therefore removes the divergence as expected.
\paragraph{Determination of the Constant Shift} Having identified the correct regularization scheme, the next task is to determine the constant shift of the bubble function. For this purpose, let us consider a free fermion\footnote{To emphasize that we are not considering the Gross-Neveu model, here we use a different notation for the fermions.} $\psi$ with mass $M$ and analyze the connected two-point function of a composite operator $\bar{\psi}\psi$. Since this two-point function is precisely given by the bubble diagram, it can be expressed in terms of the bubble function as\footnote{The minus sign comes from our convention of the bubble function.}
\begin{align}
\langle\bar{\psi}\psi (x)\,\,\bar{\psi}\psi (y) \rangle_{\rm connected}=-B_{F}(x,y)\,.
\end{align}
On the other hand, the one-point function of the same operator is given by a trace of the propagator. Therefore we have the relation
\begin{align}
\langle \bar{\psi}\psi (x)\rangle=-{\rm tr}\left[\frac{1}{\gamma\cdot \nabla +M}\right]\,.
\end{align}
Now comes the crucial observation: In free-fermion theory, these two quantities are related by differentiation with respect to mass $M$; more precisely we have the relation\footnote{This can be seen explicitly in the path integral formalism in which the one-point function is given by
\begin{align}
\langle \bar{\psi}\psi(x)\rangle =\frac{1}{Z}\int \mathcal{D}\psi\,\,\bar{\psi}\psi(x)\,\,e^{-\int d^{d+1}y \sqrt{g}\left(\bar{\psi}\gamma\cdot \nabla\psi +M\bar{\psi}\psi\right)}\,,
\end{align}
The differentiation with respect to the mass brings down an extra $-\bar{\psi}\psi$ from the action. In addition, it acts on the partition function $Z$ producing a disconnected correlator. In total, we obtain the integrated connected correlator.}:
\begin{align}
\begin{aligned}
\frac{\partial}{\partial M}\langle \bar{\psi}\psi (x)\rangle =&-\int d^{d+1}y \sqrt{g(y)}\langle\bar{\psi}\psi (x)\,\,\bar{\psi}\psi (y) \rangle_{\rm connected}\,.
\end{aligned}
\end{align}
Translating this relation to the bubble function and using the spectral representation of the integrated correlator \eqref{eq:niceFOO}, we obtain
\begin{align}
-\frac{\partial}{\partial M}{\rm tr}\left[\frac{1}{\gamma\cdot \nabla +M}\right] =\int d^{d+1}y \sqrt{g(y)}B_F(x,y)=\tilde{B}_F(\tfrac{id}{2})\,.
\end{align}
Of course, both sides of the relation are divergent and must be regulated using the regularization that we just explained. Once regulated, the relation allows us to determine the value of $\tilde{B}_F (\nu)$ evaluated at $\nu=id/2$, hence the constant shift, from ${\rm tr}\left[\frac{1}{\gamma\cdot \nabla +M}\right]$ which we already computed in \eqref{eq:Mginverse1} and \eqref{eq:Mginverse2}.
Carrying out these procedures, we get for $d=2$ (AdS$_3$),
\begin{align}\label{eq:BFPVregd2}
\begin{aligned}
(\tilde{B}_F(\nu))_{{\rm PV},\,d=2}=&\frac{-i(4M^2+\nu^2)}{8\pi \nu}\left(\psi (M+1+\tfrac{i\nu}{2})-\psi (M+1-\tfrac{i\nu}{2})\right)\\
&+\frac{1+2M-4M_{\rm PV}}{4\pi}\,,
\end{aligned}
\end{align}
while for $d=1$ (AdS$_2$), we get
\begin{align}\label{eq:BFPVregd1}
\begin{aligned}
(\tilde{B}_F(\nu))_{{\rm PV},\,d=1}=&\mathcal{B}_{\text{AdS}_2} (M+\tfrac{1}{2},\nu)-\mathcal{B}_{\text{AdS}_2} (M+\tfrac{1}{2},\tfrac{i}{2})\\
&+\frac{M\psi^{(1)}(M)+\psi (M)-1-\log M_{\rm PV}}{\pi}\,,
\end{aligned}
\end{align}
where $\psi^{(1)}(x)\equiv d\psi (x)/dx$.
\paragraph{Two-point function of $\sigma$} We are now in the position to compute the two-point function of $\sigma$,
\begin{align}
(F_{\delta\sigma\delta\sigma}(\nu))^{-1}=-(g^{-1})_{\rm PV}+(\tilde{B}_F(\nu))_{\rm PV}\,.
\end{align}
For $d=2$ (AdS$_3$), the difference of \eqref{eq:Mginverse1} and \eqref{eq:BFPVregd2} gives
\begin{align}
(F_{\delta\sigma\delta\sigma}(\nu))^{-1}=\frac{-i(4(\Delta-1)^2+\nu^2)}{8\pi \nu}\left(\psi(\Delta+\tfrac{i\nu}{2})-\psi(\Delta-\tfrac{i\nu}{2})\right)+\frac{2\Delta-1}{8\pi (\Delta-1)}\,.
\end{align}
For $d=1$ (AdS$_2$), the difference of \eqref{eq:Mginverse2} and \eqref{eq:BFPVregd1} gives
\begin{align}
(F_{\delta\sigma\delta\sigma}(\nu))^{-1}=\mathcal{B}_{\text{AdS}_2} (\Delta,\nu)-\mathcal{B}_{\text{AdS}_2} (\Delta,\tfrac{i}{2})+\frac{(\Delta-\tfrac{1}{2})\psi^{(1)}(\Delta-\tfrac{1}{2})}{\pi}-\frac{1}{\pi (2\Delta-1)}\,.
\end{align}
\subsection{Bound State in AdS}
We will now study the physical properties of the correlators obtained above. We will restrict ourselves to $d=2$, i.e. AdS$_3$.
The result of the calculation in the previous subsections is that the connected two-point function of $\sigma$ in the massive phase is free of any divergence or ambiguity and takes the following form
\begin{equation}
\tilde{F}_{\delta\sigma\delta\sigma}(\nu) = \frac{1}{-\frac{i( 4(\Delta-1)^2+\n^2 )}{8\pi\n } \Big( \psi(\Delta+\frac{i\n}{2})- \psi(\Delta-\frac{i\n}{2}) \Big) +\frac{2\Delta-1}{8\pi(\Delta -1)}} ~.\label{eq:twopointGN}
\end{equation}
The resulting spectrum of boundary operators is illustrated in fig.~\ref{fig:boundstate}. For ease of comparison with fig.~\ref{fig:readingoffdimensions} that describes the $O(N)$ model, we plotted the $(-)$inverse of the function $\tilde{F}_{\delta\sigma\delta\sigma}(\nu)$.
We see the by-now-familiar sequence of poles originating from the double-trace operators of the free-theory, whose associated anomalous dimensions are positive and of $\mathcal{O}(1)$ for $\Delta\sim\mathcal{O}(1)$ (as in fig. \ref{fig:boundstate} which has $\Delta =2$), and become small at large $\Delta$. Notably, besides the sequence of double-trace poles, there is an additional pole with associated dimension $\Delta_{b} < 2\Delta + 1$, whose origin can be traced to the factor $( 4(\Delta-1)^2+\n^2 )$ in the denominator of \eqref{eq:twopointGN}. Comparing with the analogous plot for the massive $O(N)$ model in fig. \ref{fig:readingoffdimensions}, we see that the main difference is that in the GN case the central, u-shaped part of the curve intersects the axis, giving rise to this additional pole. We interpret the additional state as a bound state of two fermions in AdS, that only exists at finite coupling. This state manifests itself in the boundary conformal theory as a scalar operator in the spectrum.
We will now verify that in the flat-space limit the additional state indeed approaches the bound-state that is known to exist in the GN model in $\mathbb{R}^3$ \cite{ZinnJustin:1991yn, Rosenstein:1990nm}.
\begin{figure}[t]
\centering
\includegraphics[clip,height=7cm]{fermion.pdf}
\caption{The (-)inverse of the connected two-point function of $\sigma$ in the massive GN model in $d=2$ (i.e. AdS$_3$), for $\Delta = 2$. Since we used the gap equation to determine $g^{-1}$, there is no additional tunable parameter. Besides the zeroes associated to double-trace operators (whose tree-level dimension is denoted with vertical dashed gray lines), there is an additional zero highlighted by a dot. The associated operator corresponds to a bound state of two fermions in AdS that does not exist in the free theory. \label{fig:boundstate}}
\end{figure}
\paragraph{Flat-Space Limit}
Let us start by considering the fermionic bubble diagram with parity-preserving PV regulator, written in eq. \eqref{eq:BFPVregd2}. Taking the limit $L\to\infty$ with $\nu=|p|L$, $\Delta=ML$ and the rescaling $M_{\rm PV}\to L M_{\rm PV}$, we find perfect agreement with the flat-space result \eqref{eq:flatPVbubble} in the appendix.
Next, we can compare the fermionic trace that appears in the gap equation \eqref{eq:gap}. Again, starting from the AdS answer in eq. \eqref{eq:gap} and taking the limit as above, we find perfect agreement with the flat space result in eq. \eqref{eq:1pointMPV}.
It is nice to observe that, even when considering correlation functions that depend on a UV regulator, as long as the regulator can be defined also for the theory on AdS, the dependence on the regulator in flat space can be precisely recovered from the AdS result. This is in agreement with the intuition that short distance effects are not affected by the curvature of the background.
The two checks above immediately imply that the correlator of $\delta\sigma$ also has the correct behavior in the flat-space limit
\begin{equation}
\lim_{L\to \infty}L^{3} \,\tilde{F}_{\delta\sigma\delta\sigma}(\nu = L |p|)=-\frac{1}{(g^{-1})_{PV} - (\tilde{B}_F^{\rm flat}(|p|))_{PV}} =\frac{1}{(p^2 + 4 M^2)\tilde{B}(p^2,M^2) }~,
\end{equation}
which has a bound-state pole at $-p^2 = M^2_b = 4 M^2$, i.e. the onset of the two-fermion threshold. It can be also easily verified that the scaling dimension $\Delta_b$ of the AdS bound state grows asymptotically like $2\Delta$ at large $\Delta$, matching with the mass of the bound state $M_b = 2 M$.
\subsection{Critical Point of the GN Model on AdS$_3$}\label{subsec:critGN}
In flat space $\mathbb{R}^3$, the RG flow of the massless (i.e. $\Sigma = M =0$) GN model can be followed towards the UV at large $N$. In the limit of $g\to\infty$ the correlation functions display scaling behavior, with dimensions that can be systematically computed in $1/N$ expansion, thus providing evidence for the existence of a UV fixed point. The latter fixed point is believed to exist also at finite $N$, and a UV completion has been proposed in \cite{ZinnJustin:1991yn} in terms of the so-called Gross-Neveu-Yukawa model (see \cite{Fei:2016sgs} for a recent perturbative study of this fixed point).
Using the approach of section \ref{sec:Critical} and the correlators computed in the previous subsection, we can now look for this UV conformal fixed point on AdS$_3$ background. Similarly to the $O(N)$ case, the question is for what values of the dimensionless parameters $g$ and $M$, measured in units of the AdS radius, there exists conformal symmetry in the bulk. The manifestations of this symmetry that we will look for are: $(i)$ the existence of an expansion in bulk conformal blocks for the two-point function of $\sigma$, and $(ii)$ the existence of a displacement operator in the spectrum of boundary operators. As we discussed in section \ref{sec:Critical}, the point $(i)$ has the shortcoming that it requires us to commute the spectral representation with the bulk OPE expansion, which is not a rigorous operation, but we saw that nevertheless it allowed us to detect the critical point in the $O(N)$ example.
In this subsection we will denote the boundary dimension of the $U(N)$-vector fermionic operators with $\hat{\Delta}$, and use $\Delta$ without hats for the scaling dimensions of bulk operators. The two-point function of $\sigma$ in eq. \eqref{eq:twopointGN} is only a function of $\hat{\Delta}$, because it was obtained using the gap equation to determine $g^{-1}$. Naively, inspired by flat space, one might think that the critical point in AdS$_3$ must be at the massless point $\Sigma = M = 0$, which corresponds to $\hat{\Delta} = 1$\footnote{Note that the connected two-point function of $\sigma$ in eq. \eqref{eq:twopointGN} actually vanishes at the massless point. However recall that it was derived under the assumption that $\hat{\Delta} >1$, with a strict inequality.}. On the other hand, it is possible to have conformal boundary conditions for the GN model that actually break the symmetry under parity, meaning that we can have a BCFT with non-zero bulk one-point function of $\sigma$. This is analogous to the so-called \emph{extraordinary transition} in the context of the 3d Ising model, in which the $\mathbb{Z}_2$ symmetry is broken by the boundary condition and the spin operator has a non-zero one-point function. We can then proceed using the two-point function of eq. \eqref{eq:twopointGN}, keeping in mind that we are looking for parity-breaking conformal boundary conditions.
Let us analyze what are the possible contributions to the bulk OPE expansion of the two-point function of $\sigma$ at the putative bulk conformal point. Similarly to the $O(N)$ model, at leading order at large $N$ $\sigma$ is a generalized free field of dimension $\Delta_\sigma = 1$. Hence in the leading-order bulk OPE there are double-trace operators, and the scalar ones ---that schematically are $\sigma\square^n \sigma$ and have scaling dimension $2 + 2n$--- can get a non-zero one-point function and therefore contribute to the two-point function. Indeed the two-point function of $\sigma$ at leading order $\sim \mathcal{O}(N)$ is the disconnected piece, due to the non-zero one-point function $\langle \sigma\rangle = \sqrt{N}\Sigma$. This disconnected piece is reproduced by the leading order contribution of the double-trace operators in the OPE (which is of the same order because the OPE coefficients are of order $\sim\mathcal{O}(1)$ and the one-point functions are of order $\sim\mathcal{O}(N)$). Again in analogy with the $O(N)$ model, these leading-order terms drop in the spectral representation, because they are a series of positive integer powers of the cross-ratio.
Next, in the connected two-point function, i.e. the two-point function of $\delta\sigma$, that starts at $\sim\mathcal{O}(1)$, we have contributions in the bulk OPE from the identity operator, and from the $1/N$ corrections of the double-trace contributions. Note that, differently from the $O(N)$ case, in the GN model there can never be a contribution from $\sigma$ itself, because of the discrete symmetry that flips the sign of $\sigma$, enforcing the bulk OPE coefficient to vanish $C_{\sigma\sigma\sigma} = 0$ (for the $O(N)$ model this is only an accident that happens to be true for $d+1=3$, but there is no symmetry visible along the RG that enforces it). Another difference from the $O(N)$ model is that in this case we expect the double-trace operators $\sigma \square^n \sigma$ to receive an anomalous dimension at order $1/N$. Recall that in the spectral representation the contribution from the order $\sim\mathcal{O}(1/N)$ correction to the (OPE coefficient $\times$ one-point function) drops, because it has the same dependence on the cross-ratio as the leading-order contribution.
Summarizing the last two paragraphs, in the bulk OPE expansion of $F_{\delta\sigma\delta\sigma}$ at the conformal point we expect to find the power-law contribution from the bulk block of the identity operator, and a contribution from the $1/N$ expansion of the blocks of the double-trace operators. Under the assumptions that this OPE can be mapped to the large-$\nu$ limit of $\tilde{F}_{\delta\sigma\delta\sigma}$, in this limit we expect to have a contribution from the transform of the power-law block of the identity, and additional contributions from the spectral transform of derivatives of power-laws, associated to the anomalous dimensions of double-trace operators.
The first few terms in the expansion for large real $\nu$ of the two-point function are
\begin{align}
&\tilde{F}_{\delta\sigma\delta\sigma}(\nu) \underset{\nu \to \infty}{\sim} \frac{8}{\nu}\left[1 + \frac{4(\hat{\Delta} - \frac 12)(\hat{\Delta} - \frac 32)}{\pi(\hat{\Delta}-1)\nu} + \frac{1}{\nu^2}\left(\left(\frac{4(\hat{\Delta} - \frac 12)(\hat{\Delta} - \frac 32)}{\pi(\hat{\Delta}-1)}\right)^2 - 4 (\hat{\Delta} -1)^2\right)\right. \nonumber\\ &\left.+ \frac{1}{\nu^3}\left(\left(\frac{4(\hat{\Delta} - \frac 12)(\hat{\Delta} - \frac 32)}{\pi(\hat{\Delta}-1)}\right)^3 - \frac{64 (\hat{\Delta} -1)(\hat{\Delta} - \frac 12)(\hat{\Delta} - \frac 32)}{3\pi}\right) + \mathcal{O}\left(\frac{1}{\nu^4}\right)\right]~.\label{eq:sigmatwopointGN}
\end{align}
Comparing the leading power $\nu^{-1}$ to eq. \eqref{eq:nupowerexpand}, we find that the only compatible assignment of scaling dimension to $\sigma$ is $\Delta_\sigma = 1$, which is precisely the expected value at the UV fixed point of the GN model. Using this value of the dimension in eq. \eqref{eq:nupower}, we find that the spectral representation of the bulk block of the identity operator is
\begin{equation}
\tilde{F}_{\Delta = 1}(\nu) = 8\pi^2 \frac{\coth(\pi \nu)}{\nu} \underset{\nu \to \infty}{\sim} 8\pi^2\frac{1}{\nu}(1+\mathcal{O}(e^{-2\pi \nu}))~.
\end{equation}
The bulk blocks of the double-trace operators $\sigma\square^n\sigma$ in position space are a series of powers $\zeta^{n + k}$, with $k$ a non-negative integer labeling descendants. Hence in $\nu$ space the contributions from their anomalous dimensions are proportional to a derivative w.r.t. $\Delta$ of the transform of the power law $\zeta^{-\Delta}$, evaluated at $\Delta = -n-k$. Among all these contributions, the leading one at large $\nu$ comes from $n=k=0$ and is
\begin{equation}
\left(\frac{d}{d\Delta}\tilde{F}_{\Delta}(\nu)\right)\vert_{\Delta = 0} = -\frac{4\pi^2 \coth(\pi \nu)}{\nu+\nu^3}\underset{\nu \to \infty}{\sim} -4\pi^2\frac{1}{\nu^3}\left(1+\mathcal{O}\left(\frac{1}{\nu^2}\right)\right)~.
\end{equation}
Hence, in order for the two-point function to have a sensible bulk OPE, it should agree with the bulk block of the identity at large $\nu$ at least up to order $\nu^{-3}$. By inspection of eq. \eqref{eq:sigmatwopointGN} we see that this requires $\hat{\Delta} = \frac 32$ (the solution $\hat{\Delta} = \frac 12$ is outside the regime of the $+$ boundary condition, i.e. $\hat{\Delta} \geq 1$; it is tempting to speculate that $\hat{\Delta} = \frac 12$ is the solution in the case of $-$ boundary condition, however a more careful analysis is needed to verify this). Specifying to this value, the two-point function simplifies to
\begin{equation}
\tilde{F}_{\delta\sigma\delta\sigma}(\nu)\vert_{\hat{\Delta}=\frac 32} = \frac{8}{\nu} \coth(\tfrac{\pi\nu}{2})\frac{1}{1+\frac{1}{\nu^2}}~.\label{eq:sigmatwopointGNconf}
\end{equation}
The resulting two-point function deviates at large $\nu$ from the block of the identity due to the additional powers coming from the factor $\frac{1}{1+\frac{1}{\nu^2}}$. These power-law deviations precisely match the possible powers from the anomalous dimensions of double-trace operators. Moreover, the spectrum of boundary operators that contribute to this two-point function (i.e. the poles of $\tilde{F}_{\delta\sigma\delta\sigma}(-i(h-1))$ as a function of $h$) is
\begin{equation}
\hat{\Delta}_n =2n+3 \qquad n\in \mathbb{N}_{\geq 0}\,.
\end{equation}
In particular, we find a boundary scalar operator of dimension 3, that can be the displacement operator. We view these facts as a strong hint that the theory at $\hat{\Delta} = \frac 32$ and $g$ fixed by the gap equation has bulk conformal symmetry.
It is interesting to observe that for the $O(N)$ model we found the conformal value of the boundary scaling dimension to be $\hat{\Delta}=1$, which is the boundary scaling dimension for a free-fermion theory with either choice of boundary condition (the corresponding boundary operator is just the restriction of some components of the bulk fermionic field to the boundary), while for the GN model we found $\hat{\Delta} = \frac 32$, which is the boundary scaling dimension for a free-scalar theory with Dirichlet boundary condition (the corresponding boundary operator is the normal derivative of the bulk scalar field, restricted to the boundary; the second solution $\hat{\Delta} = \frac 12$ would correspond to a free scalar with Neumann boundary condition). This observation is likely to have some interpretation in the context of the bosonization duality that these theories enjoy in presence of additional interactions with bulk Chern-Simons gauge fields \cite{Aharony:2012nh}.
\subsection{GN BCFT Data from the AdS Correlators}
We can now straightforwardly repeat the analysis that we performed for the $O(N)$ case in subsection \ref{sec:ONBCFT} also for the GN model, and derive some of the data of the BCFT associated to the AdS critical point found in the previous subsection. To our knowledge, the conformal boundary conditions in the GN model were never studied before and our results provide the first predictions for its BCFT data.
We can directly read from the previous subsection that this BCFT contains $U(N)$-vector boundary fermionic operators with scaling dimension $\hat{\Delta} = \frac 32$, and that the bulk-to-boundary OPE of the operator $\sigma$ contains a family of scalar operators $\hat{\sigma}_n$ with scaling dimensions $3 + 2n$, with $n$ a non-negative integer, which includes the displacement operator.\footnote{Note that even though $\sigma$ is odd under parity, the boundary operators $\hat{\sigma}_n$ do not need to be, because the boundary condition that we are discussing breaks parity.} While the operators $\hat{\sigma}_n$ with $n>1$ are a continuation of the double-trace operators of the free-fermion theory, the displacement operator $\hat{\sigma}_0$ corresponds to the bound state in AdS. Through the AdS analysis we thus discover that the existence of this operator in the spectrum of the BCFT is connected to the existence of a bound state in the S-matrix of the gapped phase.
Looking at the residues of the two-point function \eqref{eq:sigmatwopointGNconf}, we obtain that the leading order bulk-to-boundary OPE coefficients of $\sigma$ with these operators are
\begin{equation}
b_{\sigma\hat{\sigma}_n}^2 = \frac{64(n+1)}{4n(n+2)+3}~.
\end{equation}
Plugging the two-point function of $\sigma$ in the four-point function of the $U(N)$-vector fermionic fields in \eqref{eq:fermg1234}-\eqref{eq:fourpointferm}, and looking at the residues of the integrand in $\nu$, we can also easily compute the boundary OPE coefficients between two $U(N)$-vector fields and a $\hat{\sigma}_n$ at first non-trivial order, i.e. $\mathcal{O}(1/N)$, finding
\begin{equation}
C_{\hat{\Psi}\hat{\Psi}\hat{\sigma}_n}^2 = \frac{1}{N}\frac{\pi (2n+1)(2n+3)\Gamma(\frac32+2n)^2}{2^{4n+4}(n!)^2}~.
\end{equation}
As we already remarked while analyzing the $O(N)$ BCFT, we stress that these data are not straightforward to obtain via a Feynman diagram calculation in flat-space. Hence it would be interesting to explore further whether the techniques used here can lead to an efficient calculation of additional data of the GN BCFT, perhaps including also subleading orders in $1/N$.
\section{Conclusion}\label{sec:conclusion}
In this paper we studied the dynamics of large-$N$ vector models on AdS background. We demonstrated that the large-$N$ techniques familiar from flat space can be efficiently imported to AdS, with the spectral representation being the main technical tool required. The solvability at large $N$ allowed us to explore the finite-coupling regime of field theories on AdS, revealing phenomena that are not visible in the usual perturbative regime. Moreover, we explicitly saw in these examples that one can obtain both the flat-space S-matrix and the correlators in BCFT from the AdS correlation functions.
In the context of the models considered in this paper, a clear direction for the future is to explore whether the simpler analytic structure of the correlators in AdS, as a function of $\nu$, allows one to efficiently compute $1/N$ corrections. We saw a hint of how this could work in section~\ref{subsubsec:corrections}. Moreover, it would be desirable to clarify the validity of the large-$\nu$ analysis that we use to detect bulk conformality in sections~\ref{subsec:critON} and \ref{subsec:critGN}, and to try to make this approach more rigorous.
More generally, it would be interesting to explore if techniques similar to the ones used for the vector models can be applied to other theories. The first possible generalization that naturally presents itself would amount to introducing Chern-Simons (CS) gauge-fields in AdS$_3$ coupled to the $O(N)$ (or $U(N)$) symmetry. These CS-matter theories are solvable at large $N$ in flat space, they enjoy a bosonization duality \cite{Aharony:2012nh}, and their S-matrix in the massive phase displays a non-standard crossing-symmetry property \cite{Jain:2014nza}. If the relevant diagrams can be resummed on AdS$_3$, it should be possible to see these properties of the S-matrix emerge from the boundary correlators,\footnote{The boundary correlators in AdS that map to the 2 to 2 amplitude of $O(N)$ vector particles are correlators of line operators in the bulk that end on charged operator insertions at the boundary.} and also study the conformal boundary conditions for the critical points of these theories, hopefully obtaining more checks of the duality on the way. This would require us to consider more general diagrams than the bubble diagrams encountered in this paper, e.g. ladder diagrams.
Another possibility is to consider theories on AdS$_n\times$S$^m$. When the bulk is conformal, this would describe a defect conformal field theory \cite{Billo:2016cpy} since AdS$_n\times$S$^m$ can be mapped to $\mathbb{R}^{n+m}$ with a codimension-$(m+1)$ defect via a Weyl transformation. It would in particular be interesting to consider an analogue of the twist defect \cite{Gaiotto:2013nva,Yamaguchi:2016pbj,Soderberg:2017oaa} and understand how the mass deformation in AdS allows us to interpolate such defect CFT correlators with the flat-space S-matrix.
We hope that the results of this paper convinced the reader that quantum field theory in AdS is a rich and interesting subject which connects various different physics. As the closing remark\footnote{This last remark is largely motivated by an inspiring talk given by Sasha Zamolodchikov in a bootstrap workshop in the Azores \cite{Zamolodchikov:2018}.}, let us suggest yet another potentially interesting direction which we did not explore in this paper. In conformal field theories, the operator product expansion is extremely powerful since we have a good understanding of its analytic properties such as the radius of convergence \cite{Pappadopulo:2012jk,Hogervorst:2013sma}. By contrast, the general properties of the OPE in massive quantum field theories are poorly understood. One might naively think that the operator product expansion in such theories is at best asymptotic and has a zero radius of convergence. However, in cases where one has analytical control of a theory, be it integrability or large $N$, one often finds a much better behavior \cite{Zamolodchikov:2018}. Addressing this question directly in flat space would be a hard problem, but one might be able to make some progress for theories in AdS since any correlation functions in AdS admit an alternative expansion, which is the \emph{boundary} operator product expansion as we explained in section \ref{sec:Critical}, and their analytic properties are likely to be under better control than those of their flat-space counterparts. It would be interesting if the idea of placing QFTs in AdS will help solving such a foundational question.
\section*{Acknowledgements}
We thank Shira Chapman, Davide Gaiotto, Andrea Guerrieri, Daniel Kapec, Zohar Komargodski, Juan Maldacena, Marco Meineri, Matthijs Hogervorst, Marco Serone, Joao Penedones, Vladimir Rosenhaus and Pedro Vieira for useful discussions. DC and SK would like to thank Perimeter Institute for Theoretical Physics where part of this work was done. Research at Perimeter Institute is supported by the Government of Canada through Industry Canada and by the Province of Ontario through the Ministry of Research \& Innovation. The work of SK is supported by DOE grant number DE-SC0009988.
|
1,314,259,995,001 | arxiv | \section{Conclusion}
In this paper, we propose a knowledge-enhanced recommendation model in the hyperbolic space (Hyper-Know) for top-K recommendation. Hyper-Know learns the user and item embeddings as well as the knowledge graph representation in the Poincaré ball model to capture the hierarchical structure in the knowledge graph. In addition, we incorporate hyperbolic attention to select the most important neighboring entities of each item. To adaptively control the regularization effect, a bilevel optimization mechanism is proposed to generate a fine-grained regularization effect between recommendation and the knowledge graph. Experimental results on three real-world datasets clearly validate the performance advantages of our model over multiple state-of-the-art methods and demonstrate the effectiveness of each of the proposed constituent modules.
\section{Evaluation}
In this section, we first describe the experimental set-up. We then report the results of the conducted experiments and demonstrate the effectiveness of the proposed modules.
\subsection{Datasets}
The proposed model is evaluated on three real-world datasets from various domains with different sparsities: \textit{Amazon-book}, \textit{Last-FM} and \textit{Yelp2018}, which are fully adopted from~\cite{DBLP:conf/kdd/Wang00LC19}. The \textit{Amazon-book} dataset is adopted from the Amazon review dataset~\cite{DBLP:conf/www/HeM16} with the \textit{book} category, which covers a large amount of user-item interaction data, e.g., user ratings and reviews. The \textit{Last-FM} dataset is collected from \textit{last.fm} music website, where the tracks are viewed as the items. A subset of data from Jan. 2015 to Jun. 2015 is selected. The \textit{Yelp2018} dataset is adopted from the 2018 edition of the \textit{Yelp} challenge, where local businesses like restaurants and bars are viewed as the items.
All the above datasets follow the 10-core setting to ensure that each user and item have at least ten interactions. For Amazon-book and Last-FM, items are mapped into Freebase entities via title matching if there is a mapping available. For Yelp2018, the item knowledge from the local business information network (e.g., category, location, and attribute) is extracted as KG data. The data statistics after preprocessing are shown in Table \ref{tab:data_statistics}.
For fair comparison, these three datasets in our experiments are exactly the same as those used in~\cite{DBLP:conf/kdd/Wang00LC19}. For each dataset, 80\% of interaction data of each user is randomly selected to constitute the training set, and we treat the remaining 20\% as the test set. From the training set, 10\% of interactions are randomly selected as validation set to tune hyper-parameters. The experiments are executed five times and the average result is reported.
\begin{table}[ht]
\centering
\caption{\label{tab:data_statistics}The statistics of the datasets.}
\begin{tabular}{l|r|r|r}
\hline
& Amazon-book & Last-FM & Yelp2018 \\
\hline
\#Users & 70,679 & 23,566 & 45,919 \\
\#Items & 24,915 & 48,123 & 45,538 \\
\#Interactions & 847,733 & 3,034,796 & 1,185,068 \\
\hline
\#Entities & 88,572 & 58,266 & 90,961 \\
\#Relations & 39 & 9 & 42 \\
\#Triplets & 2,557,746 & 464,567 & 1,853,704 \\
\hline
\end{tabular}
\end{table}
\begin{table*}[ht]
\centering
\caption{\label{tab:performance_comparison}The performance comparison of all methods in terms of \textit{Recall@20} and \textit{NDCG@20}. The best performing method is boldfaced. The underlined number is the second best performing method. * indicates the statistical significance for $ p <= 0.01 $ compared to the best baseline method based on the paired t-test.}
\begin{tabular}{|c|c c| c c| c c c| c| c|}
\hline
& \textbf{FM} & \textbf{NFM} & \textbf{CKE} & \textbf{CFKG} & \textbf{RippleNet} & \textbf{GC-MC} & \textbf{KGAT} & \textbf{Hyper-Know} & \multicolumn{1}{l|}{\textbf{Improv.}} \\\hline
\multicolumn{10}{|c|}{Recall@20} \\
\hline
\textit{Amazon-book} & 0.1345 & 0.1366 & 0.1343 & 0.1142 & 0.1336 & 0.1316 & \underline{0.1489} & \textbf{0.1534}* & 3.23\% \\
\textit{Last-FM} & 0.0778 & 0.0829 & 0.0736 & 0.0723 & 0.0791 & 0.0818 & \underline{0.0870} & \textbf{0.0949}* & 9.08\% \\
\textit{Yelp2018} & 0.0627 & 0.0660 & 0.0657 & 0.0522 & 0.0664 & 0.0659 & \textbf{0.0712} & \underline{0.0683} & N/A\\
\hline
\multicolumn{10}{|c|}{NDCG@20} \\
\hline
\textit{Amazon-book} & 0.0886 & 0.0913 & 0.0885 & 0.0770 & 0.0910 & 0.0874 & \underline{0.1006} & \textbf{0.1075}* & 6.86\% \\
\textit{Last-FM} & 0.1181 & 0.1214 & 0.1184 & 0.1143 & 0.1238 & 0.1253 & \underline{0.1325} & \textbf{0.1533}* & 16.70\% \\
\textit{Yelp2018} & 0.0768 & 0.0810 & 0.0805 & 0.0644 & 0.0822 & 0.0790 & \underline{0.0867} & \textbf{0.0897}* & 3.46\% \\
\hline
\end{tabular}
\end{table*}
\subsection{Evaluation Metrics} \label{sec:metrics}
We evaluate all the methods in terms of \textit{Recall@K} and \textit{NDCG@K}. For each user, Recall@K (R@K) indicates what percentage of her rated items emerge in the top $ K $ recommended items. NDCG@K (N@K) is the normalized discounted cumulative gain at $ K $, which takes the position of correctly recommended items into account.
\subsection{Methods Studied}
To demonstrate the effectiveness of our model, we compare to the following recommendation methods:
\begin{itemize}
\item \textbf{FM}~\cite{DBLP:conf/icdm/Rendle10}, a classical factorization model, which incorporates the second-order feature interactions between input features.
\item \textbf{NFM}~\cite{DBLP:conf/sigir/0001C17}, a state-of-the-art factorization model, which subsumes FM under a neural network.
\item \textbf{CKE}~\cite{DBLP:conf/kdd/ZhangYLXM16}, a representative regularization-based method, which exploits semantic embeddings derived from TransR~\cite{DBLP:conf/aaai/LinLSLZ15} to enhance the matrix factorization.
\item \textbf{CFKG}~\cite{DBLP:journals/algorithms/AiACZ18}, a model that applies TransE~\cite{DBLP:conf/nips/BordesUGWY13} on the unified graph including users, items, entities, and relations, casting the recommendation task as the prediction of (u, Interact, i) triplets.
\item \textbf{RippleNet}~\cite{DBLP:conf/cikm/WangZWZLXG18}, a model that combines regularization- and path-based methods, which enrich user representations by adding those of items within paths rooted at each user.
\item \textbf{GC-MC}~\cite{DBLP:journals/corr/BergKW17}, a model designed to employ a graph convolutional network on graph-structured data. Here the model is applied on the user-item knowledge graph.
\item \textbf{KGAT}~\cite{DBLP:conf/kdd/Wang00LC19}, a state-of-the-art KG enhanced model, which employs a graph neural network and an attention mechanism to learn from high-order graph-structured data for recommendation.
\item \textbf{Hyper-Know}, the proposed model, which learns the knowledge-enhanced recommendation in the Poincaré ball and applies hyperbolic attention for distinguishing neighboring entities and bilevel optimization for adaptive regularization, respectively.
\end{itemize}
\subsection{Experiment Settings}
In the experiments, the latent dimension of all the models is set to 64. The parameters for all baseline methods are initialized as in the corresponding papers, and are then carefully tuned to achieve optimal performances. The learning rate is tuned amongst $ [0.0001, 0.0005, 0.001, 0.005, 0.01] $, and we search for the coefficient of L2 normalization over the range $ [0.0001 ,..., 0.1] $. To prevent overfitting, the dropout ratio is selected from the range $ [0.0, 0.1, ..., 0.9] $ for NFM, GC-MC, and KGAT. The dimension of attention network $ k $ is tested over the values $ [16, 32, 64] $. Regarding NFM, the number of MLP layers is set to $1$ with $64$ neurons according to the original paper.
For RippleNet, we set the number of hops and the memory size as $2$ and $8$, respectively. For KGAT, we set the depth as $3$ with hidden dimension $64$, $32$, and $16$, respectively. The network architectures of the above methods are configured to be the same as described in the original papers. For Hyper-Know, the curvature $ c $ is set to 1 and the batch size is set to $ 4096 $. The hyper-parameters are tuned on the validation set. Our experiments are conducted with PyTorch running on GPU machines (NVIDIA Tesla V100).
\subsection{Performance Comparison}
The performance comparison results are shown in Table \ref{tab:performance_comparison}.
\textbf{Observations about our model}.
First, the proposed model---Hyper-Know, achieves the best performance for most evaluation metrics on three datasets, which illustrates the superiority of our model. Second, Hyper-Know outperforms KGAT on the Amazon-book and Last-FM datasets. Although KGAT adopts the attention model to distinguish the entity importance in the knowledge graph, it may not effectively capture the hierarchical structure between entities, which can be well-modeled by learning the entity and relation embeddings in the hyperbolic space. One possible reason why Hyper-Know does not outperform KGAT for the Recall@20 metric on the Yelp2018 dataset is that most of the entities in this KG are linked according to whether they have the same attributes, such as \textit{HasTV}. Most of these attributes are very generic, which means that the KG provides information of limited value. As a result, much of the transfer that Hyper-Know performs from the KG to the recommendation part for the Yelp2018 dataset is likely to be noise.
Third, Hyper-Know achieves better performance than GC-MC and RippleNet. Although GC-MC and RippleNet can model high-order connectivities, they fail to identify the important entities that would make a difference in recommendation. On the other hand, Hyper-Know employs an attention model in the hyperbolic space to learn the neighborhood representation of an item and transfers the knowledge from the KG to the item representation via regularization.
Fourth, Hyper-Know obtains better results than CKE. One possible reason is that CKE adopts a fixed power of regularization during the whole training process. By contrast, Hyper-Know performs fine-grained regularization to regularize the item and its neighborhood.
Fifth, Hyper-Know outperforms FM and NFM. One reason may be that using a distance as the scoring function can capture more fine-grained user preference.
\textbf{Other observations}. First, KGAT outperforms GC-MC and RippleNet. KGAT is capable of exploring the high-order connectivity in an explicit way and applies a graph attention model to aggregate the neighbors in the user-item knowledge graph in a weighted manner. Second, FM and NFM achieve better performance than CFKG and CKE in most cases. One major reason is that FM and NFM capture the second-order connectivity between users and entities, whereas CFKG and CKE model connectivity on the granularity of triples, leaving high-order connectivity untouched. Third, RippleNet achieves better performance than FM. This may verify that incorporating two-hop neighboring items is of importance to enrich user representations. Fourth, NFM performs better than FM. One major reason is that NFM has stronger expressiveness, since the hidden layer allows NFM to capture the nonlinear and complex feature interactions between user, item, and entity embeddings.
\begin{table}[ht]
\centering
\caption{\label{tab:ablation_analysis}The ablation analysis. \textit{Att} denotes the attention model, \textit{Avg} denotes the embedding average operation, \textit{E} denotes the Euclidean space, and \textit{H} denotes the hyperbolic space.}
\begin{tabular}{ |l|c|c|c|c| }
\hline
\multirow{2}{*}{Architecture} & \multicolumn{2}{c|}{\textit{Amazon-book}} & \multicolumn{2}{c|}{\textit{Last-FM}} \bigstrut \\\cline{2-5}
& R@20 & N@20 & R@20 & N@20 \bigstrut \\
\hline
(1) BPR+E & 0.1017 & 0.0729 & 0.0604 & 0.1112 \\
(2) BPR+H & 0.1167 & 0.0833 & 0.0656 & 0.1191 \\
(3) BPR+Att+E & 0.1121 & 0.0812 & 0.0746 & 0.1319 \\
(4) BPR+Att+H & 0.1447 & 0.1025 & 0.0885 & 0.1453 \\
(5) BPR+Avg+H & 0.1250 & 0.0897 & 0.0775 & 0.1358 \\
(6) Hyper-Know & \textbf{0.1534} & \textbf{0.1075} & \textbf{0.0949} & \textbf{0.1533} \\
\hline
\end{tabular}
\end{table}
\subsection{Ablation Analysis} \label{sec:ablation}
To verify the effectiveness of the proposed model in the Poincaré ball, the hyperbolic attention model, and the adaptive regularization mechanism, we conduct an ablation study in Table~\ref{tab:ablation_analysis}. This demonstrates the contribution of each module to the Hyper-Know model. In (1), we use the Euclidean distance to measure the user preference optimized by the BPR loss. In (2), we apply the distance in the Poincaré ball to measure users' preferences and optimize using Eq.~\ref{eq:bpr_dist}. In (3), we integrate the TransE-style attention on the top of (1) in the Euclidean space. In (4), we add hyperbolic attention to (2). In (5), we replace the attention model in (4) with an average operation in the hyperbolic space. In (6), we present the overall Hyper-Know model to show the effectiveness of the adaptive regularization mechanism.
From the results shown in Table~\ref{tab:ablation_analysis}, we make the following observations. First, comparing (1) and (2), we can observe that measuring the user preference by calculating distance in the hyperbolic space achieves better performance than calculating distance in the Euclidean space. This confirms the results reported in~\cite{DBLP:conf/wsdm/TranT0CL20}. Second, from (2) and (3), we observe that incorporating the hyperbolic attention model significantly improves the model performance. Third, in (3) and (4), we compare the performance of the attention model in both the Euclidean and hyperbolic space. From the results, we can observe that the attention model achieves better results in the hyperbolic space than in the Euclidean space. Fourth, from (1), (2), (3) and (4), we can observe that equipping the recommendation model with the KG either in the Euclidean space or hyperbolic space can improve the recommendation performance. Fifth, from (4) and (5), we observe that by distinguishing the importance of each neighbour of an item through attention, we achieve considerable improvement compared to a simple average. Comparing (4) and (6), we can observe that the adaptive regularization can provide the fine-grained regularization power.
\begin{table}[ht]
\caption{\label{tab:training_time}Training time comparison.}
\begin{tabular}{|c|c|c|c|c|}
\hline
& CKE & CFKG & KGAT & Hyper-Know \\ \hline
Amazon-book & 55s & 22s & 457s & \textbf{15s} \\ \hline
Last-FM & 53s & 27s & 137s & \textbf{22s} \\ \hline
Yelp2018 & 63s & 37s & 352s & \textbf{20s} \\
\hline
\end{tabular}
\end{table}
\subsection{Training Efficiency}
In this section, we compare the training efficiency with other state-of-the-art KG-enhanced methods in terms of the training speed. We compare the time taken for one epoch of training.
From the results reported in~\cite{DBLP:conf/sigir/ChenZMLM20}, these compared methods take a similar number of epochs to converge as well as our proposed method.
Since RippleNet is not computationally efficient and takes much longer to train, we omit the comparison with RippleNet. All the experiments are conducted on a single GPU of an NVIDIA Tesla V100. All the compared methods are executed for 20 epochs and we report the average computation time, which is shown in Table~\ref{tab:training_time}. The training time comparison shows that Hyper-Know is more computationally efficient than other state-of-the-art methods, and the reason follows.
Compared to CKE, Hyper-Know has a smaller number of learnable parameters (8.3 million vs. 11.4 million on the Last-FM dataset). Compared to KGAT and CFKG, Hyper-Know does not incorporate the users into the KG, which makes the scale of the KG much smaller.
\begin{figure}[ht!]
\centering
\begin{subfigure}[t]{0.25\textwidth}
\centering
\includegraphics[width=\linewidth]{pic/beta_amazon.pdf}
\caption{\label{fig:amazon_beta_var}$ \beta $ on Amazon-book}
\end{subfigure}%
\begin{subfigure}[t]{0.25\textwidth}
\centering
\includegraphics[width=\linewidth]{pic/beta_last-fm.pdf}
\caption{\label{fig:lastfm_beta_var}$ \beta $ on Last-FM}
\end{subfigure}
\caption{\label{fig:hyper_parameter}The variation of $ \beta $.}
\end{figure}
\subsection{Influence of Hyper-parameters}
The value of $ \beta $, which controls how strongly the item embedding is regularized towards its neighborhood representation, is an important hyper-parameter when the adaptive regularization mechanism is not used. Its effect on the Amazon-book and Last-FM datasets is shown in Figure~\ref{fig:hyper_parameter}.
From the results in Figure~\ref{fig:hyper_parameter}, we observe that the value of $ \beta $ does affect the recommendation performance, with performance deteriorating by as much as 10 percent if a suboptimal value is chosen. Furthermore, there is no fixed value that achieves performance that is as good as the performance obtained by using the proposed adaptive mechanism. These results demonstrate that the fine-grained and adaptive regularization benefits the recommendation task, which confirms the results reported in~\cite{DBLP:conf/wsdm/Rendle12}.
\begin{figure}[ht!]
\centering
\begin{subfigure}[t]{0.2\textwidth}
\centering
\includegraphics[width=\linewidth]{pic/left.png}
\caption{\label{fig:1}Entity-48130}
\end{subfigure}%
\begin{subfigure}[t]{0.1\textwidth}
\end{subfigure}
\begin{subfigure}[t]{0.196\textwidth}
\centering
\includegraphics[width=\linewidth]{pic/right.png}
\caption{\label{fig:2}Entity-97468}
\end{subfigure}
\caption{\label{fig:case_study}The embedding visualization of selected entities.}
\end{figure}
\subsection{Embedding Visualization}
To verify whether the learned embedding in the Poincaré ball can capture the hierarchical structure in the knowledge graph, we train Hyper-Know in the 2D space on the Last-FM dataset and visualize the entities in the 2D hyperbolic space. We randomly select two nodes and their two-hop neighbors to visualize.
The visualization is shown in Figure~\ref{fig:case_study}. The largest dot denotes the selected entity, the medium-sized dots denote the first-hop neighbors of the selected entity, and the smallest dots denote the second-hop neighbors.
From Figure~\ref{fig:case_study}, we can observe that these three kinds of nodes form hierarchical patterns in the Poincaré ball, which suggests that the embeddings learned in the hyperbolic space can represent the hierarchical relationships.
\section{Introduction}
With the rapid growth of Internet services and mobile devices, personalized recommender systems play an increasingly important role in modern society. They can reduce information overload and help satisfy diverse service demands. Such systems bring significant benefits to at least two parties. They can: (i) help users easily discover products from millions of candidates, and (ii) create opportunities for product providers to increase revenue.
To provide a more accurate and interpretable recommendation service, knowledge graphs (KGs) are being incorporated into recommender systems. A KG is a heterogeneous graph, where nodes function as entities and edges represent relations between the entities. This is an effective data structure to model relational data, e.g., two movies directed by the same director. Several recent works have integrated KGs into the recommendation model, and the approaches can be divided into two branches: path-based~\cite{DBLP:conf/kdd/HuSZY18,DBLP:conf/cikm/WangZWZLXG18} and regularization-based~\cite{DBLP:conf/kdd/ZhangYLXM16,DBLP:conf/kdd/Wang00LC19}. Path-based methods extract paths from the KG that carry the high-order connectivity information and feed these paths into the predictive model. To handle the large number of paths between two nodes, researchers have either applied path selection algorithms to select prominent paths or defined meta-path patterns to constrain the paths. By contrast, regularization-based methods devise additional loss terms that capture the KG structure and use these to regularize the recommender model learning.
Although many effective models have been proposed, we argue that there are still several avenues for enhancing performance. First, previous works learn the KG representations in the Euclidean space. As has been observed in other application domains, this may not effectively capture the hierarchical structure that is known to exist within KGs~\cite{DBLP:conf/icml/SalaSGR18}. Second, methods like CKE~\cite{DBLP:conf/kdd/ZhangYLXM16}, CFKG~\cite{DBLP:journals/algorithms/AiACZ18}, and RippleNet~\cite{DBLP:conf/cikm/WangZWZLXG18} do not distinguish between neighboring entities, adjusting according to their relative importance and informativeness, when learning the representation of each entity.
This may lead to undesirable blurring of information from relations in the KG and an incomplete understanding of an entity. Third, all the regularization-based methods adopt a fixed hyper-parameter. We argue that the regularization degree should be adaptive, taking on different values for different entities according to the relevance and value of the information from the knowledge graph. Furthermore, different training phases may need different magnitudes of regularization power values, so the hyper-parameter values should evolve during training.
To tackle the aforementioned problems, we propose a knowledge-enhanced recommendation model in the hyperbolic space, namely \textit{Hyper-Know}, to tackle the top-K recommendation task. In particular, we map the entity and relation embeddings of the KG as well as user and item embeddings to the Poincaré ball model. This allows us to capture the hierarchical structure in the KG. We incorporate an attention model in the hyperbolic space, and use the Einstein midpoint for aggregation, in order to form a representation of the neighborhood of each item in the knowledge graph. We then use a regularization term to encourage the representation of an item to remain close to the representation of its neighborhood (in the hyperbolic space). This transfers the relational and structural information from the knowledge graph to the recommendation model.
To adaptively control the regularization effect, we model the learning of adaptive and fine-grained regularization factors as a bilevel (inner and outer) optimization problem~\cite{DBLP:journals/tec/SinhaMD18}. We build a proxy function to explicitly link the learning of the regularization related parameters with the outer objective function.
We extensively evaluate our model on three real-world datasets, comparing it with many state-of-the-art methods using a variety of performance validation metrics. The experimental results not only demonstrate the improvements of our model over other baselines but also show the effectiveness of the proposed components.
To summarize, the major contributions of this paper are:
\begin{itemize}[leftmargin=*]
\item To model the hierarchical structure of KG, we map the entity and relation embeddings of the KG into the Poincaré ball along with user and item embeddings. To the best of our knowledge, ours is the first work to consider knowledge-enhanced recommendation in the hyperbolic space.
\item To transfer the knowledge from the KG to the recommendation model, we incorporate hyperbolic attention and use the Einstein midpoint to aggregate the neighboring entities of an item to form a neighborhood representation.
\item To learn the adaptive regularization factors, we cast the learning process as a bilevel optimization problem and build a proxy function to explicitly update the regularization-related parameters.
\item Experiments on three real-world datasets show that Hyper-Know significantly outperforms the state-of-the-art methods for the top-K recommendation task.
\end{itemize}
\section{Preliminaries}
\subsection{Problem Formulation}
The knowledge-based recommendation considered in this paper takes as inputs the user implicit feedback and the item knowledge graph. The implicit feedback is represented by a number of user-item pairs $ \mathcal{D} = \{(u, v)\} \subseteq \mathcal{U} \times \mathcal{I} $, where $ \mathcal{U} $ is the user set and $ \mathcal{I} $ is the item set.
The item knowledge graph $ \mathcal{G} = \{(h, r, t)\} \subseteq \mathcal{E} \times \mathcal{R} \times \mathcal{E}$ can be formulated as a set of triples, each consisting of a relation $ r \in \mathcal{R} $ and two entities $ h,t \in \mathcal{E} $, referred to as the \textit{head} and \textit{tail} of the triple.
Then the top-$ K $ recommendation task in this paper is formulated as: given the training item set $ \mathcal{S}_{u} $ of user $ u $, and the non-empty test item set $ \mathcal{T}_{u} $ (requiring that $ \mathcal{S}_{u} \cup \mathcal{T}_{u} = \mathcal{D}_{u} $ and $ \mathcal{S}_{u} \cap \mathcal{T}_{u} = \emptyset $) of user $ u $, the model must recommend an ordered set of items $ \mathcal{X}_{u} $ such that $ |\mathcal{X}_{u}| \leq K $ and $ \mathcal{X}_{u} \cap \mathcal{S}_{u} = \emptyset $. Then the recommendation quality is evaluated by a matching score between $ \mathcal{T}_{u} $ and $ \mathcal{X}_{u} $, such as Recall@$K$.
\subsection{Hyperbolic Geometry of the Poincaré Ball}
The Poincaré ball model is one of five isometric models of hyperbolic geometry~\cite{cannon1997hyperbolic}, which is a non-Euclidean geometry with constant negative curvature. Formally, the Poincaré ball \(\left(\mathbb{B}_{c}^{d}, g^{\mathbb{B}}\right)\) of radius \(1 / \sqrt{c}, c>0\) is a \(d\) -dimensional manifold \(\mathbb{B}_{c}^{d}=\left\{\mathbf{x} \in \mathbb{R}^{d}: c\|\mathbf{x}\|^{2}<1\right\}\) equipped with the Riemannian metric \(g^{\mathbb{B}}\) which is conformal to the Euclidean metric \(g^{\mathbb{E}}=\mathbf{I}_{d}\) with the conformal factor \(\lambda_{\mathbf{x}}^{c}=2 /\left(1-c\|\mathbf{x}\|^{2}\right)\), i.e., \(g^{\mathbb{B}}=\left(\lambda_{\mathbf{x}}^{c}\right)^{2} g^{\mathbb{E}} .\) The distance between two points \(\mathbf{x}, \mathbf{y} \in \mathbb{B}_{c}^{d}\) is measured along a geodesic (i.e. a shortest path between the points) and is given by:
\begin{equation}
d_{\mathbb{B}}(\mathbf{x}, \mathbf{y})=\frac{2}{\sqrt{c}} \tanh ^{-1}\left(\sqrt{c}\left\|-\mathbf{x} \oplus_{c} \mathbf{y}\right\|\right) \,,
\label{eq:hyper_dist}
\end{equation}
where $ \|\cdot\| $ denotes the Euclidean norm and $ \oplus_{c} $ represents Möbius addition~\cite{DBLP:conf/nips/GaneaBH18}:
\begin{equation}
\mathbf{x} \oplus_{c} \mathbf{y}=\frac{\left(1+2 c\langle\mathbf{x}, \mathbf{y}\rangle+c\|\mathbf{y}\|^{2}\right) \mathbf{x}+\left(1-c\|\mathbf{x}\|^{2}\right) \mathbf{y}}{1+2 c\langle\mathbf{x}, \mathbf{y}\rangle+c^{2}\|\mathbf{x}\|^{2}\|\mathbf{y}\|^{2}} \,.
\label{eq:mobius_add}
\end{equation}
\input{new_pic/framework}
\section{Methodology}
In this section, we introduce the proposed model, \textit{Hyper-Know}, which integrates the knowledge graph with the recommendation task in the hyperbolic space. We first introduce the user preference learning in the hyperbolic space. Then we illustrate the hyperbolic attention mechanism that is used to distinguish items' neighboring entities in the knowledge graph. We next explain how to adaptively learn the recommendation objective and knowledge graph by a bilevel optimization formulation.
Lastly we introduce the training and prediction procedure of the proposed model.
\subsection{Learning User Preference}
User preference modeling lies at the core of recommender systems. Recently, distance metric learning has been widely applied to measure the user preference on items, yielding substantial performance gains~\cite{DBLP:conf/www/HsiehYCLBE17}. In this approach, the distance $ d_{\mathbb{B}}(\mathbf{u}, \mathbf{v}) $ between user $ u $ and item $ v $ is used to measure the user preference on a certain item. To learn the user preference, we apply the Bayesian Personalized Ranking loss~\cite{DBLP:conf/uai/RendleFGS09} to capture the pairwise preference of a user $u$ for an item $ v $ that the user has accessed compared to a randomly sampled item $ {v}' $:
\begin{equation}
\mathcal{L}_{R}(u, v, {v}'; \Theta) = - \mathrm{ln} \, \sigma \left(d_{\mathbb{B}}(\mathbf{u}, {\mathbf{v}}') - d_{\mathbb{B}}(\mathbf{u}, \mathbf{v}) \right) \,,
\label{eq:bpr_dist}
\end{equation}
where $ \mathbf{u} $, $ \mathbf{v} $, and $ {\mathbf{v}}' \in \mathbb{B}_{c}^{d} $, $ \sigma $ is the sigmoid function, and $ d $ is the dimension of the manifold. $\Theta$ represents the parameters of the recommender model.
\subsection{Regularizing Neighboring Entities}
Knowledge graphs (KGs), consisting of (head entity, relationship, tail entity) triples, are efficient data structures for representing factual knowledge and are widely used in applications such as question answering~\cite{DBLP:conf/aaai/ZhangDKSS18}. Recently, KGs have been applied in recommender systems to not only enhance the recommendation performance but also provide interpretable recommendation results.
To effectively exploit KGs in recommender systems, we treat them as relational inductive biases~\cite{DBLP:journals/corr/abs-1806-01261} between items. During the learning process, the relations in the KG can be used as regularizers; if two items link to one or more common entities in the KG, this suggests that a user might have similar preferences for the two items. However, an item can link to multiple entities in the KG and the relative importance of different entities can differ greatly. Moreover, the entities can contribute in different ways to the description of the item. This motivates us to propose an attention mechanism in the Poincaré ball model.
Considering an item $ v $, we use $ \mathcal{N}_v = \{ (v, r, t)| (v, r, t) \in \mathcal{G} \} $ to denote the set of neighboring triples for which $ v $ is the head entity. Then we apply a TransE-style~\cite{DBLP:conf/nips/BordesUGWY13} scoring function ($ ||\mathbf{h} + \mathbf{r} - \mathbf{t}|| $) to calculate the matching score between an item and its neighboring entity in $ \mathcal{N}_v $:
\begin{equation}
\alpha(v, t) = \mathrm{exp}\big(-d_{\mathbb{B}}(\mathbf{v} \oplus_{c} \mathbf{r}, \mathbf{t}) \big) \,.
\end{equation}
The usual way to aggregate multiple attentions in the Euclidean space is weighted midpoint aggregation. The corresponding operation in the hyperbolic space is not immediately obvious, but fortunately, the extension to hyperbolic space does exist in the form of the \textit{Einstein midpoint}. It has a simple form in the Klein disk model~\cite{cannon1997hyperbolic}:
\begin{equation}
\label{eq:neighbourhood}
\mathbf{n}_v = f_{\mathbb{K} \rightarrow \mathbb{B}} \left( \sum_{t\in\mathcal{N}_v} \frac{\alpha(v,t) \gamma(\mathbf{t})}{\sum_{{t}'} \alpha(v, {t}') \gamma({\mathbf{t}}')} f_{\mathbb{B} \rightarrow \mathbb{K}} (\mathbf{t}) \right) \,,
\end{equation}
where the elements of $ \gamma(\mathbf{t}) = \frac{1}{\sqrt{1 - c||\mathbf{t}||^{2}}}$ are the Lorentz factors and $ f_{\mathbb{B} \rightarrow \mathbb{K}} (\cdot)$ is the function to transform the coordinates from the Poincaré ball model to the Klein disk model. The Klein model is supported on the same space as the Poincaré ball, but the same point has different coordinates in each model. Let $ \mathbf{x}_{\mathbb{B}} $ and $ \mathbf{x}_{\mathbb{K}} $ denote the coordinates of the same point $\mathbf{x}$ in the Poincaré and Klein models, respectively. Then the following transition formulas hold:
\begin{equation}
\begin{aligned}
\mathbf{x}_{\mathbb{K}} &= f_{\mathbb{B} \rightarrow \mathbb{K}} (\mathbf{x}_{\mathbb{B}}) = \frac{2 \mathbf{x}_{\mathbb{B}}}{1 + c||\mathbf{x}_{\mathbb{B}}||^2} \,, \\
\mathbf{x}_{\mathbb{B}} &= f_{\mathbb{K} \rightarrow \mathbb{B}} (\mathbf{x}_{\mathbb{K}}) = \frac{\mathbf{x}_{\mathbb{K}}}{1 + \sqrt{1 - c||\mathbf{x}_{\mathbb{K}}||^2}} \,.
\end{aligned}
\end{equation}
We call $\mathbf{n}_v$ in~\eqref{eq:neighbourhood} the neighborhood representation of item $ v $. During the training process we add a regularizing term that encourages the neighborhood representation $\mathbf{n}_v$ to be close to the item's representation $ \mathbf{v} $.
The goal is to transfer the inductive bias in KG to the item representation:
\begin{equation}
\mathcal{L}_{K} (v; \Theta) = d_{\mathbb{B}}(\mathbf{v}, \mathbf{n}_v) \,.
\end{equation}
Combining with the user preference learning objective $ \mathcal{L}_{R} $, the overall knowledge-enhanced objective can be:
\begin{equation}
\mathcal{L} (u, v, {v}'; \Theta) = \mathcal{L}_{R} (u, v, {v}'; \Theta) + \beta \mathcal{L}_{K} (v; \Theta) \,,
\label{eq:overall_obj}
\end{equation}
where $ \beta $ is to balance the effect from the KG.
\subsection{Adaptive and Fine-grained Regularization}
Previous works~\cite{DBLP:conf/kdd/Wang00LC19,DBLP:conf/cikm/WangZWZLXG18,DBLP:conf/kdd/ZhangYLXM16} that derive information from a KG in the recommender setting use a single and fixed number for $ \beta $ in Eq.~\ref{eq:overall_obj} to train the overall objective. However, employing a single fixed value for $ \beta $ can have several drawbacks. First, different datasets may require different impact levels of regularization from KGs. Treating $\beta$ as a fixed value requires an extra hyper-parameter search procedure for each dataset to better realize the power of KGs.
Second, different items may need different degrees of regularization. Using the same value for every item would limit the achievable performance improvement that can be derived from the KG information. Third, in different training phases, the model may need different magnitudes of regularization power.
To address the problems outlined above, we propose an adaptive regularization scheme to apply different strengths of regularization to each item and to adjust the strength throughout training. We formulate Eq.~\ref{eq:overall_obj} as:
\begin{equation}
\begin{aligned}
\mathcal{L} (u, v, {v}'; \Theta, \bm{\beta}) = \mathcal{L}_{R} (u, v, {v}'; \Theta) + \sigma(\beta_v) \mathcal{L}_{K} (v; \Theta) \,,
\end{aligned}
\end{equation}
where $ \beta_v $ is $v$-th value of $ \bm{\beta} \in \mathbb{R}^{|\mathcal{I}|} $ and $ \sigma(\beta_v) \in (0, 1) $ where $ \sigma(\cdot) $ is the sigmoid function. Unfortunately, directly minimizing this objective function is not able to achieve the desired purpose of adaptively controlling the regularization. The reason is that, considering $ \beta_v $ explicitly appears in the loss function, constantly decreasing the value of $ \beta_v $ is the straightforward way to minimize the loss. As a consequence, instead of reaching optimal values for the model, all $ \beta_v $ will end up with very small values close to zero, leading to unsatisfactory results.
To tackle the above problem, we model the learning of recommendation models and the adaptive regularization of KG as a bilevel optimization problem~\cite{DBLP:journals/anor/ColsonMS07}:
\begin{equation}
\begin{aligned}
\underset{\bm{\beta}}{\mathrm{min}} \: \mathcal{J}_{outer}\left(\Theta^*(\bm{\beta})\right) &:= \sum_{(u, v) \in \mathcal{D} \wedge (u, {v}') \not\in \mathcal{D}} \mathcal{L}_{R} \big(u, v, {v}'; \Theta^{*}(\bm{\beta}) \big) \\
\mathrm{s.t.} \: \Theta^{*}(\bm{\beta}) & = \underset{\Theta}{\mathrm{argmin}}\: \mathcal{J}_{inner}(\Theta, \bm{\beta}) \\ &:= \sum_{(u, v) \in \mathcal{D} \wedge (u, {v}') \not\in \mathcal{D}} \mathcal{L} (u, v, {v}'; \Theta, \bm{\beta}) \,.
\end{aligned}
\label{eq:bilevel}
\end{equation}
Here $ \Theta $ contains the model parameters $ \mathbf{u} $, $ \mathbf{v} $, $ \mathbf{r} $ and $ \mathbf{t} $.
The objective function $\mathcal{J}_{inner}$ attempts to minimize $\mathcal{L}$ with respect to $ \Theta $ with $\bm{\beta}$ fixed.
Meanwhile, the objective function $\mathcal{J}_{outer}$ optimizes $\mathcal{L}_{R}$ with respect to $ \bm{\beta} $ through $ \Theta^*(\bm{\beta}) $, considering $ \Theta^*(\bm{\beta})$ as a function of $\bm{\beta}$.
\begin{algorithm}[hbt]
\SetAlgoLined
Initialize optimizers $\operatorname{OPT}_{\Theta}$ and $\operatorname{OPT}_{\bm{\beta}}$ \;
\While{not converged}{
$\Theta$ Update~(fix $\bm{\beta}^t$):\\
\Indp $\Theta^{t+1} \longleftarrow \operatorname{OPT}_{\Theta}\left(\Theta^t, \nabla_{\Theta^t} \mathcal{J}_{inner}(\Theta^t, \bm{\beta}^t)\right)$ \;
\Indm Proxy:\\
\Indp $\tilde\Theta^{t+1}(\bm{\beta}^t) := \Theta^t - \alpha \nabla_{\Theta^t} \mathcal{J}_{inner}(\Theta^t, \bm{\beta}^t)$ \;
\Indm$\bm{\beta}$ Update~(fix $\Theta^t$):\\
\Indp $\bm{\beta}^{t+1} \longleftarrow \operatorname{OPT}_{\bm{\beta}} \Big( \bm{\beta}^t, \nabla_{\bm{\beta}^t}\mathcal{J}_{outer}\big(\tilde\Theta^{t+1}(\bm{\beta}^t)\big)\Big)$ \;
}
\caption{Iterative Training Procedure}
\label{alg:opt}
\end{algorithm}
As most existing models use gradient-based methods for optimization, a simple approximation strategy with less computation is introduced as follows:
\begin{equation}
\begin{aligned}
\nabla_{\bm{\beta}} \mathcal{J}_{outer}\left(\Theta^*(\bm{\beta})\right)\approx
\nabla_{\bm{\beta}} \mathcal{J}_{outer} \left(\Theta - \alpha \nabla_{\Theta} \mathcal{J}_{inner}(\Theta, \bm{\beta}) \right)\,.
\end{aligned}
\end{equation}
In this expression, $\alpha$ is the learning rate for one step of inner optimization. Related approximations have been validated in~\cite{DBLP:conf/wsdm/Rendle12,DBLP:conf/iclr/LiuSY19,DBLP:conf/kdd/MaMZTLC20}. Thus, we can define a proxy function to link $\bm{\beta}$ with the outer optimization:
\begin{equation}
\tilde{\Theta}(\bm{\beta}) := \Theta - \alpha \nabla_{\Theta} \mathcal{J}_{inner}(\Theta, \bm{\beta})\, .
\label{eq:gradient_approx}
\end{equation}
For simplicity, we use two optimizers $ \operatorname{OPT}_{\Theta} $ and $ \operatorname{OPT}_{\bm{\beta}} $ to update $ \Theta $ and $ \bm{\beta} $, respectively. The iterative procedure is shown in Alg.~\ref{alg:opt}.
\subsection{Training and Prediction}
After incorporating a parameter regularization term to avoid overfitting, the overall loss function is:
\begin{equation}
\begin{aligned}
& \underset{\bm{\beta}}{\mathrm{min}} \: \mathcal{J}_{outer}\left(\Theta^*(\bm{\beta})\right) \\
& \mathrm{s.t.} \: \Theta^{*}(\bm{\beta}) = \underset{\Theta}{\mathrm{argmin}}\: \mathcal{J}_{inner}(\Theta, \bm{\beta}) + \lambda ||\Theta||_\mathrm{F} \,,
\end{aligned}
\label{eq:final_loss}
\end{equation}
where $ \lambda $ is a hyper-parameter. When minimizing the objective function, the partial derivatives with respect to all the parameters can be computed by gradient descent with back-propagation. We apply the Adam~\cite{DBLP:journals/corr/KingmaB14} algorithm to automatically adapt the learning rate during the learning procedure.
\textbf{Recommendation Phase}. For user $ u $, we compute the distance $ d_{\mathbb{B}}(\mathbf{u}, \mathbf{v}) $ between the user $ u $ and each item $ v $ in the dataset. Then the items that are not in the training set and have the shortest distances are recommended to user $ u $.
\section{Related Work}
\subsection{General Recommendation}
Early recommendation studies largely focused on explicit feedback~\cite{DBLP:conf/www/SarwarKKR01,DBLP:conf/kdd/Koren08}. The recent research focus is shifting towards implicit data~\cite{DBLP:conf/cikm/TranLL018}. Collaborative filtering (CF) with implicit feedback is usually treated as a Top-K item recommendation task, where the goal is to recommend a list of items to users that users may be interested in. It is more practical and challenging~\cite{DBLP:conf/icdm/PanZCLLSY08}, and accords more closely with the real-world recommendation scenario. Early works mostly rely on matrix factorization techniques~\cite{DBLP:conf/icdm/HuKV08,DBLP:conf/uai/RendleFGS09} to learn latent features of users and items. Due to their ability to learn salient representations, (deep) neural network-based methods~\cite{DBLP:conf/www/HeLZNHC17,DBLP:conf/icdm/SunZMCGTH19,DBLP:conf/kdd/MaKL19} are also adopted. Autoencoder-based methods~\cite{DBLP:conf/wsdm/WuDZE16,DBLP:conf/cikm/MaZWL18,DBLP:conf/wsdm/MaKWWL19} have also been proposed for Top-K recommendation. In~\cite{DBLP:conf/kdd/LianZZCXS18,DBLP:conf/ijcai/XueDZHC17}, deep learning techniques are used to boost the traditional matrix factorization and factorization machine methods. Recently, some methods are also conducted in the hyperbolic space. HyperML~\cite{DBLP:conf/wsdm/TranT0CL20} conducts metric learning in the hyperbolic space and outperforms Euclidean counterparts. \citet{DBLP:conf/sigir/FengTCCLL20} propose to tackle the next Point-of-Interest recommendation task in the hyperbolic space.
\subsection{Knowledge Graph Enhanced Recommendation}
Knowledge graphs (KGs) are an important means to represent side information of recommender systems and have proven to be helpful to improve the recommendation performance. For example, \citet{DBLP:conf/kdd/ZhangYLXM16} propose to apply the TransR method~\cite{DBLP:conf/aaai/LinLSLZ15} to learn the KG representation as well as the item embeddings in the KG. \citet{DBLP:journals/algorithms/AiACZ18} integrate users and items with the KG and jointly learn the recommendation and KG part. \citet{DBLP:conf/www/WangZZLXG19} propose a multi-task feature learning approach for knowledge graph enhanced recommendation, where these two parts are connected with a cross-and-compress unit to transfer knowledge and share regularization of items. Another track of research tries to perform propagation over the KG to assist in recommendation. Specifically, RippleNet~\cite{DBLP:conf/cikm/WangZWZLXG18} extends the user’s interests along KG links to discover her potential interests by introducing preference propagation, which automatically propagates users’ potential preferences and explores their hierarchical interests in the KG.
KPRN~\cite{DBLP:conf/aaai/WangWX00C19} constructs the extracted path sequence with both the entity embedding and the relation embedding. These paths are encoded with an LSTM layer and the preferences for items in each path are predicted through fully-connected layers. KGCN~\cite{DBLP:conf/www/WangZXLG19} studies the utilization of Graph Convolutional Networks (GCNs) for computing embeddings of items via propagation among their neighbors in the KG. Recently, KGAT~\cite{DBLP:conf/kdd/Wang00LC19} recursively performs propagation over the KG via a graph attention mechanism that refines entity embeddings. Several subsequent works~\cite{DBLP:conf/sigir/ChenZMLM20,DBLP:conf/www/WangX000C20} focus on optimizing the negative sampling procedure in knowledge-enhanced recommendation. In this paper, we report results for our proposed method using a vanilla negative sampling strategy, so that we can focus on the performance impact of the novel aspects: learning in the hyperbolic space, using hyperbolic attention with Einstein midpoint aggregation, and introducing adaptive regularization. But the advanced negative sampling strategies can also be incorporated into our proposed method to provide a further performance improvement.
Our proposed model distinguishes itself from previous models by learning knowledge-enhanced recommendation in the Poincaré ball model.
In addition, we employ a hyperbolic attention model in the hyperbolic space to assign different degrees of importance to the neighboring entities of a certain item. We introduce a bilevel optimization formulation of the learning task to achieve an adaptive regularization mechanism that controls the regularization effect. |
1,314,259,995,002 | arxiv | \section{Introduction}
As is well known, many physically interesting problems are modeled
by nonlinear differential-difference equations, such as the Toda
lattice, Volterra lattice and discrete nonlinear Schr\"odinger
equation. Since 1970s discrete systems have received considerable
attention from variety of aspects (e.g.,
\cite{AL-75-JMP,Hirota-1970s,Date-1980s,Kuper-book}), such as
Inverse Scattering Transform, bilinear method, Sato's approach,
symmetry analysis and so on. One of famous discrete spectral
problems is given by Ablowitz and Ladik\cite{AL-75-JMP,AL-76-JMP}
which is now referred to as the Ablowitz-Ladik (AL) spectral
problem. This spectral problem, coupled with different time
evolution parts, has provided Lax integrabilities for many discrete
soliton systems, such as integrable discrete nonlinear Schr\"odinger
equation\cite{Ablowitz-04-book}, discrete mKdV equation and so
forth.
There are two types of the AL spectral problems,
which contain two potentials $\{Q_n,R_n\}$ and four potentials $\{Q_n,R_n,S_n,T_n\}$, respectively.
The two-potential one is the direct discretization (cf. \cite{Ablowitz-04-book}) of the famous continuous AKNS-ZS spectral problem\cite{AKNS},
and besides solutions, the related Hamiltonian structures, constraint flows, nonlinearization,
Darboux transformation, conservation laws, symmetries and Lie algebra structures
have been studied (cf. \cite{Geng-1989,Zeng-95-JPA,Ma-Tamizhimani-JPSJ-1999,ZDJ-02-JPA,ZDJ-02-CSF,Geng-03-JMP,Veks-2006,ZDJ-06-PLA,Geng-07-SAM,Gesztesy-08-SAM}).
The four-potential AL spectral problem is more complicated than the two-potential case
because of containing two more potentials and its unsymmetrical matrix form.
Cheng \cite{CY-86} transformed the four-potential AL spectral problem to a bit simple form
which could be further related to the two-potential case, but the relation (connecting two and four potentials) given in \cite{CY-86} is not helpful for
discussions of Hamiltonian structures and symmetries.
Recently, based on Cheng's transformation, Geng and Dai \cite{Geng-4p} separated the four-potential
AL spectral matrix into two symmetrical two-potential AL spectral matrices.
This makes it possible to construct a four-potential hierarchy (e.g.\cite{Geng-4p}) and its recursion operator
and then investigate more characteristics of integrability.
Infinite symmetries act as an important characteristic for
integrable systems \cite{Fokas-1987}. Symmetry-analysis is also a
powerful approach to finding exact solutions for nonlinear systems
\cite{Olver-book,Bluman-book}. In this paper we focus on symmetries
and their Lie algebras for the four-potential isospectral and
non-isospectral AL hierarchies. We will first derive positive and
negative order isospectral and non-isospectral flows. These flows
share a same recursion operator $L$ and can be respectively
uniformed as $K^{(m)}=L^m K^{(0)}$ and $\sigma^{(m)}=L^m
\sigma^{(0)}$, where $m$ is an arbitrary integer (instead of a
natural number as in most cases). Then we can embed these flows into
their zero-curvature equations by means of functional derivatives.
The resulting expressions, which we refer to as zero-curvature
representations, have been shown to be powerful in constructing
symmetries for Lax integrable systems
(cf.\cite{Ma-Tamizhimani-JPSJ-1999,ZDJ-06-PLA,Ma-1990,Chen-1991,Chen-1996,Chen-2003,MWX-99-JMP}).
We will derive algebraic relations for isospectral and
non-isospectral flows and then derive symmetries and their Lie
algebras for not only isospectral AL hierarchy but also
non-isospectral hierarchy. Both algebras are the type of centerless
Kac-Moody-Virasoro algebra. The recursion operator $L$ is hereditary
and a strong symmetry for the isospectral hierarchy.
A natural question is whether all these structures w.r.t. four potentials $(Q_n,R_n,S_n,T_n)$,
including hierarchies, recursion operator $L$, symmetries and algebras admit a closed reduction
w.r.t. two potentials $(Q_n,R_n)$ by directly taking $(S_n,T_n)=(0,0)$.
This is true for those even order members in the
four-potential isospectral and non-isospectral hierarchies,
and the new recursion operator becomes $L^2$.
We will discuss the reduction in the paper.
This paper is the first part of our series investigations which
consist of two parts. In Part II we will focus on symmetries of the
integrable discrete nonlinear Schr\"odinger equation and discrete
AKNS hierarchy. The integrable discrete nonlinear Schr\"odinger
equation consists of positive and negative order flows which
correspond to a central-difference discretization for a continuous
second order derivative. We will also give a recursion operator
which generates discrete AKNS hierarchies. The obtained symmetry
algebras are not centerless Kac-Moody-Virasoro type. The structure
changes will also be explained in Part II.
The present paper is organized as follows.
Sec.2 contains some basic notations and backgrounds on the AL spectral problem.
Sec.3 derives four-potential isospectral and non-isospectral flows and their zero-curvature representations.
In Sec.4 we derive symmetries and their algebraic structures
for both four-potential isospectral and non-isospectral hierarchies.
In Sec.5 we discuss reduction relation between four-potential case and two-potential case.
There are also two Appendix sections.
Sec.A lists out first few equations in four-potential AL hierarchies and their Lax pairs,
and Sec.B is a theorem obtained in Ref.\cite{ZDJ-02-JPA} which we give here for self-containedness.
\section{Basic notations and backgrounds}
Let us first introduce some basic notations and notions which have been used
for discussing symmetries of discrete systems (cf. \cite{MWX-99-JMP,ZDJ-02-JPA,ZDJ-06-PLA}).
Assume that $u_n\doteq u(t, n)=(u^{(1)}, u^{(2)}, u^{(3)},u^{(4)})^T$ is a four-dimensional vector field, where
$u^{(i)}=u^{(i)}(t, n),~1\leq i\leq 4$, are all functions defined
over $\mathbb{R}\times \mathbb{Z}$ and vanish rapidly as
$|n|\rightarrow \infty$.
By $\mathcal{V}_{4}$ we denote a linear space consisting of all vector
fields $f=(f^{(1)}, f^{(2)}, f^{(3)}, f^{(4)})^{T}$, where each $f^{(i)}$
is a function of $u(t, n)$ and its shifts $u(t, n+j),~j\in \mathbb{Z}$, satisfying $f^{(i)}(u(t, n))|_{u_{n}=0}=0$,
and each $f^{(i)}$
is $C^{\infty}$ differentiable w.r.t. $t$ and $n$, and
$C^{\infty}$-Gateaux differentiable w.r.t. $u_{n}$.
Here the Gateaux (or Fr\'echet) derivative of $f\in
\mathcal{V}_{4}$ (or $f$ an operator on $\mathcal{V}_{4})$ in the
direction $g\in \mathcal{V}_{4}$ is defined as
\begin{equation}
f^{\prime}[g]=\frac{d}{d \epsilon}\Bigr|_{\epsilon=0}f(u+\epsilon g).
\label{def-gat}
\end{equation}
By means of the Gateaux derivative one can define a Lie product for any $f,
g\in \mathcal{V}_{4}$ as
\begin{equation}
[\![ f, g]\!] =f^{\prime}[g]-g^{\prime}[f].
\end{equation}
We also define a space of Laurent matrix
polynomials $\mathcal {Q}_{2}(z)$, composed of all $2\times 2$ matrices $Q=Q(z, u(t,
n))=(q_{ij}(z, u(t, n)))_{2\times 2}$, where all the $\{q_{ij}\}$ are
Laurent polynomials of $z$. Two subspaces of $\mathcal
{Q}_{2}(z)$ we will need are
\begin{align*}
&\mathcal {Q}^{+}_{2}(z)=\{Q\in \mathcal {Q}_{2}(z)|\mathrm{~the~ lowest~degree~of}~ z\geq 0\},\\
&\mathcal {Q}^{-}_{2}(z)=\{Q\in \mathcal {Q}_{2}(z)|\mathrm{~the~ highest~degree~ of}~ z\leq 0\}.
\end{align*}
We note that in a similar way we can define spaces $\mathcal{V}_{s}$, $\mathcal{Q}_{m}(z)$ and
$\mathcal {Q}^{\pm}_{m}(z)$ (cf. \cite{ZDJ-02-JPA}).
In general a discrete evolution equation arises from
the compatibility of a pair of discrete linear problems\footnote{
Actually, they are semi-discrete.}
\begin{equation}
\Phi_{n+1}=U_{n}(z, u(t, n))\Phi_n,~~~~\Phi_{n,t}=V_{n}(z, u(t, n))\Phi_n,
\end{equation}
where $\Phi_n$ is a wave function, $U_{n}$ is a spectral matrix with
spectral parameter $z$ and potential vector $u(t, n)$ while $V_n$ is a matrix
governing time evolution.
The compatibility condition, also called discrete zero-curvature equation, reads
\begin{equation}
\label{2}
U_{n, t}=(EV_{n})U_{n}-U_{n}V_{n}.
\end{equation}
Here and in the following $E$ is a shift operator defined as $E^jf(n)=f(n+j)$ for $j\in \mathbb{Z}$.
Suppose that the corresponding nonlinear evolution equation is
\begin{equation}
\label{44.1}
u_{n, t}=K(u_{n}).
\end{equation}
Then by means of the Gateaux derivative the flow $K(u_{n})$ can be embedded
into the zero-curvature equation \eqref{2} as the following,
\begin{equation}
\label{3}
U_{n}^{\prime}[K(u_{n})]=(EV_{n})U_{n}-U_{n}V_{n}-U_{n, z}z_{t},
\end{equation}
which is usually called the zero-curvature representation of the flow $K(u_{n})$.
For the nonlinear evolution equation \eqref{44.1}, $\sigma(u_{n})\in \mathcal{V}_{4}$
is a symmetry of it if $\sigma_{t}=K^{\prime}[\sigma]$, i.e.,
\begin{equation}
\label{44.11}
\frac{\tilde{\partial} \sigma}{\tilde{\partial} t}=[\![ K, \sigma]\!] ,
\end{equation}
where by $\frac{\tilde{\partial}\sigma}{\tilde{\partial} t}$
we specially denote the derivative of $\sigma$ w.r.t. $t$ explicitly included in $\sigma$,
(for example, $\frac{\tilde{\partial}\sigma }{\tilde{\partial} t}=u_{n}$ if $\sigma=t u_n+u_{n+1}$).
Next, let us recall some backgrounds on four-potential AL hierarchy.
The four-potential AL spectral problem reads
\cite{AL-75-JMP,AL-76-JMP}
\begin{equation}
\label{4p-AL}
\begin{array}{l}
\psi_{1, n+1}=\lambda\psi_{1, n}+Q_{n}\psi_{2, n}+S_{n}\psi_{2, n+1},\\
\psi_{2,n+1}=\lambda^{-1}\psi_{2, n}+R_{n}\psi_{1, n}+T_{n}\psi_{1, n+1},
\end{array}
\end{equation}
where $\lambda$ is a spectral parameter and $Q_{n}, R_{n}, S_{n}, T_{n}$ are four potential functions of $n$ and $t$.
When $S_n=T_n=0$ \eqref{4p-AL} reduces to the two-potential AL spectral problem, i.e.,
\begin{equation}
\label{2p-AL}
\begin{array}{l}
\psi_{1, n+1}=\lambda\psi_{1, n}+Q_{n}\psi_{2, n},\\
\psi_{2,n+1}=\lambda^{-1}\psi_{2, n}+R_{n}\psi_{1, n},
\end{array}
\end{equation}
which is a discrete version of the AKNS-ZS spectral problem (cf.\cite{Ablowitz-04-book}).
An alternative (matrix) form of \eqref{4p-AL} is\cite{AL-75-JMP}
\begin{equation}\label{5}
\Psi_{n+1}=\frac{1}{\Lambda_{n}}\left(
\begin{array}{cc} z^2+S_{n}R_{n}& Q_{n}+z^{-2}S_{n}\\
z^2T_{n}+R_{n}&z^{-2}+T_{n}Q_{n}
\end{array}\right)\Psi_{n},~~~~\Psi_{n}=\left(
\begin{array}{cc} \psi_{1,n}\\
\psi_{2,n}
\end{array}\right),
\end{equation}
where $\Lambda_{n}=1-S_{n}T_{n}$ and we have substituted $z^2$ for $\lambda$.
This form can be gauge-transformed to \cite{CY-86}
\begin{equation}
\Phi_{n+1}=U_n\Phi_{n},~~~
U_n=\left(
\begin{array}{cc} z^{2}+S_{n}R_{n}& zQ_{n}+z^{-1}S_{n}\\
zT_{n}+z^{-1}R_{n}&z^{-2}+T_{n}Q_{n}
\end{array}\right),~~~\Phi_{n}=\left(
\begin{array}{cc} \phi_{1,n}\\
\phi_{2,n}
\end{array}\right),
\label{4p-new}
\end{equation}
where $\Phi_{n}$ and $\Psi_n$ are related through
\begin{equation}
\label{6}
\Phi_{n}=\rho(z)\Psi_{n}\prod^{+\infty}_{i=n}\Lambda_{i}^{-1},
~~~~\rho(z)=\left(
\begin{array}{cc} z^{\frac{1}{2}}& 0\\
0&z^{-\frac{1}{2}}
\end{array}\right).
\end{equation}
Here on $U_n$ we impose a condition
\begin{equation}
\label{cd}
Q_nR_n+S_nT_n\neq 0
\end{equation}
so that $U_n'$ is an injective homomorphism.
Suppose that the time evolution of $\Phi_n$ is
\begin{equation}
\label{4p-new-time}
\Phi_{n,t}=V_{n}\Phi_{n}, \qquad V_n=\left(
\begin{array}{cc} A_{n}& B_{n}\\
C_{n}& D_{n}
\end{array}\right).
\end{equation}
Then the compatibility condition with \eqref{4p-new} yields
\begin{equation}
\label{zce}
U_{n, t}=(EV_{n})U_{n}-U_{n}V_{n}.
\end{equation}
Usually the above discrete zero-curvature equation contributes
a discrete nonlinear evolution equation hierarchy with four potentials and their recursion operator,
but this is not as easy as in the two-potential case (related to \eqref{2p-AL}, cf. \cite{Zeng-95-JPA,Ma-Tamizhimani-JPSJ-1999,ZDJ-02-JPA,ZDJ-06-PLA}).
However, the spectral matrix $U_n$ can be separated into \cite{Geng-4p}
\begin{equation}
\label{U-12}
U_{n}=U_{n}^{(2)}U_{n}^{(1)},~~~
U_{n}^{(1)}=\left(
\begin{array}{cc} z & Q_{n}\\
R_{n}& z^{-1}
\end{array}\right),~~
U_{n}^{(2)}=\left(
\begin{array}{cc} z & S_{n}\\
T_{n}& z^{-1}
\end{array}\right).
\end{equation}
Then \eqref{zce} holds if \cite{Geng-4p}
\begin{equation}
\label{zce-aux}
U_{n, t}^{(1)}=\widehat{V}_{n}U_{n}^{(1)}-U_{n}^{(1)}V_{n},\qquad
U_{n, t}^{(2)}=(EV_{n})U_{n}^{(2)}-U_{n}^{(2)}\widehat{V}_{n},
\end{equation}
where $\widehat{V}_{n}= \left(
\begin{array}{cc} a_{n} & b_{n}\\
c_{n}& d_{n}
\end{array}\right)$.
In fact,
\begin{equation*}
U_{n, t}-(EV_{n})U_{n}+U_{n}V_{n} =(U_{n,
t}^{(2)}-(EV_{n})U_{n}^{(2)}+U_{n}^{(2)}\widehat{V}_{n})U_{n}^{(1)}
+U_{n}^{(2)}(U_{n,
t}^{(1)}-\widehat{V}_{n}U_{n}^{(1)}+U_{n}^{(1)}V_{n}).
\end{equation*}
Thus, one can consider the two auxiliary systems given in \eqref{zce-aux},
where each of $U_n^{(j)}$ contains two potentials.
Recently, starting from \eqref{zce-aux} Geng and Dai \cite{Geng-4p}
derived a four-potential AL hierarchy and their recursion relation and considered Hamiltonian structures and nonlinearization of Lax pair.
Noting that in the AL spectral problem \eqref{4p-AL}
the spectral parameter $\lambda$ appears symmetrically
w.r.t. positive and negative powers,
it is then understood that the recursion operator and its inverse can be derived
symmetrically. So are the negative order and positive order hierarchies.
In \cite{ZDJ-06-PLA}, starting from \eqref{2p-AL} we have expressed isospectral and non-isospectral
two-potential AL hierarchies in the form of $u_{n,t}=L^m H^{(0)}$,
where $m$ is an arbitrary integer (instead of a natural number) and $L$ is the recursion
operator.
This is also true for the four-potential case. In the next section we will derive the four-potential AL hierarchies
from the two auxiliary systems given in \eqref{zce-aux}.
\section{AL hierarchies and zero-curvature representations}
Now we derive isospectral and non-isospectral AL hierarchies and their recursion operator.
The procedure is quite like the one given in \cite{ZDJ-06-PLA}.
In addition, we will express the obtained isospectral and non-isospectral flows in terms of
zero-curvature equation, by means of which we will prove that the recursion operator is
hereditary and a strong symmetry of the isospectral hierarchy.
\subsection{Isospectral hierarchy}
The explicit form of the auxiliary systems \eqref{zce-aux} is
\begin{subequations}
\label{zce-aux-exp}
\begin{align}
z^{-1}z_{t}&= a_{n}-A_{n}+R_{n}b_{n}z^{-1}-C_{n}Q_{n}z^{-1},\label{zce-aux-a}\\
Q_{n, t}&= b_{n}z^{-1}-B_{n}z+Q_{n}(a_{n}-D_{n}),\label{zce-aux-b}\\
R_{n, t}&= c_{n}z-C_{n}z^{-1}+R_{n}(d_{n}-A_{n}),\label{zce-aux-c}\\
z(z^{-1})_{t}&=
d_{n}-D_{n}+Q_{n}c_{n}z-R_{n}B_{n}z;\label{zce-aux-d}
\end{align}
\begin{align}
z^{-1}z_{t}&= A_{n+1}-a_{n}+T_{n}B_{n+1}z^{-1}-c_{n}S_{n}z^{-1},\label{zce-aux-e}\\
S_{n, t}&= B_{n+1}z^{-1}-b_{n}z+S_{n}(A_{n+1}-d_{n}),\label{zce-aux-f}\\
T_{n, t}&= C_{n+1}z-c_{n}z^{-1}+T_{n}(D_{n+1}-a_{n}),\label{zce-aux-g}\\
z(z^{-1})_{t}&=
D_{n+1}-d_{n}+S_{n}C_{n+1}z-T_{n}b_{n}z.\label{zce-aux-h}
\end{align}
\end{subequations}
From \eqref{zce-aux-a}, \eqref{zce-aux-d}, \eqref{zce-aux-e} and \eqref{zce-aux-h} one can get
\begin{subequations}\label{2.7}
\begin{align}
A_{n}=&(E-1)^{-1}(S_{n}c_{n}z^{-1}+Q_{n}C_{n}z^{-1}-T_{n}B_{n+1}z^{-1}-R_{n}b_{n}z^{-1})+2nz^{-1}z_{t}+A_{0},\\
a_{n}=&
(E-1)^{-1}(S_{n}c_{n}z^{-1}+Q_{n}C_{n}z^{-1}-T_{n}B_{n+1}z^{-1}-R_{n}b_{n}z^{-1})\notag\\
&+Q_{n}C_{n}z^{-1}-R_{n}b_{n}z^{-1}+(2n+1)z^{-1}z_{t}+A_{0},\\
D_n=&(E-1)^{-1}(R_{n}B_{n}z+T_{n}b_{n}z-S_{n}C_{n+1}z-Q_{n}c_{n}z)+2nz(z^{-1})_{t}+D_{0},\\
d_n=&(E-1)^{-1}(R_{n}B_{n}z+T_{n}b_{n}z-S_{n}C_{n+1}z-Q_{n}c_{n}z)+R_{n}B_{n}z-Q_{n}c_{n}z\notag\\
&+(2n+1)z(z^{-1})_{t}+D_{0}.
\end{align}
\end{subequations}
Here $A_{0}=A_{n}|_{u_{n}=0}-2nz^{-1}z_{t}$ and
$D_{0}=D_{n}|_{u_{n}=0}-2nz(z^{-1})_{t}$ where $u_n=(Q_n,R_n,S_n,T_n)^T$. Thus \eqref{zce-aux-exp}
simplifies to
\begin{equation}
\label{u-t}
u_{n, t}=(zL_{1}+z^{-1}L_{2}) \left(
\begin{array}{cccc}
-B_{n}\\ C_{n}\\ -b_{n}\\ c_{n}
\end{array}\right)
+(A_{0}-D_{0}) \left(
\begin{array}{cccc}
Q_{n}\\ -R_{n}\\ S_{n}\\ -T_{n}
\end{array}\right)
+z^{-1}z_{t} \left(
\begin{array}{cccc}
(4n+1)Q_{n}\\ -(4n+1)R_{n}\\ (4n+3)S_{n}\\ -(4n+3)T_{n}
\end{array}\right),
\end{equation}
where
\begin{subequations}
\begin{align}\label{L1}
L_{1}&=\left(
\begin{array}{cccc} 1 & 0 & 0 & 0\\ -R_{n}^{2} & 0 & 0 & \gamma_{n}^{2}\\
S_{n}R_{n} & 0 & 1 & S_{n}Q_{n}\\-T_{n}R_{n} & \pi_{n}^{2}E &
-T_{n}^{2} & -T_{n}Q_{n}
\end{array}\right)
+\left(\begin{array}{cccc}
Q_{n}\\ -R_{n}\\ S_{n}\\ -T_{n}
\end{array}\right) (E-1)^{-1}(R_{n}, S_{n}E, T_{n}, Q_{n}),\\
\label{L2}
L_{2}&=\left(
\begin{array}{cccc} 0 & Q_{n}^{2} & -\gamma_{n}^{2} & 0\\ 0 & -1 & 0 & 0\\
-\pi_{n}^{2}E & S_{n}Q_{n} & R_{n}S_{n} & S_{n}^{2}\\0 & -T_{n}Q_{n}
& -T_{n}R_{n} & -1
\end{array}\right)
+\left(\begin{array}{cccc}
Q_{n}\\ -R_{n}\\ S_{n}\\ -T_{n}
\end{array}\right) (E-1)^{-1}(T_{n}E, Q_{n}, R_{n}, S_{n}),
\end{align}
\end{subequations}
in which $\gamma_{n}=\sqrt{1-Q_{n}R_{n}},~
\pi_{n}=\sqrt{1-S_{n}T_{n}}.$
One can verify that the inverse
operators of $L_{1}$ and $L_{2}$ are
\begin{subequations}
\begin{align}\label{L1-inv}
L_{1}^{-1}=&\left(
\begin{array}{cccc} 1 & 0 & 0 & 0\\ \frac{T_{n-1}R_{n-1}E^{-1}}{\gamma_{n-1}^{2}}
& \frac{T_{n-1}Q_{n-1}E^{-1}}{\gamma_{n-1}^{2}}
& \frac{T_{n-1}^{2}E^{-1}}{\pi_{n-1}^{2}} & \frac{E^{-1}}{\pi_{n-1}^{2}}\\
-\frac{S_{n}R_{n}}{\gamma_{n}^{2}} &
-\frac{S_{n}Q_{n}}{\gamma_{n}^{2}} & 1 &0\\
\frac{R_{n}^{2}}{\gamma_{n}^{2}}& \frac{1}{\gamma_{n}^{2}} & 0 & 0
\end{array}\right)\notag\\
&-\left(\begin{array}{cccc}
Q_{n}\\ -T_{n-1}E^{-1}\\ S_{n}\\ -R_{n}
\end{array}\right) (E-1)^{-1}\biggl(\frac{R_{n}}{\gamma_{n}^{2}}, \frac{Q_{n}}{\gamma_{n}^{2}},
\frac{T_{n}}{\pi_{n}^{2}}, \frac{S_{n}}{\pi_{n}^{2}}\biggr),
\end{align}
\begin{align}\label{L2-inv}
L_{2}^{-1}=&\left(
\begin{array}{cccc} -\frac{S_{n-1}R_{n-1}E^{-1}}{\gamma_{n-1}^{2}} & -\frac{S_{n-1}Q_{n-1}E^{-1}}{\gamma_{n-1}^{2}}
& -\frac{E^{-1}}{\pi_{n-1}^{2}} & -\frac{S_{n-1}^{2}E^{-1}}{\pi_{n-1}^{2}}\\
0 & -1 & 0 & 0\\
-\frac{1}{\gamma_{n}^{2}} & -\frac{Q_{n}^{2}}{\gamma_{n}^{2}} & 0&
0\\\frac{T_{n}R_{n}}{\gamma_{n}^{2}} &
\frac{T_{n}Q_{n}}{\gamma_{n}^{2}} & 0 & -1
\end{array}\right)\notag \\
&-\left(\begin{array}{cccc}
S_{n-1}E^{-1}\\ -R_{n}\\ Q_{n}\\ -T_{n}
\end{array}\right) (E-1)^{-1}\biggl(\frac{R_{n}}{\gamma_{n}^{2}}, \frac{Q_{n}}{\gamma_{n}^{2}},
\frac{T_{n}}{\pi_{n}^{2}}, \frac{S_{n}}{\pi_{n}^{2}}\biggr).
\end{align}
\end{subequations}
To derive isospectral AL hierarchy, we need to take $z_t \equiv 0$ in \eqref{u-t} and expand $(B_{n}, C_{n}, b_{n}, c_{n})^{T}$ as
\begin{equation}
\label{BCbc+}
\left(
\begin{array}{cccc}
B_{n}\\ C_{n}\\ b_{n}\\ c_{n}
\end{array}\right)=\sum_{j=0}^{m}\left(
\begin{array}{cccc}
B_{n}^{(j)}\\ C_{n}^{(j)}\\ b_{n}^{(j)}\\ c_{n}^{(j)}
\end{array}\right)z^{2(m-j)+1},\qquad m=0,1,2,\cdots.
\end{equation}
Then by setting $(B_{n}^{(0)}, C_{n}^{(0)},
b_{n}^{(0)},c_{n}^{(0)})^{T}=(0, 0, 0, 0)^{T}$,
$A_{0}=-D_{0}=A_{n}|_{u_{n}=0}=-D_{n}|_{u_{n}=0}=\frac{1}{2}z^{2m}$, and comparing the coefficients of
the same powers of $z$ in \eqref{u-t} we get
\begin{subequations}
\label{ut+}
\begin{align}
&\left(
\begin{array}{cccc}
Q_{n}\\ R_{n}\\ S_{n}\\ T_{n}
\end{array}\right)_{t_{m}}
=(1-\delta_{0,m})L_{2}\left(
\begin{array}{cccc}
-B_{n}^{(m)}\\ C_{n}^{(m)}\\ -b_{n}^{(m)}\\ c_{n}^{(m)}
\end{array}\right)+\delta_{0,m}
\left(
\begin{array}{cccc}
Q_{n}\\ -R_{n}\\ S_{n}\\ -T_{n}
\end{array}\right),\label{ut+a}\\
&\left(
\begin{array}{cccc}
-B_{n}^{(j+1)}\\ C_{n}^{(j+1)}\\ -b_{n}^{(j+1)}\\ c_{n}^{(j+1)}
\end{array}\right)=-L_{1}^{-1}L_{2}\left(
\begin{array}{cccc}
-B_{n}^{(j)}\\ C_{n}^{(j)}\\ -b_{n}^{(j)}\\ c_{n}^{(j)}
\end{array}\right), \qquad j=1,2,\cdots, m-1,\\
&\left(
\begin{array}{cccc}
-B_{n}^{(1)}\\ C_{n}^{(1)}\\ -b_{n}^{(1)}\\ c_{n}^{(1)}
\end{array}\right)=-L_{1}^{-1}\left(
\begin{array}{cccc}
Q_{n}\\ -R_{n}\\ S_{n}\\ -T_{n}
\end{array}\right),\label{ut+c}
\end{align}
\end{subequations}
where the subindex $m$ for $t$ indicates the order of the expansion
\eqref{BCbc+} as well as the order of member in isospectral
hierarchy. This further yields an isospectral hierarchy
\begin{equation}
\label{hie-iso+}
u_{n, t_{m}}=K^{(m)}=L^{m}K^{(0)}, \qquad m=0, 1, 2, \cdots,
\end{equation}
where
\begin{equation}
\label{K0}
K^{(0)}=(Q_{n}, -R_{n}, S_{n}, -T_{n})^{T},
\end{equation}
and the
recursion operator $L$ is defined by
\begin{align}
\label{L}
L=-L_{2}L_{1}^{-1}= &\left(
\begin{array}{cccc} -S_{n}R_{n} & -S_{n}Q_{n} & \gamma_{n}^{2} & -Q_{n}^{2}E^{-1} \\
0 & 0 & 0 & E^{-1}\\ \pi_{n}^{2}E & -S_{n}^{2} & -S_{n}R_{n} &
-S_{n}Q_{n}E^{-1}\\
\frac{\pi_{n}^{2}R_{n}^{2}}{\gamma_{n}^{2}} &
\frac{1-Q_{n}R_{n}S_{n}T_{n}}{\gamma_{n}^{2}} & T_{n}R_{n} &
T_{n}Q_{n}E^{-1}
\end{array}\right)\notag\\
&-\left(\begin{array}{cccc}
Q_{n}\\ -R_{n}\\ S_{n}\\ -T_{n}
\end{array}\right) (E-1)^{-1}(T_{n}E, S_{n}, R_{n}, Q_{n}E^{-1})\notag\\
&
-\left(\begin{array}{cccc}
S_{n}\gamma_{n}^{2}\\ -T_{n-1}\gamma_{n}^{2}\\ Q_{n+1}\pi_{n}^{2}E\\
-R_{n}\pi_{n}^{2}
\end{array}\right) (E-1)^{-1}\biggl(\frac{R_{n}}{\gamma_{n}^{2}}, \frac{Q_{n}}{\gamma_{n}^{2}},
\frac{T_{n}}{\pi_{n}^{2}}, \frac{S_{n}}{\pi_{n}^{2}}\biggr).
\end{align}
If we expand $(B_{n}, C_{n}, b_{n}, c_{n})^{T}$ in another direction, i.e.,
\begin{equation}\label{2.22}
\left(
\begin{array}{cccc}
B_{n}\\ C_{n}\\ b_{n}\\ c_{n}
\end{array}\right)=\sum_{j=m}^{0}\left(
\begin{array}{cccc}
{B}_{n}^{(j)}\\ {C}_{n}^{(j)}\\ {b}_{n}^{(j)}\\
{c}_{n}^{(j)}
\end{array}\right)z^{2(m-j)-1},\qquad m=0,-1,-2,\cdots,
\end{equation}
and take $({B}_{n}^{(0)}, {C}_{n}^{(0)}, {b}_{n}^{(0)},
{c}_{n}^{(0)})^{T}=(0, 0, 0, 0)^{T}$,
$A_{0}=-D_{0}=A_{n}|_{u_{n}=0}=-D_{n}|_{u_{n}=0}=\frac{1}{2}z^{2m}$, we can have relation
\begin{subequations}
\label{ut-}
\begin{align}
&\left(
\begin{array}{cccc}
Q_{n}\\ R_{n}\\ S_{n}\\ T_{n}
\end{array}\right)_{t_{m}}
=(1-\delta_{0,m})L_{1}\left(
\begin{array}{cccc}
-B_{n}^{(m)}\\ C_{n}^{(m)}\\ -b_{n}^{(m)}\\ c_{n}^{(m)}
\end{array}\right)+\delta_{0,m}
\left(
\begin{array}{cccc}
Q_{n}\\ -R_{n}\\ S_{n}\\ -T_{n}
\end{array}\right),\\
&\left(
\begin{array}{cccc}
-B_{n}^{(j)}\\ C_{n}^{(j)}\\ -b_{n}^{(j)}\\ c_{n}^{(j)}
\end{array}\right)=-L_{2}^{-1}L_{1}\left(
\begin{array}{cccc}
-B_{n}^{(j+1)}\\ C_{n}^{(j+1)}\\ -b_{n}^{(j+1)}\\ c_{n}^{(j+1)}
\end{array}\right), \qquad j=-2,-3,\cdots, m,\\
&\left(
\begin{array}{cccc}
-B_{n}^{(-1)}\\ C_{n}^{(-1)}\\ -b_{n}^{(-1)}\\ c_{n}^{(-1)}
\end{array}\right)=-L_{2}^{-1}\left(
\begin{array}{cccc}
Q_{n}\\ -R_{n}\\ S_{n}\\ -T_{n}
\end{array}\right),
\end{align}
\end{subequations}
and further get a negative order isospectral hierarchy
\begin{equation}
\label{hie-iso-} u_{n, t_{m}}=K^{(m)}=L^{m}K^{(0)}, \qquad m=0, -1,
-2, \cdots,
\end{equation}
where $K^{(0)}$ and $L$ are given by \eqref{K0} and \eqref{L} respectively.
Obviously, \eqref{hie-iso+} and \eqref{hie-iso-} can be joined
together and written as a unified isospectral AL hierarchy
\begin{equation}
\label{hie-iso} u_{n, t_m}=K^{(m)}=L^{m}K^{(0)}, \qquad m\in
\mathbb{Z}.
\end{equation}
In Appendix \ref{A-1} we will list out the first few isospectral equations and their related Lax pairs.
\subsection{Non-isospectral hierarchy}
For the non-isospectral case, we suppose the time evolution of
spectral parameter $z$ follows $z_{t_m}=\frac{1}{2}z^{2m+1}$ for any
given $m\in \mathbb{Z}$. We can first expand $(B_{n}, C_{n}, b_{n},
c_{n})^{T}$ as \eqref{BCbc+} and still take $(B_{n}^{(0)},
C_{n}^{(0)}, b_{n}^{(0)}, c_{n}^{(0)})^{T}=(0, 0, 0, 0)^{T}$ but
$A_{0}=-D_{0}=0$. Then we can get a recursion relation which is
similar to \eqref{ut+} but in this non-isospectral case \eqref{ut+a}
and \eqref{ut+c} are replaced by
\begin{align*}
&\left(
\begin{array}{cccc}
Q_{n}\\ R_{n}\\ S_{n}\\ T_{n}
\end{array}\right)_{t_{m}}
=(1-\delta_{0,m})L_{2}\left(
\begin{array}{cccc}
-B_{n}^{(m)}\\ C_{n}^{(m)}\\ -b_{n}^{(m)}\\ c_{n}^{(m)}
\end{array}\right)+\delta_{0,m}
\left(
\begin{array}{cccc}
(2n+\frac{1}{2})Q_{n}\\ -(2n+\frac{1}{2})R_{n}\\ (2n+\frac{3}{2})S_{n}\\
-(2n+\frac{3}{2})T_{n}
\end{array}\right),\\
&\left(
\begin{array}{cccc}
-B_{n}^{(1)}\\ C_{n}^{(1)}\\ -b_{n}^{(1)}\\ c_{n}^{(1)}
\end{array}\right)=-L_{1}^{-1}\left(
\begin{array}{cccc}
(2n+\frac{1}{2})Q_{n}\\ -(2n+\frac{1}{2})R_{n}\\
(2n+\frac{3}{2})S_{n}\\ -(2n+\frac{3}{2})T_{n}
\end{array}\right).
\end{align*}
It then follows that a positive order non-isospectral hierarchy is
\begin{equation}
\label{2.34}
u_{n, t_m}=\sigma^{(m)}=L^{m}\sigma^{(0)}, \qquad m=0, 1, 2, \cdots,
\end{equation}
where
\begin{equation}
\label{sigma0}
\sigma^{(0)}=\left(
\begin{array}{cccc}
(2n+\frac{1}{2})Q_{n}\\ -(2n+\frac{1}{2})R_{n}\\
(2n+\frac{3}{2})S_{n}\\ -(2n+\frac{3}{2})T_{n}
\end{array}\right),
\end{equation}
and $L$ is the recursion operator given by \eqref{L}.
After a discussion for a negative order expansion of $(B_{n}, C_{n}, b_{n}, c_{n})^{T}$
and deriving a negative order non-isospectral hierarchy, one finally arrives at
a unified non-isospectral AL hierarchy:
\begin{equation}
\label{hie-non}
u_{n, t_m}=\sigma^{(m)}=L^{m}\sigma^{(0)}, \qquad m\in \mathbb{Z}.
\end{equation}
In Appendix \ref{A-1} we will also list out the first few
non-isospectral equations and their related Lax pairs.
\subsection{Zero-curvature representations}
We have derived isospectral hierarchy \eqref{hie-iso} and non-isospectral hierarchy \eqref{hie-non}.
Suppose that their Lax pairs are respectively
\begin{equation}
\Phi_{n+1}=U_n\Phi_{n},~~~\Phi_{n,t_m}=G_n^{(m)}\Phi_n,
\label{Laxp-iso}
\end{equation}
and
\begin{equation}
\Phi_{n+1}=U_n\Phi_{n},~~~\Phi_{n,t_m}=W_n^{(m)}\Phi_n,
\label{Laxp-non}
\end{equation}
where $m\in\mathbb{Z}$ and $U_n$ is defined in \eqref{4p-new}.
Then in isospectral case the zero-curvature equation which is related to \eqref{Laxp-iso} is
\begin{equation}
U_{n,t_m}=(EG^{(m)}_{n})U_{n}-U_{n}G^{(m)}_{n}, \label{zce-iso}
\end{equation}
and in non-isospectral case
\begin{equation}
U_{n,t_m}=(EW^{(m)}_{n})U_{n}-U_{n}W^{(m)}_{n}. \label{zce-non}
\end{equation}
Noticing the definition \eqref{def-gat} for a Gateaux derivative, $U_{n,t_m}$ can be rewritten as
\begin{equation}
U_{n,t_m}=U_n'[u_{n,t_m}]+U_{n,z}\cdot z_{t_m},
\end{equation}
by means of which we have
\begin{proposition}\label{p1}
The isospectral flows $\{K^{(m)}\}$ and non-isospectral flows
$\{\sigma^{(m)}\}$ admit zero curvature representations
\begin{align}
& U_{n}^{\prime}[K^{(m)}]=(EG^{(m)}_{n})U_{n}-U_{n}G^{(m)}_{n},\label{zcr-iso}\\
&
U_{n}^{\prime}[\sigma^{(m)}]=(EW^{(m)}_{n})U_{n}-U_{n}W^{(m)}_{n}-\frac{1}{2}z^{2m+1} U_{n,z},\label{zcr-non}
\end{align}
where $m\in \mathbb{Z}$,
$K^{(m)}$ and $\sigma^{(m)} \in \mathcal{V}_{4}$, $G^{(m)}_{n}$ and
$ W^{(m)}_{n} \in \mathcal{Q}_{2}(z)$ and satisfy
\begin{equation}\label{2.44}
G^{(m)}_{n}|_{u_{n}=0}=\frac{z^{2m}}{2}\left(
\begin{array}{cc} 1 & 0\\ 0& -1
\end{array}\right),~~
W^{(m)}_{n}|_{u_{n}=0}=z^{2m}\left(
\begin{array}{cc} n & 0\\ 0& -n
\end{array}\right).
\end{equation}
\end{proposition}
Besides, noting that $U_n'$ is an injective homomorphism when $Q_nR_n+S_nT_n\neq 0$, we have (cf.\cite{ZDJ-02-JPA,ZDJ-06-PLA})
\begin{lemma}
The matrix equation
\begin{equation}
U_{n}^{\prime}[X_n]=(EV_{n})U_{n}-U_{n}V_{n}, ~~X_n\in
\mathcal{V}_{4},~ V_{n}\in \mathcal{Q}_{2}(z) ~\mathrm{and}
~V_{n}|_{u_{n}=0}=0
\end{equation}
has only zero solutions $X_n=0$ and $V_{n}=0$.
\end{lemma}
This lemma and the zero-curvature representations \eqref{zcr-iso} and \eqref{zcr-non}
will play important roles in constructing symmetries and determining their algebraic structures.
\subsection{Recursion operator and hereditary and strong symmetry}
The flows $\{K^{(m)}\}, \{\sigma^{(m)}\}$ and their recursion relation can also be derived from the following way.
Firstly, we start from the matrix equations
\begin{align}
& U_{n}^{\prime}[X_n]=(EG_{n})U_{n}-U_{n}G_{n},\label{zcr-iso-0}\\
& U_{n}^{\prime}[Z_n]=(EW_{n})U_{n}-U_{n}W_{n}-U_{n,z}\cdot
\frac{z}{2},\label{zcr-non-0}
\end{align}
where the unknowns are $X_n, Z_n\in \mathcal{V}_{4}$ and
$G_{n},W_n\in\mathcal{Q}_{2}(z)$. Obviously, both of the two
equations admit non-zero solution pairs $\{X_n, G_n\}$ and $\{Z_n,
W_n\}$ (see \eqref{zcr-iso} and \eqref{zcr-non} for $m=0$).
Secondly, for equation
\begin{equation}
\label{zce-rec} U_{n}'[X_n-z^{\alpha}Y_n]=(EV_{n})U_{n}-U_{n}V_{n}
\end{equation}
where $X_n, Y_n\in \mathcal{V}_{4}$ and
$V_{n}\in\mathcal{Q}_{2}(z)$, one can find that when $\alpha=2$, for
any given $Y_n\neq 0\in \mathcal{V}_{4}$, there exist unique
solutions $X_n\in \mathcal{V}_{4}$, $V_{n}\in\mathcal{Q}_{2}(z)$
where $V_{n}|_{u_n=0}=0$. Thus $X_n$ and $Y_n$ are connected by a
map:\footnote{If $\alpha=-2$ then the map is $X=L^{-1}Y$.}
\begin{equation}
\label{map:L} L:~~ X_n=LY_n.
\end{equation}
The map, $L$, is nothing but the recursion operator \eqref{L}. Then,
thirdly, let $Y_n=K^{(0)}$ or $\sigma^{(0)}$ in \eqref{zce-rec}, one
can get $K^{(1)}$ or $\sigma^{(1)}$ by taking $\alpha=2$ and
$K^{(-1)}$ or $\sigma^{(-1)}$ by taking $\alpha=-2$. Repeating the
procedure one can generate all the isospectral flows $\{K^{(m)}\}$
and non-isospectral flows $\{\sigma^{(m)}\}$.
Besides, with the recursion operator $L$ in hand, \eqref{zce-rec} becomes
\begin{equation}
U_{n}'[X-z^{2}LX]=(EV_{n})U_{n}-U_{n}V_{n}.
\end{equation}
Then, in the light of Theorem 1 in Ref.\cite{ZDJ-02-JPA} (also see Appendix \ref{A-2}),
we immediately have
\begin{proposition}
The recursion operator $L$ is hereditary and a strong symmetry\footnote{
For the definitions of a hereditary operator and a strong symmetry operator,
one can refer to \cite{Fuchssteiner-1981}.} for the isospectral AL hierarchy (\ref{hie-iso}).
\end{proposition}
\section{Symmetries and Lie algebras}
In this section, we construct two types of symmetries for the
isospectral and non-isospectral AL hierarchies, respectively.
To do that, let us first look at the algebraic structure of flows $\{K^{(m)}\}$ and
$\{\sigma^{(m)}\}$.
We note that the proofs for this section are quite similar to those in \cite{ZDJ-06-PLA} and here we skip them.
\subsection{Algebra of flows}
Making use of the identity\cite{Olver-book}
\begin{equation}
U_n'[[\![ f, g ]\!] ]=(U_n'[f])'[g]-(U_n'[g])'[f],~~~ \forall f,g \in \mathcal{V}_{4},
\label{identity-1}
\end{equation}
from the zero-curvature representations \eqref{zcr-iso} and \eqref{zcr-non}
we can derive the following relations:
\begin{lemma}\label{lem-1}
The isospectral flows $\{K^{(m)}\}$ and non-isospectral flows $\{\sigma^{(m)}\}$ satisfy
\begin{subequations}
\label{alg-flow}
\begin{align}
&U_{n}^{\prime}[[\![ K^{(m)},
K^{(s)}]\!] ]=(E<G_{n}^{(m)}, G_{n}^{(s)}>)U_{n}-U_{n}<G_{n}^{(m)}, G_{n}^{(s)}>,\label{45a}\\
&U_{n}^{\prime}[[\![ K^{(m)},
\sigma^{(s)}]\!] ]=(E<G_{n}^{(m)}, W_{n}^{(s)}>)U_{n}-U_{n}<G_{n}^{(m)}, W_{n}^{(s)}>,\label{45b}\\
&U_{n}^{\prime}[[\![ \sigma^{(m)},
\sigma^{(s)}]\!] ]=(E<W_{n}^{(m)},
W_{n}^{(s)}>)U_{n}-U_{n}<W_{n}^{(m)},
W_{n}^{(s)}>-\frac{1}{2}(m-s)U_{n, z}z^{2(m+s)+1},\label{45c}
\end{align}
\end{subequations}
where
\begin{subequations}\label{46}
\begin{align}
& <G_{n}^{(m)},
G_{n}^{(s)}>=G_{n}^{(m)\prime}[K^{(s)}]-G_{n}^{(s)\prime}[K^{(m)}]+[G_{n}^{(m)},
G_{n}^{(s)}],\label{46a}\\
& <G_{n}^{(m)},
W_{n}^{(s)}>=G_{n}^{(m)\prime}[\sigma^{(s)}]-W_{n}^{(s)\prime}[K^{(m)}]+[G_{n}^{(m)},
W_{n}^{(s)}]+\frac{1}{2}G_{n, z}^{(m)}z^{2s+1},\label{46b}\\
&<W_{n}^{(m)},
W_{n}^{(s)}>=W_{n}^{(m)\prime}[\sigma^{(s)}]-W_{n}^{(s)\prime}[\sigma^{(m)}]+[W_{n}^{(m)},
W_{n}^{(s)}]+\frac{1}{2}W_{n, z}^{(m)}z^{2s+1}-\frac{1}{2}W_{n,
z}^{(s)}z^{2m+1}.\label{46c}
\end{align}
\end{subequations}
\end{lemma}
This lemma can be proved via a similar procedure as in \cite{ZDJ-06-PLA}
and here we skip the proof.
Based on the lemma we then obtain an algebra for the flows $\{K^{(m)}\}$ and $\{\sigma^{(m)}\}$ (cf.\cite{ZDJ-06-PLA}).
\begin{lemma}
\label{lem-2}
The isospectral and non-isospectral flows, $\{K^{(m)}\}$ and
$\{\sigma^{(m)}\}$, compose an
infinite-dimensional Lie algebra $\mathcal{F}$ through the Lie
product $[\![ \cdot, \cdot ]\!] $ and possess the following relations
\begin{subequations}
\label{47}
\begin{align}
[\![ K^{(m)}, K^{(s)}]\!] & =0, \\
[\![ K^{(m)},\sigma^{(s)}]\!] &=mK^{(m+s)}, \\
[\![ \sigma^{(m)},\sigma^{(s)}]\!] &=(m-s)\sigma^{(m+s)}.
\end{align}
\end{subequations}
\end{lemma}
\subsection{Symmetries for the isospectral and non-isospectral AL hierarchies}
\begin{theorem}
\label{T-4.1}
Any given member $u_{n, t_m}=K^{(m)}$ in the isospectral four-potential AL hierarchy \eqref{hie-iso}
possesses the following two sets of symmetries, i.e.,
\begin{equation}
\label{sym-iso} \{K^{(s)}\} ~~~\mathrm{and} ~~~ \{\tau^{(m,
s)}=m t_m K^{(m+s)}+\sigma^{(s)}\}, ~~~ s\in \mathbb{Z}.
\end{equation}
These symmetries form a centerless Kac-Moody-Virasoro (KMV) algebra
$\mathcal{S}$ with the following structure
\begin{subequations}
\label{alg-sym-iso}
\begin{align}
[\![ K^{(l)}, K^{(s)}]\!] &=0,\\
[\![ K^{(l)}, \tau^{(m,s)}]\!] &=lK^{(l+s)},\\
[\![ \tau^{(m, s)}, \tau^{(m, l)} ]\!] &=(s-l)\tau^{(m, s+l)}.
\end{align}
\end{subequations}
\end{theorem}
Obviously, the Lie algebras $\mathcal{F}$ and $\mathcal{S}$ are
respectively generated by the following elements
\begin{align}
&\{\sigma^{(1)}~~ (\mathrm{or} ~\sigma^{(-1)}),~~~~ \sigma^{(2)},~~~~
\sigma^{(-2)},~~~~ K^{(1)}~~
(\mathrm{or} ~K^{(-1)})\};\label{51}\\
&\{\tau^{(m, 1)}~~ (\mathrm{or} ~\tau^{(m, -1)}),~~~~\tau^{(m, 2)},~~~~
\tau^{(m, -2)}, ~~~~K^{(1)}~~ (\mathrm{or}~ K^{(-1)})\}.\label{52}
\end{align}
\begin{theorem}
\label{T-4.2} Any given member $u_{n, t_m}=\sigma^{(m)}$ in the
non-isospectral four-potential AL hierarchy \eqref{hie-non} has two
sets of symmetries, i.e.,
\begin{subequations}
\label{sym-non}
\begin{align}
&\eta^{(m, s)}=\sum^{s}_{j=0}C_{s}^{j}(m t_m)^{s-j}\sigma^{(m-jm)}~~~~(s=0, 1, 2, \cdots),\label{56}\\
&\gamma^{(m, s)}
=\sum^{s}_{j=0}C_{s}^{j}(m t_m)^{s-j}K^{(-jm)}~~~~(s=0, 1, 2,
\cdots),\label{57}
\end{align}
\end{subequations}
which we call $\eta$-symmetries and $\gamma$-symmetries,
respectively. Here $C^{j}_{s}=\frac{s!}{j!(s-j)!} $. These
symmetries form a centerless KMV algebra ${\mathcal{H}}$ with the
following structure
\begin{subequations}
\label{alg-sym-non}
\begin{align}
[\![ \eta^{(m, s)}, \eta^{(m, l)}]\!] & =(l-s)m \eta^{(m, s+l-1)},\label{58}\\
[\![ \gamma^{(m, s)}, \gamma^{(m, l)}]\!] & =0,\label{59}\\
[\![ \eta^{(m, s)}, \gamma^{(m, l)}]\!] &
=lm\gamma^{(m,s+l-1)}.\label{60}
\end{align}
\end{subequations}
The algebra can be generated by
\begin{equation}
\label{61}
\{\eta^{(m, 0)},~~~~ \eta^{(m, 3)}, ~~~~\gamma^{(m, 1)}\}.
\end{equation}
\end{theorem}
\subsection{Relations between the recursion operator and flows}
\begin{theorem}
The isospectral flows $\{K^{(m)}\}$, non-isospectral flows $\{\sigma^{(m)}\}$ and recursion operator $L$
satisfy the relations
\begin{align}
&L^{\prime}[K^{(m)}]-[K^{(m)\prime}, L]=0,\label{53}\\
&L^{\prime}[\sigma^{(m)}]-[\sigma^{(m)\prime},
L]-L^{m+1}=0,\label{54}
\end{align}
where $m\in\mathbb{Z}$ and $[A,B]=AB-BA$.
\end{theorem}
Utilizing the relations one can also derive symmetries \eqref{sym-iso} and their algebra structure \eqref{alg-sym-iso}
by means of inductive approach, as in \cite{Tian-book-90}.
In that way the non-isospectral flow $\sigma^{(0)}$ will play the role of a master symmetry \cite{Fuchssteiner-1983}.
\section{Reduction to two-potential case}
It is possible to reduce isospectral, non-isospectral AL hierarchies and their symmetries
from four-potential case to two-potential case.
\subsection{Reduction of $(S_n,T_n)=(0,0)$}
\subsubsection{Spectral problem}
Let us start from the four-potential AL-spectral problem \eqref{4p-new}.
Taking $(S_n,T_n)=(0,0)$ in \eqref{4p-new} yields
\begin{equation}
\Phi_{n+1}=\overline{U}_n\Phi_{n},~~~
\overline{U}_n=\left(
\begin{array}{cc} z^{2} & zQ_{n}\\
z^{-1}R_{n} & z^{-2}
\end{array}\right),~~~\Phi_{n}=\left(
\begin{array}{cc} \phi_{1,n}\\
\phi_{2,n}
\end{array}\right),
\label{2p-AL-new}
\end{equation}
but this is not the canonical form \eqref{2p-AL} yet.
Next we introduce a gauge transformation
\begin{equation}
\label{gauge}
\Psi_{n}=\left(
\begin{array}{cc} z^{-1} & 0\\
0 & 1
\end{array}\right)\Phi_n,~~~\Psi_{n}=\left(
\begin{array}{cc} \psi_{1,n}\\
\psi_{2,n}
\end{array}\right),
\end{equation}
under which \eqref{2p-AL-new} becomes
\begin{equation}
\Psi_{n+1}=M_n\Psi_{n},~~~
M_n=\left(
\begin{array}{cc} z^{2} & Q_{n}\\
R_{n} & z^{-2}
\end{array}\right),
\label{2p-AL-can}
\end{equation}
which is the canonical form \eqref{2p-AL} in the light of $z^2=\lambda$.
\subsubsection{Closeness discussion}
For convenience we introduce some notations. Let
\begin{equation}
\overline{u}_{n}=\left(
\begin{array}{c} Q_{n}\\R_{n}
\end{array}\right),~~
{\overline{K}}^{(m)}=\left(
\begin{array}{c} K_1^{(m)}\\ K_2^{(m)}
\end{array}\right)_{(S_n,T_n)=(0,0)},~~
\overline{\sigma}^{(m)}=\left(
\begin{array}{c} \sigma_1^{(m)}\\ \sigma_2^{(m)}
\end{array}\right)_{(S_n,T_n)=(0,0)},~~
\end{equation}
where $K_j^{(m)}$ and $\sigma_j^{(m)}$ are the $j$-th elements of $K^{(m)}$ and $\sigma^{(m)}$.
Noting that the reduction $(S_n,T_n)=(0,0)$ yields
$u_n|_{(S_n,T_n)=(0,0)}=(\overline{u}_{n}^T,0,0)^T$, it is necessary
to discuss whether the reduction is closed, i.e., whether
\begin{equation}
{K}^{(m)}\bigr|_{(S_n,T_n)=(0,0)}=\left(
\begin{array}{c} \overline{K}^{(m)}\\0\\0
\end{array}\right),~~
\sigma^{(m)}\bigr|_{(S_n,T_n)=(0,0)}=\left(
\begin{array}{c} \overline{\sigma}^{(m)}\\0\\0
\end{array}\right).
\end{equation}
Next we will see this is true for $m=2h, h\in \mathbb{Z}$.
\begin{theorem}
\label{T-reduce}
Under the reduction $(S_n,T_n)=(0,0)$, those even order members in the four-potential AL hierarchies
\eqref{hie-iso} and \eqref{hie-non} reduce to two-potential isospectral AL-hierarchies
\begin{equation}
\label{hie-iso-2p}
\overline{u}_{n, t_{2h}}=\overline{K}^{(2h)}=\overline{L}^{h}\overline{K}^{(0)}, \qquad h\in \mathbb{Z}
\end{equation}
and
\begin{equation}
\label{hie-non-2p}
\overline{u}_{n, t_{2h}}=\overline{\sigma}^{(2h)}=\overline{L}^{h}\overline{\sigma}^{(0)}, \qquad h\in \mathbb{Z},
\end{equation}
where
\begin{equation}
\overline{K}^{(0)}=(Q_{n},
-R_{n})^T,~~~\overline{\sigma}^{(0)}=(2n+\frac{1}{2})(Q_{n},
-R_{n})^T, \label{K0-sigma0-2p}
\end{equation}
and the recursion operator $\overline{L}$ is
\begin{equation}
\overline{L}=\!\!\biggl(
\begin{array}{cc} E& 0\\0 & E^{-1}
\end{array}\biggr)\!+\left(
\begin{array}{cc} -Q_{n}E\\R_{n}
\end{array}\right)\!(E-1)^{-1}(R_{n}E, Q_{n}E^{-1})+\gamma_{n}^{2}\left(
\begin{array}{cc} -Q_{n+1} E\\R_{n-1}
\end{array}\right)\!(E-1)^{-1}\biggl(\frac{R_{n}}{\gamma_{n}^{2}},
\frac{Q_{n}}{\gamma_{n}^{2}}\biggr).
\end{equation}
\end{theorem}
\begin{proof}
First, look at \eqref{K0} and \eqref{sigma0}. When $m=0$, the reduction $(S_n,T_n)=(0,0)$ is obviously closed and the resulting flows
$\overline{K}^{(0)}$ and $\overline{\sigma}^{(0)}$
are as given in \eqref{K0-sigma0-2p}. Meanwhile, by direct calculation
we find
\begin{align*}
L^2\bigr|_{(S_n,T_n)=(0,0)}= \left(
\begin{array}{cc} \overline{L}& \mathbf{0}\\
\mathbf{0} & H
\end{array}\right),
\end{align*}
where
\begin{align*}
H=\left(
\begin{array}{cc} \gamma_{n+1}^{2}E & -Q_{n+1}^{2} \\
R_{n}^{2} &
(1+Q_{n}R_{n})E^{-1}
\end{array}\right)-2\left(
\begin{array}{cc} Q_{n+1}E \\
-R_{n}\end{array}\right)(E-1)^{-1}(R_{n}, Q_{n}E^{-1}).
\end{align*}
That means the closeness is valid for those even order members of four-potential AL hierarchies
\eqref{hie-iso} and \eqref{hie-non} when we take $(S_n,T_n)=(0,0)$.
Thus the proof is completed.
\end{proof}
\subsubsection{Flows under gauge transformation}
With the closeness in hand, let us make a comparison for the present results and those in \cite{ZDJ-06-PLA}.
The isospectral flows and the recursion operator are exactly the same, but the non-isospectral flows are different.
The basic non-isospectral flow $\widehat{\sigma}^{(0)}$ given in \cite{ZDJ-06-PLA} is
\begin{equation}
\widehat{\sigma}^{(0)}=(2n+1)(Q_{n}, -R_{n})^T,
\label{sigma0-2p-PLA}
\end{equation}
and the difference from $\overline{\sigma}^{(0)}$ is
\begin{equation}
\overline{\sigma}^{(0)}-\widehat{\sigma}^{(0)}=-\frac{1}{2}(Q_{n},
-R_{n})^T.
\label{sigma0-2p-dif}
\end{equation}
To understand the difference on non-isospectral flows, we go back to the gauge transformation \eqref{gauge}.
Obviously, the Lax pair of $\overline{u}_{n, t_{0}}=\overline{\sigma}^{(0)}$
is composed by \eqref{2p-AL-new} and
\begin{equation}
\label{2p-new-time}
\Phi_{n,t_0}=\overline{V}^{(0)}_{n}\Phi_{n}, \qquad \overline{V}^{(0)}_n={V}^{(0)}_n |_{(S_n,T_n)=(0,0)},
\end{equation}
of which the zero-curvature equation reads
\begin{equation}
\label{2p-new-zce}
\overline{U}_{n,
t_0}=(E\overline{V}^{(0)}_{n})\overline{U}_{n}-\overline{U}_{n}\overline{V}^{(0)}_{n}.
\end{equation}
Under the gauge transformation \eqref{gauge}, \eqref{2p-new-time} turns out to be
\begin{equation}
\Psi_{n, t_0}=\widehat{V}^{(0)}_{n}\Psi_{n}
\end{equation}
where
\begin{equation}
\widehat{V}^{(0)}_{n}=\widehat{N}^{(0)}_{n}-
\left(\begin{array}{cc} z^{-1}z_{t}& 0\\
0 & 0
\end{array}\right),
~~z_t=\frac{z}{2},~~
\widehat{N}^{(0)}_{n}=\left(
\begin{array}{cc} z^{-1}& 0\\
0 & 1
\end{array}\right)
\overline{V}^{(0)}_{n}\left(
\begin{array}{cc} z& 0\\
0 & 1
\end{array}\right).
\end{equation}
Meanwhile the
zero-curvature equation \eqref{2p-new-zce} is transformed to
\begin{equation}
M_{n, t_0}-(E\widehat{V}^{(0)}_{n})M_{n}+M_{n}\widehat{V}^{(0)}_{n}=0,
\end{equation}
i.e.,
\begin{equation}
M_{n, t_0}-(E\widehat{N}^{(0)}_{n})M_{n}+M_{n}\widehat{N}^{(0)}_{n}
=\frac{1}{2}\left(
\begin{array}{cc} 0& -Q_{n}\\
R_{n} & 0
\end{array}\right).
\label{zce-gu-tr}
\end{equation}
Compared with \cite{ZDJ-06-PLA} we find that
the l.h.s. of \eqref{zce-gu-tr} equating to zero yields the non-isospectral equation
$\overline{u}_{n,t_0}=\widehat{\sigma}^{(0)}$ while
the r.h.s. just gives the difference \eqref{sigma0-2p-dif}.
Now we can see that it is just the time-dependent multiplier
$\left(
\begin{smallmatrix} z^{-1}& 0\\
0 & 1
\end{smallmatrix}\right)$
in the transformation \eqref{gauge} that contributes the extra term \eqref{sigma0-2p-dif} for the
non-isospectral flow $\overline{\sigma}^{(0)}$,
while in the isospectral case this multiplier adds nothing new since then $z$ is independent of time.
However, the extra term in \eqref{sigma0-2p-dif} is nothing but $-\frac{1}{2}\overline{K}^{(0)}$,
which means the obtained non-isospectral flow $\overline{\sigma}^{(2h)}$
is only a summation of $\widehat{\sigma}^{(h)}$ and $-\frac{1}{2}\overline{K}^{(2h)}$,
where $\widehat{\sigma}^{(h)}=\overline{L}^{h}\widehat{\sigma}^{(0)}$
is the non-isospectral flow derived in \cite{ZDJ-06-PLA}.
\subsection{Symmetries}
The closeness also guarantees reduction of symmetries and their algebraic structures.
From Theorems \ref{T-4.1} and \ref{T-4.2} one directly has
\begin{theorem}
\label{T-5.2}
Any given member $\overline{u}_{n, t_{2h}}=\overline{K}^{(2h)}$ in the isospectral two-potential AL hierarchy \eqref{hie-iso-2p}
can have two sets of symmetries
\begin{equation}
\label{sym-iso-2p}
\{\overline{K}^{(2s)}\} ~~~\mathrm{and} ~~~
\{\overline{\tau}^{(2h,
2s)}=2ht_{2h}\overline{K}^{(2h+2s)}+\overline{\sigma}^{(2s)}\}, ~~~
s\in \mathbb{Z},
\end{equation}
which form a centerless KMV algebra $\overline{\mathcal{S}}$ with
structure
\begin{subequations}
\label{alg-sym-iso-2p}
\begin{align}
[\![ \overline{K}^{(2l)}, \overline{K}^{(2s)}]\!] &=0,\\
[\![ \overline{K}^{(2l)}, \overline{\tau}^{(2h,2s)}]\!] &=2l\overline{K}^{(2l+2s)},\\
[\![ \overline{\tau}^{(2h, 2s)}, \overline{\tau}^{(2h, 2l)}
]\!] &=2(s-l)\overline{\tau}^{(2h, 2s+2l)},
\end{align}
\end{subequations}
and generators
\begin{equation}
\{\overline{\tau}^{(2h, 2)}~~ (\mathrm{or} ~\overline{\tau}^{(2h,
-2)}),~~~~\overline{\tau}^{(2h, 4)},~~~~ \overline{\tau}^{(2h,
-4)}, ~~~~\overline{K}^{(2)}~~ (\mathrm{or}~ \overline{K}^{(-2)})\}.
\end{equation}
\end{theorem}
\begin{theorem}
\label{T-5.3}
Any given member $\overline{u}_{n, t_{2h}}=\overline{\sigma}^{(2h)}$ in the non-isospectral two-potential AL hierarchy \eqref{hie-non-2p}
can have two sets of symmetries
\begin{subequations}
\label{sym-non-2p}
\begin{align}
&\overline{\eta}^{(2h, s)}=\sum^{s}_{j=0}C_{s}^{j}(2ht_{2h})^{s-j}\overline{\sigma}^{(2h-2jh)}~~~~(s=0, 1, 2, \cdots),\label{56-2p}\\
&\overline{\gamma}^{(2h, s)}
=\sum^{s}_{j=0}C_{s}^{j}(2ht_{2h})^{s-j}\overline{K}^{(-2jh)}~~~~(s=0,
1, 2, \cdots),\label{57-2p}
\end{align}
\end{subequations}
which form a centerless KMV algebra $\overline{\mathcal{H}}$ with
structure
\begin{subequations}
\label{alg-sym-non-2p}
\begin{align}
[\![ \overline{\eta}^{(2h, s)}, \overline{\eta}^{(2h, l)}]\!] & =2(l-s)h \overline{\eta}^{(2h, s+l-1)},\label{58-2p}\\
[\![ \overline{\gamma}^{(2h, s)}, \overline{\gamma}^{(2h, l)}]\!] & =0,\label{59-2p}\\
[\![ \overline{\eta}^{(2h, s)}, \overline{\gamma}^{(2h,
l)}]\!] & =2lh\overline{\gamma}^{(2h,s+l-1)},\label{60-2p}
\end{align}
\end{subequations}
and generators
\begin{equation}
\{\overline{\eta}^{(2h, 0)},~~~~ \overline{\eta}^{(2h, 3)},
~~~~\overline{\gamma}^{(2h, 1)}\}.
\end{equation}
\end{theorem}
\section{Conclusions}
We have derived isospectral and non-isospectral four-potential AL
hierarchies and their recursion operator. Both hierarchies have been
shown to have infinitely many symmetries and Lie algebras which
belong to centerless KMV algebras, respectively. These structures
are derived by means of zero-curvature representations of flows.
What is special is that these structures cover both positive and
negative order four-potential AL hierarchies, and each member in the
non-isospectral AL hierarchy also possesses two sets of symmetries
which compose a centerless KMV algebra too. These two points make
the algebraic structure of the AL hierarchies quite different from
many other Lax integrable systems. This is due to the symmetrical
form (in terms of $z$ and $z^{-1}$) of the AL spectral matrix (see
\eqref{4p-new}). Besides, it is now clear for the relation between
four-potential and two-potential AL hierarchies. All the even order
members in the four-potential isospectral and non-isospectral
hierarchies can be reduced to two-potential case by directly taking
$(S_n,T_n)=(0,0)$. The new recursion operator for two-potential case
is $L^2$. Meanwhile, the reduction keeps the algebraic structures of
symmetries invariant. The procedure and main results are possible to
apply to Ablowitz-Ladik systems with vector potentials and this will
be discussed elsewhere. This paper will be continued by the second
part in which we will discuss symmetries and recursion operator for
the integrable discrete nonlinear Schr\"odinger equation and the
discrete AKNS hierarchy.
\vspace{1cm}
\section*{Acknowledgement}
This project is supported by the National Natural Science Foundation
of China (10671121) and Shanghai Leading Academic Discipline
Project (No.J50101).
{\small
\section{INTRODUCTION}
The symmetry of the superconducting order parameter
in the cuprate high-$T_c$ superconductors had been
controversial\cite{Levi} before
phase-sensitive experiments firmly established
the $d_{x^2-y^2}$-wave pairing symmetry in YBa$_2$Cu$_3$O$_7$,
using tricrystal ring magnetometry\cite{tricrystal},
SQUID interferometry\cite{SQUID}, and single-junction
modulation\cite{junction}.
Migdal-Eliashberg-type diagrammatic theories find
$d$-wave pairing to be favored by antiferromagnetic (AF)
spin-fluctuation exchange\cite{Scalapino} and
$s$-wave pairing by the conventional electron-phonon
mechanism.\cite{Scalapino,Pao}
There is indeed strong experimental evidence for
the importance of {\em both \/} AF spin correlations\cite{afm_exp}
{\em and \/} electron-phonon interactions\cite{phonon_exp}
in the cuprates.
However, when combined in the diagrammatic approach,
the two mechanisms are mutually destructive, since
$d$-wave pairing is strongly suppressed
by phonons and $s$-wave pairing is suppressed
by AF spin fluctuations, respectively.
Also, the magnitude of the observed isotope effect in cuprate systems
away from ``optimal'' doping\cite{isotope_exp} points towards
an unusually strong electron-phonon effect which cannot be
accounted for in the diagrammatic approaches.\cite{Pao}
Strong-coupling studies,\cite{YBL,Zhong,Roeder,recent_EP}
going beyond the Migdal-Eliashberg regime, suggest that
the AF spin correlations themselves can effectively
enhance the electron-phonon effect, by lowering
the electron-phonon coupling threshold for polaron formation,
that is, the threshold for electron-phonon induced
self-localization\cite{Holstein} of the dopant-induced carriers
in the CuO$_2$ planes.
In the present paper, we show how the tunneling dynamics
of such self-localized holes in an AF correlated spin background
may lead to $d$- and other non-$s$-wave pairing states which
are {\it not} suppressed by coupling
to the lattice degrees of freedom.
A Berry phase factor in finite systems with time-reversal symmetry
has been relevant to the observation of half-odd-integer quantum
numbers in the spectrum of the Na$_3$ molecule,\cite{Delacretaz}
to the cross section of the H+H$_2$ reaction and its isotope
analogs,\cite{Wu} and to the problem of integer vs. half-odd-integer
spin tunneling in anisotropic potentials.\cite{Loss}
Contributions to the pair binding energy in the C$_{60}$ molecule
have also been discussed in terms of Berry phase
arguments.\cite{Auerbach}
In the present case, the non-$s$-wave symmetry is caused
by a ($-1$) Berry phase factor, associated with predominantly
second- and third-neighbor polaron tunneling processes.
It also determines the total momentum:
the one-polaron ground state has a momentum on the Fermi surface
of the half-filled tight-binding model on the square lattice.
The dynamics of few hole polarons reflects
the local AF spin correlations of many electrons
through the Berry phase factor.
This paper is organized as follows:
In Sec.~\ref{sec:model}, we introduce the basic
Holstein-Hubbard and Holstein-$ tJ $ model
Hamiltonians, and their extensions to include
2nd neighbor hopping or long-range Coulomb repulsion.
We then derive the effective action for the lattice
degrees of freedom in the adiabatic approximation.
In Sec.~\ref{sec:two-site}, we illustrate the basic physical
principles and formal concepts
of our adiabatic treatment of the polaron tunneling
in the context of a simple two-site model.
In Sec.~\ref{sec:validity}, we discuss the conditions
under which the adiabatic approximation is valid,
as well as its limitations when applied to polaronic
systems on large / macroscopic lattice systems.
In particular, we clear up some recent misunderstandings
concerning the applicability of the adiabatic approach to
polaronic systems.
In Sec.~\ref{sec:instanton}, we use an instanton approach
to elucidate the basic structure of the
low-energy tunneling dynamics of hole polarons in
Holstein-Hubbard or Holstein-$ tJ $ systems near half-filling.
We show that the dynamics of such hole polarons
is governed by an effective tight-binding Hamiltonian
which includes 2nd and 3rd neighbor hopping matrix elements
and a 1st neighbor attraction.
In Sec.~\ref{sec:symmetry}, we discuss the Berry phase factors
and, with the help of lattice symmetry operations, we
show how such phases can be properly assigned to
each segment of a closed tunneling path.
The Berry phase factors are then interpreted in
terms of quasiparticle statistics
and internal symmetries of
the many-electron wave functions.
In Sec.~\ref{sec:phase}, we analytically solve the effective model
to show how the Berry phase factors determine the total momenta and
internal symmetries of the few-hole-polaron wave functions.
In Sec.~\ref{sec:effective}, we report numerical results
for the effective polaron hopping and
effective pair binding energy as functions of the phonon frequency
and electron-phonon coupling strength.
In Sec.~\ref{sec:p_liquid}, we discuss
the implications
of our numerical results for a possible superconducting
pairing instability, the isotope effect and the
pseudo-gap in a hole polaron liquid at finite
doping concentration in the nearly half-filled
Holstein-$ tJ $ and -Hubbard systems and
compare to experimental observations in the
cuprates.
In Sec.~\ref{sec:summary}, we summarize the present work.
Some of the results presented in this paper were reported briefly
in an unpublished paper and proceedings.\cite{YZS}
\section{MODEL AND EFFECTIVE ACTION}
\label{sec:model}
We use mainly the Holstein-$ tJ $ model\cite{Zhong,Roeder}
and occasionally the Holstein-Hubbard model for comparison.
Later, we also include 2nd neighbor electron hopping
and/or long-range electron-electron repulsion terms in the model.
The total Hamiltonian is of the general form
\begin{equation}
H = H_{\rm e} + H_{\rm e-ph} + H_{\rm ph}
\;,\label{eq:model}
\end{equation}
where $H_{\rm e}$ is the purely electronic $ tJ $ or Hubbard model
part, defined on a two-dimensional (2D) square lattice with lattice
sites $j = 1 \dots N$ and on-site electron occupation numbers $n_j$,
as specified below.
\begin{equation}
H_{\rm e-ph}=C \sum_j u_j n_j
\label{eq:h_elph}
\end{equation}
is the Holstein electron-phonon (EP) interaction, coupling
the local oscillator displacement $u_j$ to the electron
on-site occupation $n_j$ with an EP coupling constant $C$ and
\begin{equation}
H_{\rm ph}= \frac{K}{2} \sum_j u_j^2
+ \frac{1}{2M} \sum_j p_j^2
\equiv H_{\rm K} + H_{\rm M}
\label{eq:h_phon}
\end{equation}
describes the non-interacting Einstein phonon system,
consisting of the bare harmonic lattice potential $H_{\rm K}$,
with restoring force constant $K$, and of the lattice kinetic
energy $H_{\rm M}$ with an atomic mass $M$ and
conjugate momenta $ p_j \equiv -i \hbar \partial /
\partial u_j $. If we rescale to dimensionless
displacements and conjugate momenta
\begin{equation}
\bar{u}_j \equiv u_j / u_{\rm P},\;\;\;\;\;
\bar{p}_j\equiv -i \partial / \partial \bar{u}_j
\;,
\label{eq:u_bar}
\end{equation}
with the small polaron shift
\begin{equation}
u_{\rm P} \equiv \frac{C}{K}
\;,
\label{eq:u_pol}
\end{equation}
then $H_{\rm e-ph}$ and $H_{\rm ph}$
can be completely parametrized in terms of only two
characteristic energies, the bare Einstein phonon energy
\begin{equation}
\Omega \equiv \hbar \Big(\frac{K}{M}\Big)^{\frac12}
\;,
\label{eq:omega}
\end{equation}
and the ionic-limit ($t\to0$)
small polaron binding energy
\begin{equation}
E_{\rm P}\equiv \frac{C^2} {K}
\;.
\label{eq:e_pol}
\end{equation}
All results in the following are therefore stated in terms
of $u_{\rm P}$, $\Omega$ and $E_{\rm P}$ only \cite{Zhong,Holstein}.
The $ tJ $ model is written as\cite{Anderson}
\begin{equation}
H_{\rm e} = -t \sum_{\langle i,j \rangle, \sigma} \left(
c_{i \sigma}^\dagger c_{j \sigma} + {\rm H.c.} \right)
+J \sum_{\langle i,j \rangle} \left(
{\bf S}_i \cdot {\bf S}_j - \frac{n_i n_j}{4} \right)
\label{eq:h_tj}
\end{equation}
with 1st neighbor electron hopping $t$
and AF exchange coupling $J$. Here, $c_{i \sigma}$ annihilates
an electron with spin $ \sigma $ at site $i$,
$ n_{i \sigma} = c_{i \sigma}^\dagger c_{i \sigma} $,
$ n_i = \sum_\sigma n_{i \sigma} $,
$ {\bf S}_i = \frac12 \sum_{\alpha, \beta}
c_{i \alpha}^\dagger {\bf \sigma}_{\alpha \beta} c_{i \beta} $
with ${\bf \sigma}\equiv(\sigma_x,\sigma_y,\sigma_z)$ denoting the
vector of Pauli spin matrices.
The Hilbert space is restricted to states with no double
occupancy at any site $j$, i.e., $n_j=0,1$ only.
The Hubbard model is written as
\begin{equation}
H_{\rm e} = -t \sum_{\langle i,j \rangle, \sigma} \left(
c_{i \sigma}^\dagger c_{j \sigma} + {\rm H.c.} \right)
+U \sum_i n_{i \uparrow} n_{i \downarrow}
\label{eq:h_hub}
\end{equation}
with on-site repulsion $U$ and no restrictions on the
on-site occupancy, i.e., $n_j=0,1,2$.
In the following, we set $ \hbar \equiv 1 $, $ t \equiv 1 $ and use
$ J = 0.5t $ or $ U = 8t $ in the $ tJ $ or Hubbard model,
respectively, unless stated otherwise.
In addition to the standard $ tJ $ and Hubbard electronic model,
we will also study the effects of additional, potentially
important electronic terms, the 2nd neighbor hopping $H_{t'}$
and the long-range Coulomb repulsion $H_{\rm lc}$. Namely,
\begin{equation}
H_{t'}=
-t'\sum_{\{ i,j \}, \sigma} \left(
c_{i \sigma}^\dagger c_{j \sigma} + {\rm H.c.} \right)
\label{eq:h_t2}
\end{equation}
where $\{ i,j \}$ denotes 2nd neighbor bonds and $t'$ is the
corresponding 2nd neighbor matrix element.
The long-range $1/r$ Coulomb repulsion is
\begin{equation}
H_{\rm lc}= \frac12
V_{\rm C} \sum_{i\neq j} \frac{ n_i n_j }{ | r_{ij} | }
\label{eq:h_lc}
\end{equation}
where $i$ and $j$ are summed independently over all sites
excluding $ i = j $ and $r_{ij}$ denotes the vector pointing
from $i$ to $j$, measured in units of the 2D lattice constant
$a\equiv 1$. On a lattice with periodic boundary conditions
we make the definition of $| r_{ij}|$ unique by requiring
$r_{ij}$ to be a vector of the shortest possible length
connecting $i$ to $j$, subject to all possible periodic boundary
shifts.
The matrix element $V_{\rm C}$ is thus the Coulomb repulsion
energy between two electrons at 1st neighbor distance.
To study the tunneling dynamics of self-localized holes,
we consider the path integrals for transition amplitudes
in imaginary time in the Born-Oppenheimer (adiabatic)
approximation. Following the standard Feynman-Trotter
approach \cite{Feynman-Hibbs}, we break up the Hamiltonian
in the imaginary-time evolution operator
\begin{equation}
e^{-\beta H}= \lim_{L\to\infty}
\Big( e^{-\Delta\tau H_0} e^{-\Delta\tau H_{\rm M}}\Big)^L
\label{eq:e_bh}
\end{equation}
where $\Delta\tau\equiv\beta/L$, $H_{\rm M}$ is the lattice
kinetic energy defined in Eq.~(\ref{eq:h_phon})
and the 0th order part $H_0\equiv H-H_{\rm M}$
commutes with all lattice displacement operators $u_j$.
At each time slice $\tau_k\equiv k\Delta\tau$, with $k=1\dots L$,
we now insert a complete set of electron-phonon basis states
$\mid\chi^{(\kappa)}_u\rangle$
which are chosen to be simultaneous eigenstates
of $H_0$ and of all $u_j$.
They can be written in the form
\begin{equation}
\mid\chi_u^{(\kappa)}\rangle = \mid\Psi^{(\kappa)}(u)\rangle
\times\mid \Phi_u\rangle
\label{eq:chi_u}
\end{equation}
where $\mid\Phi_u\rangle$ is the lattice part
and $\mid\Psi^{(\kappa)}(u)\rangle$
the electronic part of $\mid\chi_u^{(\kappa)}\rangle$.
Written in 1st quantized notation, the lattice part is simply
\begin{equation}
\Phi_u(x)=\delta(u-x) = \prod_j \delta(u_j-x_j)
\label{eq:phi_u}
\end{equation}
with lattice coordinate vectors
$x\equiv(x_1\dots x_N)$ and $u\equiv(u_1\dots u_N)$.
The electronic part $\mid\Psi^{(\kappa)}(u)\rangle$
denotes the $\kappa$-th electronic eigenstate
of the 0th order adiabatic Hamiltonian
\begin{equation}
H_0(u)= H_{\rm e} + H_{\rm e-ph}(u) + H_{\rm K}(u)
\;,
\label{eq:h_0}
\end{equation}
at fixed $u$. That is, $H_0(u)$ is
defined to act only on the electronic degrees of freedom
at {\it fixed} ($c$-number) lattice displacement coordinates
$u\equiv(u_1\dots u_N)$ and
\begin{equation}
H_0(u)\mid\Psi^{(\kappa)}(u)\rangle=
W_0^{(\kappa)}(u) \mid\Psi^{(\kappa)}(u)\rangle
\label{eq:w_0}
\end{equation}
where $\mid\Psi^{(\kappa)}(u)\rangle$ and its eigenenergy
$W_0^{(\kappa)}(u)$ depend parametrically on the lattice
displacements $u$.
The exact imaginary time evolution under $H$ can thus be represented
by a path integral with a Euclidean action, written at finite $L$ as
\begin{eqnarray}
S[u(\tau),\kappa(\tau)]&&= \sum_{k=1}^{L} \Big[
(M/2) \sum_j \frac{[ u_j(\tau_k) - u_j(\tau_{k-1}) ]^2}{\Delta \tau}
\nonumber \\
&&
+ \Delta \tau W_0^{(\kappa_k)}(u(\tau_k))
\nonumber \\
&&
- \ln \langle \Psi^{(\kappa_k)}(u(\tau_k)) \mid
\Psi^{(\kappa_{k-1})}(u(\tau_{k-1})) \rangle \Big]
\;.
\label{eq:s_exact}
\end{eqnarray}
The path integration is to be carried out both over the
continuous lattice coordinates
$u(\tau_k)\equiv(u_1(\tau_k)\dots u_N(\tau_k))$
and over the discrete electronic quantum numbers
$\kappa_k\equiv\kappa(\tau_k)$.
In the 0th order adiabatic approximation, corresponding formally
to the $M\to\infty$ limit, one neglects the imaginary time
evolution of $u$ altogether and replaces $u_k$ by a $\tau$-independent
classical field. The 1st order adiabatic approximation
restores the $\tau$-dependence of the lattice coordinates $u$,
under the simplifying assumption that the electrons follow the
motion of the lattice adiabatically. That is, the path integration
is restricted to configurations where, during $\tau$-evolution,
the electrons remain in the same eigenstate, i.e.,
$\kappa_k=\kappa_{k-1}\equiv\kappa={\rm const}$. Transitions between
different electronic eigenstates $\kappa_k\neq\kappa_{k-1}$ are
neglected.
Formally, this approximation restores the
leading order $1/M$ corrections
to the lattice dynamics.
At sufficiently low temperatures, one restricts the path
integral further to include only the electronic groundstate
$\kappa=0$. Suppressing the $(\kappa)$-superscript altogether,
one then arrives at the standard 1st order adiabatic
(Born-Oppenheimer) approximation, with an effective Euclidean action
\begin{eqnarray}
S_{\rm Ad} [u(\tau)] &&= \sum_{k=1}^{L} \big[
\frac{M}{2}\sum_j
\frac{[ u_j(\tau_k) - u_j(\tau_{k-1}) ]^2} {\Delta \tau}
\nonumber \\
&& + \Delta \tau W_0(u(\tau_k))
\nonumber \\
&&- \ln \langle \Psi(u(\tau_k)) \mid \Psi(u(\tau_{k-1})) \rangle \big]
\;.
\label{eq:s_eff0}
\end{eqnarray}
Note that $S_{\rm Ad}$ depends explicitly only on the $u$-coordinates
of the lattice.
The first ($M/2$-) term is the
standard form of the lattice kinetic
energy for discretized imaginary time (finite $L$).
The electronic groundstate
energy $W_0(u)$ plays the role
of a 0th-order (in $1/M$) effective lattice potential energy.
The last term, containing the logarithms of the electronic groundstate
wavefunction overlaps at adjacent time slices during $\tau$-evolution,
contains the Berry phase and $1/M$ corrections to the potential
energy, as we will now discuss.
In $ \exp( -S_{\rm Ad} [u(\tau)] ) $, the overlap product
\begin{equation}
Q [u(\tau)] \equiv \prod_{k=1}^{L}
\langle \Psi(u(\tau_k)) \mid \Psi(u(\tau_{k-1})) \rangle
\label{eq:overlap}
\end{equation}
enters which contains the Berry phase factor,
\begin{equation}
\exp( -i \theta [u(\tau)] )
\equiv Q [u(\tau)] /\mid Q [u(\tau)] \mid
\;,
\end{equation}
{\it i.e.},
$
\theta [u(\tau)] = -
{\rm Im}\ln( Q [u(\tau)]).
$
Due to time-reversal symmetry, all
$\mid\Psi(u(\tau_k))\rangle$ have
real amplitudes in an appropriately chosen electron
basis and hence the phase factor is real:
$ \exp( -i \theta [u(\tau)] )
= {\rm sign}( Q [u(\tau)] ) $.
Taking $ L \rightarrow \infty $, we can also rewrite
${\rm Re}(\ln Q[u(\tau)]) \equiv \ln \mid Q[u(\tau)] \mid $ in
$ S_{\rm Ad} [u(\tau)] $
as a $ 1/M $ correction to the effective lattice potential
which thus becomes
\begin{equation}
W(u) \equiv W_0(u) + W_1(u)
\;,
\end{equation}
with $W_1$ given by
\begin{equation}
W_1(u) = \frac1{2M} \sum_j
\langle \partial_{u_j} \Psi(u) \mid \partial_{u_j} \Psi(u) \rangle
\;.
\label{eq:w_1}
\end{equation}
Thus, the effective action for $L\to\infty$ becomes
\begin{eqnarray}
S_{\rm Ad} [u(\tau)] &&= \sum_{k=1}^{L} \big[
\frac{M}{2}
\sum_j \frac{[ u_j(\tau_k) - u_j(\tau_{k-1}) ]^2}{\Delta \tau}
\nonumber \\
&&+ \Delta \tau W(u(\tau_k)) \big] + i \theta [u(\tau)]
\;.\label{eq:s_eff}
\end{eqnarray}
Equivalent results can be derived in the Hamiltonian
approach to the adiabatic approximation. The basic idea
here is to restrict the full electron-lattice Hilbert
space to an ``adiabatic'' subspace which is spanned
by the set of 0th order adiabatic
electron-lattice eigenstates
$\mid\chi_u^{(\kappa)}\rangle$ defined above in
Eqs.~(\ref{eq:chi_u})-(\ref{eq:w_0})
with $\kappa$ restricted to the electronic
groundstate $\kappa=0$. The adiabatic subspace
thus consists of EP states of the general form
\begin{equation}
\mid \phi \rangle = \int d^Nu \;\; \phi(u) \mid \Psi_u^{(0)} \rangle
\label{eq:phi_ad}
\end{equation}
where $\phi(u)$ is an arbitrary (square-integrable) wavefunction
amplitude which depends only on the lattice coordinates $u$.
The basic approximation step is then to
project the full EP Hamiltonian $H$ onto
the adiabatic subspace. In this manner one
arrives at a 1st order effective
Hamiltonian $H_{\rm Ad}$ which is mathematically equivalent
to the 1st order adiabatic Euclidean action $S_{\rm Ad}$
in (\ref{eq:s_eff}), after $L\to\infty$.
Since the adiabatic EP states $\mid \phi\rangle$
can be expressed entirely in terms of their
wavefunction amplitude $\phi(u)$, one can recast
$H_{\rm Ad}$ into the form of an effective Hamiltonian
acting only on the lattice coordinates $u$ in $\phi(u)$,
without explicit reference to the underlying electronic
groundstate wavefunction $\mid\Psi_u^{(0)}\rangle$
contained in $\mid\phi\rangle$.
However, it is crucial to keep in mind the
formal relationship (\ref{eq:phi_ad})
between the full adiabatic EP state $\mid \phi\rangle$
and its lattice wavefunction amplitude $\phi(u)$
if one wants to properly compare 1st order adiabatic results
to exact results, obtained by e.g. numerically diagonalizing
the full EP Hamiltonian on small model
clusters \cite{RaTh}.
In systems obeying standard harmonic lattice dynamics, the
0th order Born-Oppenheimer ``energy surface'' $W_0(u)$ exhibits
one unique global minimum configuration $u^{({\rm min})}$ which is,
in terms of energy or in terms of configurational distance,
well separated from other, if existent, local minima. In that case,
the path integral is dominated by small-amplitude ``harmonic''
fluctuations around $u^{({\rm min})}$ and a description of the
lattice dynamics in terms of renormalized harmonic oscillators,
i.e., phonons, remains valid.
Since the displacement excursions around $u^{({\rm min})}$
are small, so are the fluctuations in the electronic
wavefunction $\mid\Psi(u)\rangle$;
hence the small-amplitude (``phonon'') paths
all have $\theta[u(\tau)]=0$ and Berry phase
effects are negligible. Also, the $u$-derivatives
of $\mid\Psi(u)\rangle$ entering into $W_1$ are well behaved and
the $m$-th order $u$-derivatives of the overlap
matrix elements
$\langle \partial_{u_j} \Psi(u) \mid \partial_{u_j} \Psi(u) \rangle$
are typically of the order of inverse lattice constants
or inverse atomic distances raised to the $(m+2)$-th power.
The $W_1$-contribution to the harmonic restoring force constants,
for example, are thus smaller than the 0th order
$W_0$-contributions by factors
of order of the 4th power of the lattice oscillator
zero-point displacement amplitude over the lattice constant.
Thus, the electronic overlap factor effects
$W_1(u)$ and $\theta[u(\tau)]$ can be altogether neglected.
By contrast, in polaronic systems
the 0th order lattice potential $W_0$ exhibits a large number
of nearly degenerate local minima. The low-energy
lattice dynamics is dominated by
tunneling processes between the local minima
which requires anharmonic large-amplitude
excursions of the local displacement coordinates $u_j$
and large local rearrangements of the electronic wavefunction
$\mid\Psi(u)\rangle$ \cite{Zhong}.
In that case electronic overlap
effects arising from both $\theta[u(\tau)]$ and
$W_1(u)$ can become quite important.
\section{TWO-SITE PROBLEM}
\label{sec:two-site}
The two-site version of the Holstein-Hubbard model \cite{EmHo,capone}
(\ref{eq:model}--\ref{eq:h_phon})
is a simple toy problem which retains some essential physical
features of the lattice polaron problem. We will use
it here to elucidate the basic underlying physical ideas
and formal concepts of
our adiabatic treatment of polaron formation
and polaron tunneling
dynamics and, also, to
test the validity and illustrate some important limitations
of the adiabatic approximation.
We restrict ourselves to the single-electron case
on two sites, with an electronic intersite hybridization $t$.
Hence there are no correlation (Hubbard-$U$) effects
and the adiabatic electronic wavefunction $\mid\Psi(u)\rangle$
can be solved exactly by diagonalizing
$H_0(u)$ which reduces to a $2\times 2$ matrix.
The two sites are labeled $1$ and $2$ with on-site
oscillator coordinates $u_1$ and $u_2$ and on-site
electron occupation numbers $n_1$ and $n_2$.
Introducing symmetrized displacement coordinates
\begin{equation}
u_\pm=(u_1 \pm u_2)/\sqrt{2}
\;,
\label{eq:u_pm}
\end{equation}
$W_0$ and $W_1$ can be written as
\begin{equation}
W_0(u) \equiv W_{0+}(u_+) + W_{0-}(u_-) \;,
\label{eq:w_0-2s}
\end{equation}
where
\begin{eqnarray}
W_{0+}(u_+) &=& \frac{K}{2}u_+^2 + \frac{C}{\sqrt{2}}u_+
\nonumber \\ &=&\left[ \frac{1}{2} \Big(\frac{u_+}{u_{\rm P}}\Big)^2
+ \frac{1}{\sqrt{2}} \Big(\frac{u_+}{u_{\rm P}}\Big) \right]E_{\rm P}
\;,
\label{eq:w_0plus-2s}
\end{eqnarray}
\begin{eqnarray}
W_{0-}(u_-) &=& \frac{K}{2}u_-^2 -
\sqrt{ \frac{C^2u_-^2}{2}+t^2 }
\nonumber \\ &=&\left[ \frac{1}{2} \Big(\frac{u_-}{u_{\rm P}}\Big)^2
- \sqrt{ \frac{1}{2} \Big(\frac{u_-}{u_{\rm P}}\Big)^2
+\Big(\frac{t}{E_{\rm P}}\Big)^2 } \right]E_{\rm P}
\;,
\label{eq:w_0minus-2s}
\end{eqnarray}
and
\begin{equation}
W_1(u)\equiv W_{1-}(u_-)=
\frac{1}{4}\frac{\Omega^2}{E_{\rm P}}
\frac
{
\Big( \frac{t}{ E_{\rm P} } \Big)^2
}
{
\left[ \Big(\frac{u_-}{u_{\rm P}}\Big)^2
+2 \Big( \frac{t}{ E_{\rm P} } \Big)^2 \right]^2
}
\;.
\label{eq:w_1-2s}
\end{equation}
There is no Berry phase, i.e., $\theta[u(\tau)]\!\equiv\!0$, and the
problem becomes equivalent to solving the Hamiltonian dynamics of
a (fictitious) quantum particle of mass $M$
moving in a two-dimensional $(u_+,u_-)$-plane subject
to the effective potential $W(u)=W_0(u)+W_1(u)$.
Because of (\ref{eq:w_0-2s}) and (\ref{eq:w_1-2s})
this dynamics is separable when written in terms of
$u_+$- and $u_-$-coordinates.
Since $u_+$ couples only to the total electron charge
$n_+\equiv n_1+n_2$, which is conserved, the $W_{0+}$-part of
$W_0$ is just a harmonic potential, with its equilibrium position
shifted to
\begin{equation}
u_+^{(0)}= -u_{\rm P}/\sqrt{2}
\label{eq:u_+0}
\end{equation}
by the constant pulling force $Cn_+/\sqrt{2}$ exerted by the total
electron charge. Also, the electron groundstate
wavefunction $\mid\Psi(u)\rangle\equiv\mid\Psi(u_-)\rangle$,
and hence $W_1$, does not depend on $u_+$.
The dynamics of the $u_+$-coordinate is therefore trivial,
at least for processes conserving the total electron number.
Since $u_-$ couples to the charge imbalance
$n_-\equiv n_1-n_2$ between the two sites,
the shape of $W_{0-}$ is renormalized by the EP coupling
and $W_1$ contributes to the $u_-$-dynamics. We first consider
the 0th order contribution $W_{0-}$,
shown in Fig.~\ref{fig:double_well}(a)
for several $E_{\rm P}$-values.
For small $E_{\rm P}$, $W_{0-}$ retains a single global minimum at
$u_-=0$. Its harmonic restoring force
\begin{equation}
K_{0-} \equiv \frac{\partial^2}{\partial u_-^2} W_{0-}(u_-=0)
= \Big( 1 - \frac{E_{\rm P}}{2t} \Big) K
\label{eq:k_0-}
\end{equation}
softens with increasing $E_{\rm P}$ and changes sign
when $E_{\rm P}$ reaches a critical value
\begin{equation}
E_{\rm P}^{({\rm crit})} = 2 t
\;.
\label{eq:e_p-crit}
\end{equation}
For $E_{\rm P}>E_{\rm P}^{({\rm crit})}$, the character of $W_{0-}$
changes qualitatively: $W_{0-}$ acquires two degenerate minima
at $u_-=\pm u_{-}^{(0)}$, separated by a maximum at $u_-=0$, with
\begin{equation}
u_{-}^{(0)} = \sqrt{ 1 - \Big( \frac{2t}{E_{\rm P}}\Big)^2 }
\frac{u_{\rm P}}{\sqrt{2}}
\label{eq:u_-0}
\end{equation}
where $u_{\rm P}=C/K$ is the polaron shift (\ref{eq:u_pol}).
$u_{-}^{(0)}$ approaches $u_{\rm P}/\sqrt{2}$
in the strong-coupling limit
\begin{equation}
E_{\rm P}\gg t
\;.
\label{eq:strong_ep}
\end{equation}
The height of the 0th order potential
barrier separating the two minima,
\begin{equation}
\Delta_{B0} \equiv W_{0-}(0)-W_{0-}(\pm u_-^{(0)}) =
\Big( \frac{1}{2} - \frac{t}{E_{\rm P}} \Big)^2 E_{\rm P}
\label{eq:delta_b0}
\end{equation}
increases with $E_{\rm P}$ and approaches $ E_{\rm P}/4 $
in the strong-coupling limit (\ref{eq:strong_ep}).
The physical origin of the double-well potential can be
most easily understood starting from the ``ionic'' ($t=0$)
limit of the model:
For $t=0$, the two electronic eigenstates of $H_0(u)$,
\begin{equation}
\mid \Psi^{(\ell 1)} \rangle \equiv\mid n_1=1,\;\; n_2=0\rangle
\label{eq:psi-i}
\end{equation}
and
\begin{equation}
\mid\Psi^{(\ell 2)}\rangle \equiv\mid n_1=0,\;\; n_2=1\rangle
\label{eq:psi-ii}
\end{equation}
have the electron completely localized on site $1$ and $2$,
respectively, with eigenenergies
\begin{equation}
W^{(\ell 1,2)}(u_-)=\frac{K}{2}(u_- \pm u_{\rm P}/\sqrt{2})^2
- \frac14 E_{\rm P}
\;,
\label{eq:w-i-ii}
\end{equation}
where the upper (lower) sign refers to $W^{(\ell 1)}$
($W^{(\ell 2)}$), as shown by the two parabolic potential curves
in Fig.~\ref{fig:double_well}(a).
Assuming $C>0$, $\mid\Psi^{(\ell 1)}\rangle$ is the groundstate
for $u_-<0$ and $\mid\Psi^{(\ell 2)}\rangle$ for $u_->0$. At $u_-=0$,
the two parabolic eigenenergy curves $W^{(\ell 1)}(u_-)$
and $W^{(\ell 2)}(u_-)$ intersect, both states
are degenerate and the groundstate wavefunction changes
discontinuously as a function of $u_-$.
When the hybridization $t$ is turned on,
the two fully localized wavefunctions $\mid\Psi^{(\ell 1)}\rangle$
and $\mid\Psi^{(\ell 2)}\rangle$ become mixed,
the electronic degeneracy at $u_-=0$ is lifted
and a minimum excitation gap of $2t$ opens up between the
electronic ground- and 1st excited states. The sharp cusp
at $u_-=0$ in the $t=0$ double-parabolic potential function
\begin{eqnarray}
W_{0-}(u_-)\mid_{t=0}&&=\min(W^{(\ell 1)}(u_-), W^{(\ell 2)}(u_-))
\nonumber\\
&&= \frac{K}{2}(\mid u_-\mid-u_{\rm P}/\sqrt{2})^2
- \frac14 E_{\rm P}
\label{eq:w_0t0}
\end{eqnarray}
[see Fig.~\ref{fig:double_well}(a)] is
rounded by the finite $t$; as a function of
$u_-$, the groundstate wavefunction $\mid\Psi(u_-)\rangle$
now changes continuously at $u_-=0$.
However, $\mid\Psi(u_-)\rangle$ still has predominantly
$\mid\Psi^{(\ell 1)}\rangle$-character near
$u_-=-u_{-}^{(0)}$ and predominantly
$\mid\Psi^{(\ell 2)}\rangle$-character near $u_-=u_{-}^{(0)}$.
With increasing $t$, the tunneling barrier height decreases,
initially by about $t$. The barrier vanishes when $t$
reaches $t^{({\rm crit})}=E_{\rm P}/2$
which is equivalent to the above condition
(\ref{eq:e_p-crit}), for $E_{\rm P}^{({\rm crit})}$.
If one examines the groundstate wavefunction $\mid\Psi(u_-)\rangle$
and its charge distribution
$\langle\Psi(u_-)\mid n_j\mid\Psi(u_-)\rangle$
for $E_{\rm P}>E_{\rm P}^{({\rm crit})}$,
near the two potential minima
$\pm u_{-}^{(0)}$, one thus finds the electron predominantly
localized at site $1$ when $u_-\cong-u_{-}^{(0)}$
and predominantly at site $2$ when $u_-\cong +u_{-}^{(0)}$, assuming
again $C>0$ here and in the following.
By contrast, at the potential minimum $u_-=0$
in the regime $E_{\rm P}<E_{\rm P}^{({\rm crit})}$,
the electron charge is delocalized evenly
between the two sites $1$ and $2$.
Thus, at the level of the 0th order adiabatic approximation
(i.e., neglecting the lattice kinetic energy),
the transition from the single-well potential
case $E_{\rm P}<E_{\rm P}^{({\rm crit})}$ to the double-well case
$E_{\rm P}>E_{\rm P}^{({\rm crit})}$
is essentially a transition from a delocalized non-degenerate
groundstate ($u_-=0$)
to a localized degenerate groundstate ($u_-=\pm u_{-}^{(0)}$).
In the former case, the electron's delocalization energy
dominates and it is energetically favorable for the electron
wavefunction to be spread out between the two sites; in the latter
case, the EP coupling dominates and favors localizing the electron
charge on only one of the two sites. The lattice spontaneously
distorts so as to set up an attractive EP ``potential well''
which binds and localizes the electron. The electronic binding
energy thus gained in turn stabilizes the local lattice distortion.
This self-localization mechanism is the essence of polaron formation.
Localizing the electron on either one
of the two sites is energetically equivalent due to the
reflection symmetry ($(1,2)\to(2,1)$) of the underlying
Hamiltonian. At the level of the 0th order adiabatic approximation,
this symmetry is broken in the 2-fold degenerate 0th
order groundstates $u_-=\pm u_{-}^{(0)}$.
The existence of two {\it degenerate} local minima
in $W_{0-}$ can thus be understood as a direct consequence of the
symmetry breaking which accompanies the self-localization transition.
In the 1st order adiabatic approximation, the lattice kinetic energy
restores this symmetry by inducing tunneling processes between the
two potential minima, thus giving rise to
a non-degenerate groundstate in which the two degenerate
0th order states are admixed with equal probability
weight.
From the above discussion it is clear that such
tunneling processes are accompanied by a
transfer of electron charge between the two sites.
These {\it lattice} tunneling processes, occurring within
the EP-induced multiple-well Born-Oppenheimer potential,
constitute the basic low-energy mechanism whereby
self-localized electrons can move through the lattice.
At higher temperatures, thermally activated
over-the-barrier hopping will dominate the
polaron transport
\cite{Holstein,EmHo}
which, again, can be described
as a purely lattice dynamical phenomenon.
Thus, within the framework of the 1st order
adiabatic approximation, polaron
formation and polaron dynamics is fundamentally
reduced to a problem of {\it non-linear lattice}
dynamics.
We now turn to the 1st order potential correction $W_{1-}(u_-)$
(\ref{eq:w_1-2s}) in the two-site problem, shown for several
values of $t/E_{\rm P}$ in Fig.~\ref{fig:double_well}(b).
Since $W_1(u)$, according to (\ref{eq:w_1}),
is controlled by the $u$-gradient
of the electron wavefunction
$\mid\Psi(u)\rangle$, we should expect it to
exhibit peaks wherever $\mid\Psi(u)\rangle$ varies most
rapidly with $u$. In the two-site problem, this occurs at
$u_-=0$ where $\mid\Psi(u)\rangle$ changes its character from being
predominantly localized on site $1$ to being localized
on site $2$, as discussed above. For large $\mid u_-\mid$,
$\mid\Psi(u_-)\rangle$ approaches a constant, either
$\mid\Psi^{(\ell 1)}\rangle$
or $\mid\Psi^{(\ell 2)}\rangle$,
hence $W_{1-}(u_-)\to 0$ for $\mid u_-\mid\to\infty$.
In the polaron regime $E_{\rm P}>E_{\rm P}^{({\rm crit})}$,
the primary effect of $W_1$
is to enhance the tunneling barrier separating the two
potential minima. In addition, $W_{1-}(u_-)$ will also tend to shift
the two polaronic potential minima further apart,
thus causing the tunneling barrier to become wider than
in the 0th order potential $W_0$. Both of these $W_1$-effects
tend to suppress the tunneling rate through the barrier.
Even though $W_1(0)$ may be small compared
to the 0th order barrier height $\Delta_{B0}$ (\ref{eq:delta_b0}),
its effect on the polaron tunneling rates can be
quantitatively of some importance,
since tunneling rates are typically
exponentially sensitive to changes
in the tunneling barrier.
In the delocalized regime $E_{\rm P}<E_{\rm P}^{({\rm crit})}$,
the primary effect of $W_1$ is to soften the harmonic restoring
force constant $K_-\equiv \partial_{u_-}^2 W_-(0)$ by an amount
\begin{eqnarray}
K_{1-} &&\equiv \frac{\partial^2}{\partial u_-^2} W_{1-}(u_-=0)
\nonumber \\
&&= - \frac18 \Big(\frac{\Omega}{t}\Big)^2
\Big(\frac{E_{\rm P}}{t}\Big)^2 K < 0
\;.
\label{eq:k_1-}
\end{eqnarray}
Thus, $W_1$ also lowers the critical $E_{\rm P}$ for the onset
of polaron formation. However, in the large-$M$ limit
where the adiabatic approximation is valid, that is,
for $\Omega\ll t$ (see below), these corrections are smaller
than the 0th order results (\ref{eq:k_0-},~\ref{eq:e_p-crit})
by factors of order $(\Omega/t)^2\times(E_{\rm P}/t)^2$.
Provided $t$ and $E_{\rm P}$ are of comparable magnitude
and $\Omega \lesssim t$ (see below),
$W_1$ does not qualitatively
alter the basic structure of the lattice potential $W$
in either coupling-strength regime.
However, $W_1$ can become qualitatively important
in suppressing certain non-adiabatic
processes in lattice systems,
as will be discussed in the next section.
\section{VALIDITY AND LIMITATIONS OF THE ADIABATIC APPROXIMATION}
\label{sec:validity}
The basic criterion for the validity of the adiabatic
approximation is that the longest time scale of
the electronic motion should be short compared to the
shortest time scale of the lattice motion or, equivalently,
the lowest electronic frequency scale should be large
compared to the highest lattice frequency scale.
In the two-site problem, the lowest electronic
frequency scale is the excitation energy between the
electronic groundstate $\mid\Psi(u)\rangle$ and the 1st excited
state which is at least $2t$ (at $u_-=0$) or larger.
The highest lattice frequency scale is the phonon
energy $\Omega$ and hence we expect the adiabatic
approximation to work, provided that
\begin{equation}
\Omega \ll 2t
\;.
\label{eq:valid}
\end{equation}
In the polaron regime $E_{\rm P}>E_{\rm P}^{({\rm crit})}$,
the lattice (not the electron!) motion acquires an additional,
low frequency scale, given by the polaronic tunneling splitting
$2t_P$ between the ground- and 1st excited states in
the double-well lattice potential $W(u)$.
This tunneling energy scale
is typically smaller than or, at most, comparable to
the bare phonon energy scale $\Omega$,
given the conditions where a polaronic
double-well forms in the first place.
Hence, the basic criterion (\ref{eq:valid})
applies in the polaronic regime just as well
as in the delocalized regime, regardless of the
electron-phonon coupling strength. Criterion (\ref{eq:valid})
applies even in the strong-coupling regime
(\ref{eq:strong_ep}) where $2t_P$ becomes orders of
magnitude smaller than $\Omega$.
Although the foregoing point has been established
for some 30 years now \cite{Holstein,EmHo,capone}, we wish
to strongly re-emphasize it here because
a great deal of confusion about this
has been created in the more recent literature
on the two-site problem, as for example in
Ref.~\onlinecite{RaTh}.
The basic error in some of the more recent
work is to regard the polaron tunneling splitting
$2t_P$, rather than $2t$, as the lowest
relevant electronic energy scale. Doing so, one then arrives
at the much too restrictive validity criterion
\begin{equation}
\Omega \ll 2t_P
\;.
\label{eq:wrong}
\end{equation}
If correct, this would imply that the polaron regime
$E_{\rm P}>E_{\rm P}^{({\rm crit})}$ can not be treated
in the adiabatic approximation, since typically
$t_P\lesssim \Omega$ even under the most favorable
conditions. In the strong-coupling regime
(\ref{eq:strong_ep}) where $t_P \ll \Omega$
the adiabatic approximation should break down
completely according to (\ref{eq:wrong}).
The fundamental misconception here is that $2t_P$
is of course {\it not} the lowest {\it electronic}
energy scale, but rather represents an energy scale
of the {\it lattice} motion,
as discussed above. The relevant lowest electronic
energy splitting, between the electronic
ground- and 1st excited states
{\it at fixed lattice coordinate} $u$
is at least $2t$ in the two-site model,
regardless of whether $E_{\rm P}$ is small or large.
To illustrate this point, we have generated exact
numerical solutions of the two-site problem
using the full Hamiltonian $H$ without any approximation,
and compared them to solutions of the 1st order
adiabatic effective Hamiltonian
$H_{\rm Ad}\equiv H_{\rm M}+W$, corresponding
to the effective action $S_{\rm Ad}$ from (\ref{eq:s_eff}).
For both the exact and the adiabatic problems, we have used
a sufficiently fine discretization of the $u_-$ coordinate
and a sufficiently large cut-off at large $u_-$ to ensure
a numerical accuracy of better than $1\%$ in the calculated
energy splittings over the entire parameter range
studied. In Fig.~\ref{fig:tunnel2site}, we show the logarithm
of the polaron tunneling splitting $2t_P$,
that is, the excitation energy from the ground- to the
1st excited states of the full electron-phonon system,
as a function of $E_{\rm P}/\Omega$ for $t\equiv 1$ and four
different EP couplings, $E_{\rm P}=2.5$, $3$, $4$ and $8$
which are well inside the polaronic regime
($E_{\rm P}>E_{\rm P}^{({\rm crit})}$).
In addition to the exact solution,
we show two different adiabatic solutions in
Fig.~\ref{fig:tunnel2site}, one obtained
with the full adiabatic lattice potential $W\equiv W_0+W_1$,
the other using only the 0th order potential, $W\cong W_0$.
These are referred to in the following
as the ``full'' and as the ``simple''
adiabatic solutions, respectively. As expected from
the Holstein-Lang-Firsov strong-coupling
expansion \cite{Holstein,capone,LaFi}
and from semi-classical (WKB) arguments,
the tunneling splitting at fixed $E_{\rm P}$ and
$t$ decreases exponentially with $1/\Omega$,
as indicated by a roughly linear dependence of
$\ln (2t_P)$ on $1/\Omega$ in Fig.~\ref{fig:tunnel2site}.
Remarkably, the full adiabatic result agrees with the
exact solution to better than $14\%$ over a parameter region
$0.15t<\Omega<0.5t$ wherein $2t_P$ varies by more than 9
orders of magnitude, including the regime where $2t_P$
is orders of magnitude smaller than $\Omega$. The simple
($W\cong W_0$) adiabatic solution reproduces the qualitative features
of the $1/\Omega$- and $E_{\rm P}$-dependence of $2t_P$
quite well, but the quantitative agreement is noticeably
worse than for the full adiabatic solution.
The agreement between the full adiabatic
and the exact results is all the
more convincing in light of the fact that the tunneling
splitting is ``exponentially sensitive'' to small
errors or changes in the wavefunction inside the tunneling barrier.
Thus, our comparison of the tunneling splittings
constitutes a much more stringent test of the underlying
approximations than a comparison of, say,
low-lying-state expectation values or wavefunction
amplitudes. Other exact numerical results for the two-site problem,
such as reported e.g. in Ref.~\onlinecite{RaTh}, are generally
in equally good agreement with the corresponding adiabatic
solution, provided, that is, one exercises enough care
to use the proper adiabatic wavefunctions
$\mid\phi\rangle$, Eq.~(\ref{eq:phi_ad}), in carrying out
the comparison.
As expected from (\ref{eq:valid}), the agreement between adiabatic
and exact results deteriorates at high phonon frequencies
when $\Omega$ becomes comparable to $t$. As a practical
matter, even for $\Omega\cong 2t$, the agreement is still quite
acceptable. For applications to lattice systems,
it is of interest to explore in some detail how
the adiabatic approximation actually breaks down as one enters
into the ``anti-adiabatic'' regime
\begin{equation}
t\ll\Omega
\;.
\label{eq:anti_ad}
\end{equation}
As a limiting case, we consider the ionic
limit $t\to 0$, already discussed above. Here, the
Holstein-Hubbard problem can be trivially solved exactly
\cite{SZF}. Obviously there cannot
be any electron tunneling between the two
sites and the polaron tunneling splitting $2t_P$
vanishes.
By contrast, in the simple adiabatic approximation
$W\cong W_0$, $W_{0-}$ approaches the double-parabolic
potential (\ref{eq:w_0t0})
for $t\to0$, which has a tunneling
barrier of finite height and width.
The simple adiabatic approximation would thus predict
a non-vanishing finite tunneling splitting $2t_P>0$
even for $t=0$, a clearly unphysical result.
If instead one uses the full adiabatic approximation,
with $W=W_0+W_1$, the correct qualitative
physical behavior of $2t_P$ is restored by the $W_1$-term,
shown in Fig.~\ref{fig:double_well}(b):
According to Eq.~(\ref{eq:w_1-2s})
the $W_1$-peak height (at fixed $E_{\rm P}$ and $\Omega$)
diverges as $t^{-2}$, while at the same time
its peak width vanishes, but only
linearly in $t$ in the limit $t\to0$. It is then easy to show
that the transmission amplitude through the
$W_1$-barrier vanishes, that is, the barrier becomes
impenetrable in the limit $t\to0$ which forces
$2t_P\to0$ for $t\to0$.
Thus, as far as the tunneling
splitting $2t_P$ is concerned, the full adiabatic approximation
reproduces qualitatively the correct physical behavior
even in the extreme anti-adiabatic regime.
The actual failure of the full adiabatic approximation in the
$t\to 0$-limit is a more subtle problem.
It consists of the unphysical constraint being imposed
on the dynamics of the $u_-$-coordinate by the
impenetrability of the $W_1$-barrier. For $t\to0$, the
$W_1$-barrier forces the lattice wavefunction $\phi(u)$ in
(\ref{eq:phi_ad})
to vanish identically either to the right ($u_->0$)
or to the left ($u_-<0$) of the barrier. Thus,
the amplitude for propagation from an initial $u_-<0$
to a final $u_->0$ (or reverse) vanishes in
the full adiabatic approximation at $t=0$.
In the exact solution of the $t=0$ problem,
this constraint does not exist; the lattice is free to propagate
with some finite amplitude from $u_-<0$ to $u_->0$.
In the exact $t=0$ solution, the lattice dynamics is
governed either by the left or
the right parabolic well,
$W^{(\ell 1)}$ or $W^{(\ell 2)}$, corresponding respectively to the
left-localized or to the right-localized electron states,
$\mid \Psi^{(\ell 1)}\rangle$ or $\mid \Psi^{(\ell 2)}\rangle$,
discussed in Sec.~\ref{sec:two-site}.
The problem with the adiabatic approximation is that
the $t=0$ electron groundstate $\mid \Psi(u)\rangle$
exhibits a level crossing and thus changes discontinuously
at $u_-=0$, as discussed above. The adiabatic approximation,
by construction, excludes transitions between, say, the
electronic ground- and 1st excited eigenstates.
But this is just what happens at $u_-=0$ in the $t\to0$-limit:
If the lattice coordinate crosses $u_-=0$ from the left, say,
under exact time evolution, the electron remains in its localized
state $\mid\Psi^{(\ell 1)}\rangle$, which is the groundstate only
for $u_-<0$, but becomes the 1st excited state when $u_->0$.
The adiabatic approximation on the other hand
forces the electron to remain in the groundstate
which changes discontinuously at $u_-=0$, from
$\mid\Psi^{(\ell 1)}\rangle$ to $\mid\Psi^{(\ell 2)}\rangle$.
In the two-site problem, the foregoing impenetrability
constraint causes only a small error, of order
$\exp(-E_{\rm P}/\Omega)$,
in the low-lying lattice eigenstates and energies
if the lattice oscillator zero-point amplitude
is small compared
to the double-well separation $\sqrt{2}u_{\rm P}$,
that is, if $\Omega\ll E_{\rm P}$.
However, the impenetrability constraint
may introduce a qualitative failure
of the adiabatic approximation
if applied to large systems $N\to\infty$
and tunneling processes which
transfer a polaron in a single step
over large distances, as we will now discuss.
Let us consider for simplicity the
case of the Holstein model for
just a single electron in a large lattice
with sufficiently strong $E_{\rm P}$ to form a polaron.
Suppose the polaron is
localized at some site $\xi$, say, and we want to study the
tunneling barrier for transferring the polaron
in a single tunneling step to a distant
site $\zeta=\xi+r$, i.e., with $|r|\gg a$ where $a$
is the lattice constant.
Let $u^{(\xi)}\equiv(u^{(\xi)}_1\dots u^{(\xi)}_N)$
denote that lattice configuration which
minimizes $W_0(u)$ and localizes
the polaron around the ``centroid site'' $\xi\in\{1\dots N\}$.
That is, $|u^{(\xi)}_\ell|$ and the
corresponding electron charge density
$\langle n_\ell\rangle^{(\xi)}$ are
maximal at $\ell=\xi$ and die out exponentially
at large distances $|\ell-\xi|$ from the centroid.
Likewise, let $u^{(\zeta)}$ denote the lattice configuration
which localizes the polaron around site $\zeta$. By
lattice translational invariance
\begin{equation}
u^{(\zeta)}_\ell= u^{(\xi)}_{\ell-r}
\;,
\label{eq:latt_trans}
\end{equation}
if $\zeta=\xi+r$. Notice that polaron formation
breaks the translational symmetry of the lattice
in the 0th order adiabatic approximation.
As a consequence, $W_0$ exhibits $N$ degenerate
local minima, corresponding to the $N$ different,
but translationally equivalent
$u^{(\xi)}$ configurations on an $N$-site lattice
with periodic boundary conditions.
This is the lattice analogue to the breaking of
reflection symmetry in the two-site problem.
Let $u^{(\zeta\xi)}(s)$ denote the linear path segment
in the $N$-dimensional $u$-space connecting
$u^{(\xi)}$ to $u^{(\zeta)}$, i.e.,
\begin{equation}
u^{(\zeta\xi)}(s) = \Big({1\over2}-s\Big) u^{(\xi)}
+\Big({1\over2}+s\Big) u^{(\zeta)}
\label{eq:tun_path}
\end{equation}
with $s\in [-{1\over2},+{1\over2}]$.
In the following discussion, we consider
(\ref{eq:tun_path}) as a representative of
low-action tunneling trajectories
connecting $u^{(\xi)}$ to $u^{(\zeta)}$.
The $s$-coordinate can thus be regarded as
the lattice analogue to the $u_-$ tunneling coordinate
(\ref{eq:u_pm}) in the two-site problem.
Note in particular that $W_0(u^{(\zeta\xi)}(s))$
has local minima at $s=-{1\over2}$ and $s=+{1\over2}$
which must, by continuity, be separated
by (at least) one intervening maximum, i.e., by
a tunneling barrier. The simplest scenario, normally
borne out in the numerical calculations
discussed below, is that there is only one
barrier maximum, by symmetry located at $s=0$.
Thus, along $u^{(\zeta\xi)}(s)$, $W_0$ has qualitatively
the same structure as $W_{0-}(u_-)$ described above
for the two-site problem.
The first crucial point to note here
is that the width of this tunneling barrier,
that is, the Euclidean distance from $u^{(\xi)}$ to $u^{(\zeta)}$
in their $N$-dimensional $u$-space,
\begin{eqnarray}
d(\zeta,\xi) &&\equiv |u^{(\zeta)}-u^{(\xi)}|
\nonumber\\
&&\le |u^{(\zeta)}|+|u^{(\xi)}| =
2|u^{(\xi)}|\equiv d_\infty
\label{eq:udist}
\end{eqnarray}
is finite and bounded by an upper limit
$d_\infty$ which is independent of the
spatial distance $|\zeta-\xi|=|r|$. Note that
$d_\infty$ is independent of $\xi$ or $\zeta$
due to lattice translational invariance.
Thus two polaron configurations $u^{(\xi)}$ and
$u^{(\zeta)}$ are never further apart from each other
than $d_{\infty}$ in $u$-space, regardless of
how far apart their centroid sites $\xi$ and $\zeta$
are in real space.
The second important point is that
the height of the 0th order ($W_0$)
tunneling barrier along $u^{(\zeta\xi)}(s)$
is also bounded independently of lattice
distances $|\zeta-\xi|$.
To see this, note that the EP potential
$C u_\ell$ acting on the electron
is attractive, i.e., $Cu_\ell<0$,
for any $u$-configuration along the path
$u^{(\zeta\xi)}(s)$ between $s=-{1\over2}$ and $s=+{1\over2}$.
Hence, the contribution to $W_0(u)$ from $H_{\rm e}+H_{\rm e-ph}(u)$
is bounded from above by the electron
groundstate energy of the undistorted lattice.
Also, by an argument analogous to (\ref{eq:udist}),
the elastic energy contribution $H_{\rm K}(u)$
is bounded from above by ${3\over2}H_{\rm K}(u^{(\xi)})$.
Both of these upper bounds are independent of $|\zeta-\xi|$.
The foregoing considerations suggest that a
manifold of tunneling trajectories exists, sufficiently close to
$u^{(\zeta\xi)}(s)$, which will all connect $u^{(\xi)}$ to
$u^{(\zeta)}$ through a $W_0$-barrier whose height
and width is bounded by upper limits
independent of $|\zeta-\xi|$.
Within the simple adiabatic approximation, $W=W_0$,
one thus arrives at the unphysical result
that the polaron can tunnel in a single
(``instanton'') tunneling step from any site $\xi$ to any
site $\zeta$ in the lattice with a tunneling
matrix element $t_P(\zeta-\xi)$ which does {\it not}
go to zero for $|\zeta-\xi|\to\infty$, but rather
\begin{equation}
\lim_{|\zeta-\xi|\to\infty} |t_P(\zeta-\xi)| \equiv t_{P\infty} > 0
\;.
\label{eq:t_p-infty}
\end{equation}
The foregoing argument can be made formally more rigorous,
by employing instanton methods similar to those
described in the next section for short-distance
tunneling processes.
We will not engage in that exercise here. Suffice it to
say that the simple adiabatic result (\ref{eq:t_p-infty})
for the lattice is analogous to the above described
two-site result in the $t=0$ limit:
the simple adiabatic approximation allows tunneling
solely on the basis of the $W_0$
electronic groundstate {\it energy} barrier,
regardless of whether there is actually any
electronic {\it wavefunction overlap} between the initial
and final $u$-configurations of the tunneling process.
To account for wavefunction overlap effects in long-distance
tunneling processes, the $W_1$-term (\ref{eq:w_1})
has to be included in the total potential $W=W_0+W_1$.
Let us consider the evolution of the
electronic groundstate wavefunction
$\mid \Psi(u) \rangle$
along the linear tunneling trajectory $u^{(\zeta\xi)}(s)$
(\ref{eq:tun_path})
between two centroid sites $\zeta$ and $\xi$
with $|\zeta-\xi|\gg \ell_P(u)$. Here $\ell_P(u)$ denotes
the exponential localization length of $\mid \Psi(u)\rangle$
for local lattice distortions comparable to $u^{(\xi)}$.
As a simplest scenario, let us assume
that the wavefunction $\mid\Psi(u)\rangle$ remains
localized for all $u$ along $u^{(\zeta\xi)}(s)$.
This situation will be realized at
EP coupling strengths $E_{\rm P}$ which are sufficiently
large compared to $E_{\rm P}^{({\rm crit})}$. The electronic
groundstate problem can then be qualitatively described
as follows:
The EP potential $Cu^{(\zeta\xi)}_\ell(s)$,
acting on the electron at sites $\ell$,
consists of two localized wells,
$C({1\over2}-s)u^{(\xi)}_\ell$
and
$C({1\over2}+s)u^{(\zeta)}_\ell$, the former centered around
site $\ell=\xi$,
the latter around $\ell=\zeta$.
As $s$ is varied from $-{1\over2}$ to $+{1\over2}$,
the EP well at $\xi$ becomes shallower and the EP well
at $\zeta$ deepens. At $s=0$, the two wells become degenerate.
Assuming large real-space tunneling distances
$|\zeta-\xi|$, the electron
wavefunction overlap between these two wells
is exponentially small.
Hence, the electron groundstate wavefunction
$\mid\Psi(u^{(\zeta\xi)}(s))\rangle$ will remain
localized at site $\xi$ for most $s<0$ until $s$ gets
very close to $s=0$.
Within a very small interval around $s=0$,
$\mid\Psi(u^{(\zeta\xi)}(s))\rangle$
will then switch over from being localized
around $\xi$ to being localized around $\zeta$. In that narrow
$s$-region around $s=0$,
the electron wavefunction consists of the superposition of two almost
non-overlapping localized parts, one centered around $\xi$, the
other around $\zeta$. Since $\mid \Psi(u)\rangle$ changes very rapidly
as a function of $u$ near $u^{(\zeta\xi)}(0)$, $W_1(u)$
will exhibit a sharp peak along $u^{(\zeta\xi)}(s)$
which increases the tunneling barrier at $s=0$ and hence
suppresses the tunneling amplitude.
Formally, this problem can be treated by a tight-binding ansatz
for the electron groundstate wavefunction:
$\mid\Psi(u^{(\zeta\xi)}(s))\rangle$
near $s=0$ is approximated by a superposition of
$\mid \Psi(u^{(\xi)}/2) \rangle$ and
$\mid \Psi(u^{(\zeta)}/2) \rangle$,
i.e., by the single-well groundstates of the two EP wells
${1\over2}Cu^{(\xi)}_\ell$ and
${1\over2}Cu^{(\zeta)}_\ell$, discussed above.
As $s$ is varied near $s=0$,
the response of $\mid \Psi(u^{(\zeta\xi)}(s))\rangle$ to
the changing EP well depths is then governed
by the effective {\it electronic} hybridization overlap
\begin{eqnarray}
t_{\rm eff}(\zeta-\xi) &&=
\langle \Psi(u^{(\xi)}/2)\mid H_{\rm e}
\mid \Psi(u^{(\zeta)}/2) \rangle
\nonumber \\
&&\sim t \exp \Big( -2 { |\zeta-\xi| \over \ell_{P,{1\over2}} }\Big)
\label{eq:t_eff-mj}
\end{eqnarray}
where $\ell_{P,{1\over 2}}\equiv \ell_P(u^{(\xi)}/2)$
is the localization length of
$\mid \Psi(u^{(\xi)}/2) \rangle$.
Within the tight-binding ansatz, the problem
then becomes analogous to the two-site problem
in the $t\to 0$ limit, with the tight binding-basis states
$\mid \Psi(u^{(\xi)}/2) \rangle$
and $\mid \Psi(u^{(\zeta)}/2) \rangle$
replacing the two-site basis states
$\mid\Psi^{(\ell 1)}\rangle$ and $\mid\Psi^{(\ell 2)}\rangle$,
respectively. $W_1(u^{(\zeta\xi)}(s))$ exhibits a sharply peaked
barrier at $s=0$, analogous to the $t\to0$ limit of
the two-site problem.
The $W_1$-barrier will be
roughly of the form given by Eq.~(\ref{eq:w_1-2s}),
with $u_-$ replaced by $u_-(s)\equiv d(\zeta,\xi)\times s $
and with $t$ replaced by $t_{\rm eff}(\zeta-\xi)$.
Thus, along with $t_{\rm eff}(\zeta-\xi)$,
the transmission amplitude through the $W_1$-barrier
and the effective polaron tunneling matrix element $t_P(\zeta-\xi)$
will decrease exponentially with the tunneling
distance $|\zeta-\xi|$, analogous to
the $t\to 0$-limit in the two-site problem.
The long-distance
polaron tunneling processes are in the anti-adiabatic
regime, since the relevant effective electronic
hybridization overlap matrix elements $t_{\rm eff}(\zeta-\xi)$
become small compared to the phonon energy $\Omega$
at large tunneling distances $|\zeta-\xi|$ on
large lattice sizes $N$. The $W_1$ potential
ensures, at least qualitatively,
that the effective polaron tunneling matrix
elements $t_P(\zeta-\xi)$ are properly suppressed to zero
at large tunneling distances. Hence, the full
adiabatic approximation $W=W_0+W_1$ restores
the correct long-distance behavior, as far as the
polaron tunneling matrix element is concerned.
However, just as in the anti-adiabatic limit of the
two-site problem, the $W_1$-term also imposes
an unphysical constraint on the lattice coordinates.
In the present case, involving long-distance
tunneling on a lattice, this constraint acts to couple the
lattice displacement coordinates at arbitrarily
large distances $|\zeta-\xi|$, thereby introducing
unphysical infinite-range interactions between the
lattice coordinates.
Thus, in long-distance tunneling processes, the preconditions
for the adiabatic approximation break down. However,
from the foregoing discussion it is clear that the
effective action for the corresponding paths increases exponentially
and that the corresponding tunneling matrix element dies out
exponentially with the tunneling distance. The simplest
way of dealing with such long-distance
tunneling processes is therefore to altogether neglect the
corresponding tunneling paths in the path integral.
This is what we will do in the following analysis.
As far as the polaron tunneling dynamics is concerned,
the short-distance processes will be dominant.
The relevant effective electronic matrix elements $t_{\rm eff}$
for short-distance processes are of order of the 1st neighbor $t$
which is normally larger than or at least comparable to the phonon
energy scale in typical solid state situations.
We {\it can} therefore use the adiabatic approximation
to accurately estimate the effective action for short-distance
tunneling paths. And it is only in this limited sense that
the adiabatic approximation {\it will} be used in the following.
\section{INSTANTONS AND EFFECTIVE HAMILTONIAN}
\label{sec:instanton}
The problem of polaron formation in the 2D Holstein-$ tJ $
and Holstein-Hubbard models has already been studied
extensively.\cite{YBL,Zhong,Roeder,Holstein}
In the nearly ${1\over2}$-filled band regime, the
dopant induced hole carriers in the AF spin background
can form polarons with much less EP coupling
strength than is required for a single electron in an empty band.
Thus $E_{\rm P}^{({\rm crit})}$ for forming a single hole polaron
in the ${1\over2}$-filled system is reduced by a factor of about
$4-5$, compared to a single electron polaron formation
in the empty band system. This reduction in $E_{\rm P}^{({\rm crit})}$
has been explained in terms of the hole mass enhancement
and self-localization effect
in the AF spin background of the nearly ${1\over2}$-filled
Hubbard system \cite{Zhong}.
The basic idea here is that the coupling to the AF spin background
already provides some form of self-localization of the
hole relative to a self-induced local distortion of
the AF spin correlations \cite{Zhong,ChSc}.
This spin polaron effect is manifested in the strongly
reduced hole quasiparticle bandwidth, from $8t$ in the
non-interacting system to $\sim 2J$ in Hubbard or $ tJ $ systems
near half-filling. In the presence of EP
coupling, this electronic bandwidth reduction
permits the hole quasi-particle to become self-trapped by
a much weaker EP potential well; hence the reduction
in $E_{\rm P}^{({\rm crit})}$.
The fact that the polaron formation threshold
$E_{\rm P}^{({\rm crit})}>0$ remains non-zero
even in the strongly correlated systems
is dictated by the so-called
small-polaron dichotomy \cite{emin_holstein_scaling},
as discussed further in Sec.~\ref{sec:p_liquid}.
For a multi-hole system containing
\begin{equation}
P \equiv N - \sum_j n_j
\label{eq:p_hole}
\end{equation}
doping-induced holes on an $N$-site lattice, there are
${N \choose P}$
possible configurations for accommodating the $P$ localized holes
on the $N$ available sites.
The lattice potential $ W(u) $ is therefore expected
to have up to
${N \choose P}$
nearly degenerate local minima, denoted by $u^{\xi}$
in the following,
corresponding to the
${N \choose P}$
different centroid configurations
$\xi \equiv ( \xi_1,\dots,\xi_P )$ \cite{Zhong}.
Here, $\xi_i\equiv(\xi_{i,x},\xi_{i,y})$
denotes the lattice (centroid) site occupied by the $i$-th hole.
As noted above, each of these local-minimum configurations
breaks the translational symmetry of the lattice
at the level of the 0th order adiabatic approximation.
The symmetry is restored
in the 1st order adiabatic approximation
by polaron tunneling processes
between the different $u^{\xi}$.
At EP coupling strengths $E_{\rm P}$ larger than, but sufficiently
close to $E_{\rm P}^{({\rm crit})}$, it is possible
that some of the
${N \choose P}$
centroid configurations $\xi$ do not have corresponding
stable local-minimum configurations $u^{\xi}$ in $W(u)$.
This may happen, for example, in a two-hole system ($P=2$),
if one tries to accommodate the
two polarons at 1st neighbor sites,
$\xi_1$ and $\xi_2$, in the presence of a
1st neighbor Coulomb repulsion $V_{\rm C}$.
At sufficiently strong $V_{\rm C}$, the corresponding local minimum
$u^\xi\!\equiv\!u^{(\xi_1,\xi_2)}$
becomes locally unstable, which is signaled by the
smallest eigenvalue of the restoring force matrix
$\partial^2 W/\partial u^2|_{u^{\xi}}$ becoming negative.
In the following, we will not consider such situations, but rather
restrict ourselves to parameter regions where all
the local minimum configurations
$u^{\xi}$ are stable.
To establish the basic structure of the effective polaron
tunneling dynamics, we treat the path integral
for the effective action $S_{\rm Ad}$ (\ref{eq:s_eff})
or its equivalent Hamiltonian $H_{\rm Ad}$
by a lattice dynamical many-body tight-binding approach.
The basic idea behind this approach is that an effective
polaron tunneling Hamiltonian $H_{\rm P}$ can be defined
which operates in a ``low-energy'' sub-space of nearly orthogonal
tight-binding basis states $\mid \phi_\xi \rangle$,
labeled by the localized polaron centroid configurations $\xi$.
Each such basis state represents a
lattice wavefunction $\phi_\xi(u)$ which
is assumed to be localized in $u$-space around the corresponding
local potential minimum configuration $u^\xi$.
For example, $\phi_\xi(u)$
could be chosen as the vibrational (``phonon'') groundstate obtained
in a harmonic approximation by expanding $W(u)$ to quadratic
order around $u^\xi$.
By restricting the lattice Hilbert space to such a set of basis states
$\phi_\xi$, all vibrational excited states
around the polaronic local minima
are neglected. Thus, formally, our approach can be regarded as a
tight-binding approximation, formulated for the quantum
dynamics of the multiple-well lattice potential $W(u)$ in the
$N$-dimensional lattice configuration ($u$-) space.
In the simplest tight-binding approach one would then simply
estimate the matrix elements of $H_{\rm P}$
by projecting the adiabatic lattice
Hamiltonian $H_{\rm Ad}$ onto the corresponding tight-binding
low-energy sub-space
spanned by all $\phi_\xi$. In such a 1st order projection
approach, one neglects all
effects arising from virtual excitations
out of the low-energy sub-space.
Since tunneling matrix elements are exponentially sensitive
to small corrections in, for example, the tunneling
barriers, such a tight-binding projection
could cause severe quantitative errors in the estimation
of the magnitude of tunneling matrix elements.
Also, as a practical matter, the accurate evaluation of
Hamiltonian matrix elements
with basis functions defined on the $N$-dimensional
$u$-space can become quite difficult.
Lastly, in the 1st order projection
approach, it is difficult to include the tunneling
Berry phases into $H_{\rm P}$.
To avoid the foregoing difficulties, we have adopted
an approach which is based on a direct mapping
of imaginary-time tunneling paths,
rather than a mapping of Hamiltonian matrix elements.
Formally, this is accomplished by the path integral instanton
method \cite{Rajaraman,Coleman,Yonemitsu}. In this method,
an ``instantaneous'' polaron hopping process $\xi\to\zeta$ induced
by $H_{\rm P}$ between two polaron centroid configurations
$\xi\equiv(\xi_1\dots\xi_P)$ and $\zeta\equiv(\zeta_1\dots\zeta_P)$
is identified with the (restricted) path
sum of instanton tunneling paths connecting
$u^\xi$ to $u^\zeta$ in $u$-space.
The effective action $S_{\rm P}$
of the instantaneous hopping paths, so obtained,
can then be immediately translated into matrix elements
of the effective tunneling Hamiltonian $H_{\rm P}$.
Since only tunneling paths, but no basis states
enter into the mapping, the results do not depend
on any particular choice of tight-binding basis
states $\phi_\xi$.
As a specific starting point, we consider the trace of
the resolvent operator at complex energies $E$
\begin{equation}
{\rm Tr} \ (E-H)^{-1} = -\int_0^\infty d \beta \
e^{\beta E} \ {\rm Tr} \ e^{-\beta H}
\;,\label{eq:resolvent}
\end{equation}
written in the imaginary-time domain in path-integral form,
\begin{equation}
{\rm Tr} \ e^{-\beta H} = \int_{u(\beta) = u(0)} {\cal D}u(\tau) \
e^{- S_{\rm Ad} [u(\tau)]}
\;.
\label{eq:trace_eb}
\end{equation}
The trace operation in Eq.~(\ref{eq:resolvent}) leads to periodic
boundary conditions on the imaginary time interval
$[0,\beta]$ in Eq.~(\ref{eq:trace_eb}).
These periodic boundary conditions
in Eq.~(\ref{eq:trace_eb}) impose not only the closed path constraint
$u(\tau)=u(0)$, but also the condition that the initial
and final electron wavefunctions must be the same, including
their phase factors. That is, for the electron wavefunctions
$\mid\Psi(u(\tau))\rangle$ entering into $S_{\rm Ad}$ via
Eq.~(\ref{eq:overlap}), the constraint
$\langle \Psi(u(\beta)) \mid \Psi(u(0))\rangle=+1$ must be imposed
for all paths $u(\tau)$ integrated over in Eq.~(\ref{eq:trace_eb}).
The latter requirement ensures that the Berry phase contribution
to $S_{\rm Ad}$ in Eq.~(\ref{eq:trace_eb}) is uniquely defined
for every closed path $u(\tau)$, independent of the choice of phase
for each individual electronic wavefunction
$\mid \Psi(u(\tau))\rangle$ along such a path.
Quantized eigenenergies can be found from
(\ref{eq:resolvent}) by searching
for the poles of the trace of the resolvent operator on the
real $E$-axis.
The main contributions to the low-energy part of
(\ref{eq:resolvent}) arise from instanton path
configurations, i.e., $u$-paths which are almost
always close to one of the centroid configurations, occasionally
transfer from one to another centroid configuration by an
almost instantaneous polaron
hopping process, and finally return to the initial
$u$-configuration at imaginary time $ \beta $,
in order to satisfy the closed path constraint.
Important closed-path tunneling processes for polaron states with
$P$=1 and 2 dopant-induced holes
are shown in Fig.~\ref{fig:closed_path}.
Each black circle represents an occupied polaron centroid
site in the initial configuration $\xi$ of the hopping process.
Arrows indicate the hopping processes transferring the
initial configuration $\xi$ into the final configuration $\zeta$.
Thus, in $u$-space each arrow corresponds to a set of instanton-type
tunneling paths which connects the two respective minimum-$W$ endpoint
configurations $u^\xi$ and $u^\zeta$
and traverse the $W$-barrier
separating the two minima.\cite{Zhong}
Note that, as discussed above,
via such tunneling paths, a hole {\em polaron \/} can
tunnel in a single step between 2nd-, 3rd-, etc. neighbor
sites even if the original {\em electron \/} Hamiltonian
(\ref{eq:model}) contains only a 1st-neighbor $t$.\cite{Zhong}
First, we consider the case of $P$=1. For the time being,
we take into account
only the 2nd and 3rd neighbor
processes denoted by amplitudes $ t_1^{(2)} $ and $ t_1^{(3)} $ in
Fig.~\ref{fig:hopping}(a).
Single-polaron {\it inter\/}-sublattice processes are strongly
suppressed by the AF spin correlations.\cite{Trugman} Hence,
the 1st-neighbor amplitude $ t_1^{(1)} $ can be much smaller than
or, at most, comparable to $ t_1^{(2)} $ and $ t_1^{(3)} $
(to within 20-30\%) in the case of $P$=1.
Then, instanton path configurations are classified according to
the numbers of intra-sublattice processes:
$ n_x $ counts the number of $ t_1^{(3)} $ processes to the right,
$ m_x $ the $ t_1^{(3)} $ processes to the left,
$ n_y $ the $ t_1^{(3)} $ processes to the upper,
$ m_y $ the $ t_1^{(3)} $ processes to the lower,
$ n_u $ the $ t_1^{(2)} $ processes to the upper-right,
$ m_u $ the $ t_1^{(2)} $ processes to the lower-left,
$ n_v $ the $ t_1^{(2)} $ processes to the lower-right, and
$ m_v $ the $ t_1^{(2)} $ processes to the upper-left neighbors.
Path integration over the corresponding instanton paths
gives\cite{Rajaraman,Coleman,Yonemitsu}
\begin{eqnarray}
&&{\rm Tr} \ e^{-\beta H}= \nonumber\\
&&e^{-\beta W(u^{({\rm min},1)})} \sum_{n_x,\dots,m_v}
\frac1{n_x! m_x! n_y! m_y! n_u! m_u! n_v! m_v!} \nonumber \\
&\times & \int \frac{d p_x}{2\pi}
e^{i p_x(2n_x-2m_x+n_u-m_u+n_v-m_v)} \nonumber \\
&\times & \int \frac{d p_y}{2\pi}
e^{i p_y(2n_y-2m_y+n_u-m_u-n_v+m_v)} \nonumber \\
&\times & (e^{-\delta R_{\rm 1}^{(2)} -i \theta_1^{(2)}}
J_1^{(2)}K_1^{(2)}\beta)^{ n_u + m_u + n_v + m_v } \nonumber \\
&\times & (e^{-\delta R_{\rm 1}^{(3)} -i \theta_1^{(3)}}
J_1^{(3)}K_1^{(3)}\beta)^{ n_x + m_x + n_y + m_y } \nonumber \\
&=& \int \frac{d p_x}{2\pi} \frac{d p_y}{2\pi} \exp \left[
-\beta \left\{ W(u^{({\rm min},1)}) +2 t_1^{(2)} [ \cos(p_x + p_y)
\right. \right. \nonumber \\
&&
+ \cos(p_x-p_y) ] \left. \left. +2 t_1^{(3)}
[ \cos(2p_x) + \cos(2p_y) ] \right\} \right]
\;.\label{eq:one_polaron}
\end{eqnarray}
The effective hopping matrix elements
$ t_P^{(\nu)} $ are obtained as
\begin{equation}
t_P^{(\nu)} = - J_P^{(\nu)} K_P^{(\nu)}
e^{-\delta R_P^{(\nu)} -i \theta_P^{(\nu)}}
\;.\label{eq:effective_hopping}
\end{equation}
$ W(u^{({\rm min},1)}) $ is the absolute minimum lattice potential
energy obtained at a minimum-$W$ configuration
$ u^{({\rm min},1)}\equiv u^{(\xi_1)} $
for $P=1$.
Factorial factors such as $ n_x! $, etc. are
introduced to account for identical
species of instantons. The $ p_x $ and $ p_y $-integrals are
introduced to enforce the imaginary-time periodic boundary
condition.
The quantity $ \delta R_P^{(\nu)} $ is the
single-instanton contribution to the real part of the action
for the path segment of the corresponding
tunneling process $t_P^{(\nu)}$
and $\theta_P^{(\nu)}$
is the corresponding Berry phase contribution.
The assignment of a unique Berry phase factor
$e^{-i \theta_P^{(\nu)}}$ to each such open path segment
requires more detailed symmetry considerations and will
be postponed until Sec.~\ref{sec:symmetry}.
The quantity $ K_P^{(\nu)} $ in (\ref{eq:effective_hopping})
represents the $-\frac12$-th power of the fluctuation determinant
for the instanton solution with the zero mode excluded
divided by that for the static solution at $ u^{({\rm min},1)} $,
and $ J_P^{(\nu)} $ is the Jacobian needed for a special
treatment of the corresponding zero mode.
They are defined as in Eqs.~(10.13) and (10.14) of
Ref.~\onlinecite{Rajaraman}
for the periodic potential problem.
Substituting the result of the path integral (\ref{eq:one_polaron})
into the formula (\ref{eq:resolvent}), we obtain the dispersion
relation shown in the curly braces of $\exp \left[ -\beta
\{ \dots \} \right]$ in Eq.~(\ref{eq:one_polaron}).
Note that the effective hopping matrix element is
defined such that it is positive if the corresponding
path segment carries a nontrivial $(-1)$ Berry
phase factor: the sign convention of our polaron tunneling
matrix elements $t_P^{(\nu)}$ is opposite to that used in the
original electron Hamiltonian (\ref{eq:model}).
Next, we consider the case of $P=2$.
Since self-localization reduces substantially the polaron kinetic
energy scale, it is favorable for two polarons in an AF
spin background to be bound in a pair:
the binding energy can easily be of the order of the effective
polaron nearest-neighbor attraction, i.e., comparable to
the AF spin exchange coupling $ J $\cite{phase_sep} in the
Holstein-$ tJ $ model.
As a first approximation, we therefore restrict the
path integration to include only
1st neighbor pair configurations $u^{(\xi_1,\xi_2)}$
and the instanton tunneling paths connecting them.
Our numerical studies described below suggest
that these 1st neighbor configurations represent the
{\it absolute} minimum of $W(u)$ for $P=2$.
Other, more distant pair configurations with $|\xi_1-\xi_2|>1$
are either represented by local $W$-minima $u^{(\xi_1\xi_2)}$
of higher energy or they don't form local minima
in $W(u)$ at all.
We are thus limiting ourselves, for now, to the tunneling
processes $ t_2^{(2)} $ and $ t_2^{(3)} $ between the
degenerate, absolute-minimum $u$-configurations
as shown in Fig.~\ref{fig:hopping}(b).
The technique used above for $P=1$ can be generalized in a
straightforward manner to the present case $P=2$. Here, in addition
to the lattice translational degeneracy of the minimum-$W$
$u$-configurations, the $P=2$ system exhibits 2-fold
internal degeneracy, corresponding to the two possible
orientations of the 1st neighbor polaron pair, along either
the $x$- or along the $y$-axis. Because of this 2-fold
internal degree of freedom, the instanton exponential
function in the path integral takes the form of the trace
over a $2\times2$ matrix exponential, namely
\begin{eqnarray}
&& {\rm Tr} \ e^{-\beta H} = \nonumber \\
&& \int \frac{d p_x}{2\pi} \frac{d p_y}{2\pi} \ {\rm Tr} \ \exp \left[
-\beta W(u^{({\rm min},2)})
\left( \begin{array}{cc} 1 & 0 \\ 0 & 1 \end{array} \right)
\right. \nonumber \\
&& \left.
-\beta \left( \begin{array}{cc}
2 t_2^{(3)} \cos p_x &
4 t_2^{(2)} \cos \frac{p_x}{2} \cos \frac{p_y}{2} \\
4 t_2^{(2)} \cos \frac{p_x}{2} \cos \frac{p_y}{2} &
2 t_2^{(3)} \cos p_y \end{array} \right) \right]
\;,\label{eq:two_polaron}
\end{eqnarray}
where $ W(u^{({\rm min},2)}) $ denotes
the absolute minimum lattice potential energy for $P=2$,
obtained at $ u^{({\rm min},2)} \equiv u^{(\xi_1\xi_2)} $ with
$\xi_1$ and $\xi_2$ denoting 1st neighbor centroid sites.
The tunneling matrix elements
$ t_P^{(\nu)} $ are expressed analogous
to Eq.~(\ref{eq:effective_hopping}) in terms of the
action contributions, fluctuation determinants, and Jacobians
of the respective instanton path segments.
The 2 low-lying eigenenergies of the polaron pair
at total momentum $p\equiv(p_x,p_y)$
are obtained by diagonalizing the $2\times 2$
matrix in $\exp \left[ -\beta \{ \dots \} \right]$
of Eq.~(\ref{eq:two_polaron}).
The generalization of the foregoing path integral approach
to $P>2$ hole polaron states is in principle straightforward,
but becomes practically difficult to implement
with increasing polaron number $P$.
Analogous to (\ref{eq:two_polaron}),
the approach leads to a momentum integral over the trace
of a matrix exponential where the matrix dimension reflects
the number of (nearly) degenerate, translationally inequivalent
polaron centroid configurations $(\xi_1\dots\xi_P)$ included
in the tunneling analysis. In the following, we restrict ourselves
to the cases $P=$0, 1, and 2 which will allow us to extract the
effective 1-polaron tunneling and 2-polaron interaction matrix
elements.
The low-lying tunneling eigenenergies identified by
the foregoing instanton path integral method
(and their corresponding eigenstates)
can be equivalently represented in terms of
an effective polaron tunneling Hamiltonian $H_{\rm P}$
where each polaron is represented as a spin-$1\over2$
fermion. $H_{\rm P}$ is thus
defined to operate in an effective spin-${1\over2}$ fermion
Hilbert space with the effective fermions occupying sites
$\xi_i$ on the 2D square lattice of the original EP Hamiltonian.
A $P$-polaron centroid configuration $(\xi_1\dots\xi_P)$
is mapped onto the corresponding state of $P$
site-localized fermions with minimum possible total spin,
i.e., with $S_{\rm tot}={1\over2}$ (0) for odd (even) $P$.
The latter mapping condition reflects the fact
that the absolute electron
groundstates $\mid\Psi(u)\rangle$,
numerically calculated on finite clusters,
exhibit minimum total spin quantum number.
Notice however that by representing the polaron as an effective
spin-$1\over2$ fermion, we are actually including
low-energy spin excitations into the effective Hamiltonian
description. In order to derive the effective polaron spin-spin
interactions, our adiabatic path integral
treatment can be straightforwardly generalized
to include restricted electron groundstates in
Hilbert space sectors of higher total spin quantum numbers
$S_{\rm tot}\geq 1$. In this manner, the spin-$1\over2$ fermion
representation can be extended well beyond the scope of our original
adiabatic approximation which retains only the (minimum-spin)
absolute electron groundstate $\mid\Psi(u)\rangle$.
In the following analysis, we limit ourselves to the absolute
groundstate only. Hence we are only studying the
total spin-singlet pair state in the $P=2$ case.
Using our numerical Berry phase results, we will show
in Sec.~\ref{sec:symmetry}
that each single polaron in such a singlet pair behaves
indeed as a spin-$1\over2$ fermion.
In generalizing the above 1st neighbor approach,
it is also straightforward to include inter-sublattice hopping
processes:
the dimension of the matrix increases, the momentum-independent
term is no longer proportional to the unit matrix, and $ t_P^{(1)} $
(more precisely, $ t_1^{(1)} $, $ t_2^{(1a)} $, and $ t_2^{(1b)} $)
are defined as above.
Then, the effective Hamiltonian describing the polaron
tunneling dynamics and effective polaron-polaron
interactions can be written in the form,
\begin{eqnarray}
H_{\rm P} =&& \sum_{i \neq j, \sigma}
( t_{ij} + \sum_k \Delta t_{ijk} n_{d k})
\nonumber \\
&&\;\;\;\;\times(1-n_{d j,-\sigma})
d_{j \sigma}^\dagger d_{i \sigma}
(1-n_{d i,-\sigma})
\nonumber \\
&& -\sum_{\langle i,j \rangle}
V_{\rm P} n_{d i} n_{d j}
\;.\label{eq:effective_Hamiltonian}
\end{eqnarray}
Thus, $d_{j\sigma}^\dagger $ creates a spin-$1\over2$-fermion polaron
with spin $ \sigma $ at site $ j $,
$ n_{d j} = \sum_\sigma d_{j\sigma}^\dagger d_{j\sigma} = 0, 1 $
and $ P = \sum_j n_{d j} = 1, 2 $.
The hopping term is to include, appropriately, the amplitudes
$ ( t_{ij} + \sum_k \Delta t_{ijk} n_{dk}) \equiv t_1^{(1)} $,
$ t_1^{(2)} $, $ t_1^{(3)} $, $ t_2^{(1a)} $, $ t_2^{(1b)} $,
$ t_2^{(2)} $, or $ t_2^{(3)} $ [with appropriate sign
according to the corresponding Berry phase factor]
for $ i \rightarrow j $ tunneling processes
shown in Fig.~\ref{fig:hopping}. Note here that the sign convention
for the polaron tunneling amplitudes $t_P^{(\nu)}$ in
(\ref{eq:effective_Hamiltonian}) is opposite to that used in
the underlying electron Hamiltonians (\ref{eq:h_tj},\ref{eq:h_hub}).
The 1st neighbor attraction $ V_{\rm P} $ in
(\ref{eq:effective_Hamiltonian}) is estimated as
\begin{equation}
V_{\rm P} =
2 W(u^{({\rm min},1)}) - W(u^{({\rm min},2)}) - W(u^{({\rm min},0)})
\;, \label{eq:effective_attraction}
\end{equation}
where $ W(u^{({\rm min},P)}) $ is the (absolute)
minimum potential energy $ W(u) $
for the $ u $-configurations
$u^{({\rm min},P)}\equiv u^{(\xi_1\dots\xi_P)}$
which minimize $W(u)$
for $ P $ holes. For $ P $=2, our numerical calculations
suggest that the absolute minimum-$ W $ $ u $-configuration
does indeed correspond to the 1st neighbor pair.
For purposes of estimating $V_{\rm P}$ numerically on
small model clusters, we have minimized $W_0$ instead of $W=W_0+W_1$,
thus neglecting the effect of $W_1$ on $u^{({\rm min},P)}$.
In the physical parameter regime of interest, $\Omega\ll t, E_{\rm P}$,
these $W_1$-corrections to $u^{({\rm min},P)}$ are indeed small,
of order $(\Omega/t)^2$. The full potential $W=W_0+W_1$ was used
to calculate $ W(u^{({\rm min},P)}) $.
To obtain order of magnitude estimates for $ t_P^{(\nu)} $,
we have used both the dilute-instanton-gas
approach\cite{Rajaraman,Coleman,Yonemitsu}, as explained above,
and a constrained lattice dynamics approach\cite{Zhong} which is more
straightforward and adopted in Sec.~\ref{sec:effective}.
The two approaches have given similar results.
In the latter approach, the lattice Schr\"odinger equation
corresponding to $ S_{\rm Ad} $ is solved exactly for $ u $
constrained to the linear tunneling path $u^{(\zeta\xi)}(s)$
which is defined analogous to Eq.~(\ref{eq:tun_path})
and connects the two
energetically degenerate, minimum-$ W $ polaron end-point
$ u $-configurations $u^{\xi}$ and $u^{\zeta}$ of the respective hop.
The hopping matrix element $ |t_P^{(\nu)}| $ is then estimated as
one half of the ground-to-1st-excited state energy splitting.
\section{SYMMETRY OPERATIONS AND BERRY PHASES}
\label{sec:symmetry}
Before going into numerical estimations of effective model
parameters, we need to settle the quasiparticle statistics and
the signs of effective polaron hopping processes by calculating
Berry phase factors.
To calculate $ \exp( -i \theta [u(\tau)] ) $ for tunneling paths
$ u(\tau) $ shown in Fig.~\ref{fig:closed_path},
we discretize $ \tau $ with at least 5 $ \tau $-points
per linear path segment and obtain $\mid \Psi(u(\tau)) \rangle$ of the
Holstein-$ tJ $ model by the Lanczos exact diagonalization
method on an $ N $=4$ \times $4 lattice with
periodic boundary conditions.
The electron Hilbert space is restricted to the sector of
minimum total spin ($S$=0, $1/2$, 0 for $P$=0, 1, 2, respectively),
which comprises the absolute ground state
$\mid \Psi(u) \rangle$ for $ u $-configurations
near the local $ W $-minima.
The results for all paths in Fig.~\ref{fig:closed_path} are
summarized by
\begin{equation}
\theta [u(\tau)] = \pi \left(
m^{(2)} + m^{(3)} + m_2^{(1)} \right)
\;, \label{eq:closed_path}
\end{equation}
where $ m^{(\nu)} $ is the number of $ \nu $-th neighbor hops
with $ \nu $=2, 3, and $ m_2^{(1)} $ for $ P $=2 denotes
the number of 1st-neighbor hops indicated by the dashed bonds shown
in Fig.~\ref{fig:parity}(a) by the first polaron in close
proximity to the second, static polaron, indicated as a black circle.
The effect of the $ m_2^{(1)} $-term can be illustrated, for example,
by comparing the Berry phase factors $e^{-i\theta}$ of
the triangular paths (a)(A) and (b)(B) shown in
Fig.~\ref{fig:closed_path}.
In both paths, a single polaron
is taken around the triangle in 3 steps, consisting of two
1st neighbor and one 2nd neighbor transfer. For the one-polaron case,
(a)(A), the phase factor is $(-1)$, for the two-polaron case (b)(B)
it is $(+1)$. Thus, the close proximity of the second,
static polaron in (b)(B) has altered the Berry phase of the first
polaron tunneling around a closed path.
The origin of the $ m_2^{(1)} $-term can be traced back to the
internal symmetry of $\mid \Psi(u) \rangle$:
For the local minimum-$ W $ $ u $-configurations of
2nd- and 3rd-neighbor polaron pairs, $\mid \Psi(u) \rangle$
is odd under reflection along the dashed lines shown
in Fig.~\ref{fig:parity}(b), i.e., along the
pair axis for the 2nd-neighbor pair and perpendicular to
the pair axis for the 3rd-neighbor pair.
Suppose, for example, that the first polaron hops from (1,0) to
(1,1) in a first step and from (1,1) to (0,1) in a second step with
the second polaron staying fixed at (0,0). These are the first
two steps of path (b)(B) in Fig.~\ref{fig:closed_path}.
Note that the two steps generate the same final centroid configuration
as a reflection along the dashed (0,0)--(1,1) line, shown in
Fig.~\ref{fig:parity}(b).
Because of this odd ``internal'' parity of $\mid \Psi(u) \rangle$
for the intermediate (2nd neighbor polaron pair) configuration,
one of the two 1st neighbor hops must
contribute an additional factor $(-1)$. Assigning this
$(-1)$ phase factor to one of the two 1st neighbor steps
in path (b)(B) of Fig.~\ref{fig:closed_path}
is to some extent arbitrary. The pattern
of dashed-line and full-line bonds surrounding the static
polaron in Fig.~\ref{fig:parity}(a) represents one possible
assignment which is consistent with all the closed-path Berry phase
results in Fig.~\ref{fig:closed_path}(b).
As a consequence of its odd internal parity,
the 2nd neighbor polaron pair configuration is actually allowed to
contribute with finite amplitude to polaron pair wave functions of
$ d_{x^2-y^2} $ symmetry, in spite of the fact that the 2nd neighbor
pair axis points along
the nodal axis of the $d_{x^2-y^2}$ pair wavefunction.
The $ m^{(2)} $- and $ m^{(3)} $-terms can be regarded as due to
strong antiferromagnetic correlation.
Suppose a polaron is initially located at (0,0) and hops to (2,0),
(1,1), and then back to (0,0) along the path (a)(B) in
Fig.~\ref{fig:closed_path}. The electron initially located
at (2,0) hops to (0,0) and then to (1,1), while the electron
initially located at (1,1) hops to (2,0). Thus, if one approximates
the AF spin background by a N\'{e}el state, two electrons of like spin
are exchanged. This produces a fermionic $(-1)$-factor.
More generally, when a closed path consists of an odd number of 2nd-
or 3rd-neighbor hopping processes, an even number of electrons
within a sublattice are cyclically permuted, producing the $(-1)$
factor within the N\'{e}el approximation to the AF spin background.
In order for this to occur, the AF spin correlation has to be strong,
but it need not be long-ranged.
For $P$=1, the Berry phase rule can be completely explained
in this way.
For both $P$=1 and 2, $ \theta [u(\tau)] $ is given by a sum of
independent single-polaron hopping contributions and
$ \exp( -i \theta [u(\tau)] ) $ does {\em not \/} depend on
whether or not the two polarons are being
adiabatically exchanged in a given path
[Fig.~\ref{fig:closed_path}(b)].\cite{Arovas}
Thus, for example, paths (b)(B) and (b)(C) in
Fig.~\ref{fig:closed_path} contain the same
1st and 2nd neighbor hops and they have the same Berry phase.
The two paths differ only in that
(b)(C) exchanges the two polarons, whereas (b)(B) does not.
Since the pair is a total spin singlet, this implies that
each single polaron in the pair behaves effectively as a
spin-$1/2$ fermion or as a spin-0 boson.
Only the spin-$1/2$ fermion representation is consistent with
the half-odd-integer total spin in odd-$ P $ systems and, as discussed
in the previous sections, it is the one we have adopted.
Equation~(\ref{eq:closed_path}) rules out the possibility of
representing dopant-induced hole polarons as spin-0 fermions
or as spin-$1/2$ bosons.
To settle the signs of effective polaron hopping processes, we need
to define Berry phase factors for the corresponding single-hop
open path segments.
Let the initial $ u $-configuration of such a single-hop
path segment be denoted by $ u^{(\xi)} $ and
the final $ u $-configuration by $ u^{(\zeta)}$.
The assignment of a Berry phase to such a path
segment can be made unique by fixing the
phase of the corresponding wavefunction
$\mid\Psi( u^{(\zeta)})\rangle$
relative to that of $\mid \Psi( u^{(\xi)})\rangle$
in some unique manner.
Given $u^{(\xi)}$ and $\mid \Psi(u^{(\xi)})\rangle$,
let $\mid\Psi^{({\rm ref})}(u^{(\zeta)})\rangle$ denote such a
final-state reference wavefunction.
Also, let $\mid\Psi^{({\rm ad})}(u^{(\zeta)})\rangle$
denote that groundstate wavefunction $\mid \Psi(u^{(\zeta)})\rangle$
which one obtains by adiabatically
evolving $\mid\Psi(u)\rangle$ along the tunneling path segment,
without discontinuity in phase, beginning with
$\mid \Psi(u^{(\xi)})\rangle$.
The Berry phase of the path segment
is then defined as the phase difference between
$\mid\Psi^{({\rm ad})}(u^{(\zeta)})\rangle$
and $\mid\Psi^{({\rm ref})}(u^{(\zeta)})\rangle$,
that is, as the phase of the wavefunction overlap
$
\langle\Psi^{({\rm ref})}(u^{(\zeta)})
\mid
\Psi^{({\rm ad})}(u^{(\zeta)})\rangle
$.
If, for example, $ u^{(\xi)} $ and $ u^{(\zeta)} $ are
related by a lattice symmetry operation,
we can choose $\mid\Psi^{({\rm ref})}(u^{(\zeta)})\rangle$
as the groundstate wavefunction
$\mid \Psi(u^{(\zeta)})\rangle$
generated by applying to $\mid \Psi(u^{(\xi)})\rangle$ the symmetry
operation which transforms $u^{(\xi)}$ into $u^{(\zeta)}$.
If $u^{(\xi)}$ and $u^{(\zeta)}$
are related by several different symmetry operations
giving different $\mid\Psi^{({\rm ref})}(u^{(\zeta)})\rangle$,
we need to specify the reference $\mid \Psi(u^{(\zeta)}) \rangle$,
i.e., which symmetry operation is chosen to generate the reference
$\mid \Psi(u^{(\zeta)}) \rangle$ from $\mid \Psi(u^{(\xi)}) \rangle$.
There does not always exist such a symmetry operation to relate
$\mid \Psi(u^{(\zeta)}) \rangle$ and $\mid \Psi(u^{(\xi)}) \rangle$,
e.g., when $u^{(\xi)}$ and $u^{(\zeta)}$ are a 2nd- or 3rd-neighbor
pair and a 1st-neighbor one, respectively.
Then, we can arbitrarily choose the phase of
$\mid \Psi^{({\rm ref})}(u^{(\zeta)}) \rangle$.
The Berry phase factor for the corresponding path is also arbitrary.
Figure~\ref{fig:parity}(a) is an example.
If we chose the different phase (i.e., the negative) of
$\mid \Psi^{({\rm ref})}(u^{(\zeta)}) \rangle$ for the
2nd (3rd)-neighbor pair, the signs of
all the $t_2^{(1a)}$ ($t_2^{(1b)}$) processes would be reversed.
For $ P $=1, we first fix, arbitrarily, the phase of
$\mid \Psi(u^{(\xi)}) \rangle$ for
the centroid configuration $\xi$=((0,0)).
All the $\mid \Psi^{({\rm ref})}(u^{(\zeta)}) \rangle$ are then
uniquely defined by either translation or rotation operations.
The Berry phase factors for $ P $=1 are summarized
in Fig.~\ref{fig:open_path}(a).
Here, the translation $(x,y) \rightarrow (x+a,y+b)$
is denoted by $T(a,b)$, and the rotation
$(x,y) \rightarrow (x\cos\phi-y\sin\phi,x\sin\phi+y\cos\phi)$
is denoted by $R(\phi)$.
The left-hand side shows $\mid \Psi^{({\rm ad})}(u^{(\zeta)}) \rangle$.
The right-hand side shows the possible choices of
$\mid \Psi^{({\rm ref})}(u^{(\zeta)}) \rangle$, generated
from the same $\mid \Psi(u^{(\xi)}) \rangle$ by
the appropriate lattice symmetry operations.
Note that, for 2nd and 3rd neighbor hops, both
translation and rotation operations generate the same
$\mid \Psi^{({\rm ref})}(u^{(\zeta)})\rangle$.
The first three lines of Fig.~\ref{fig:open_path}(a)
give
$ \exp [ -i \theta_1^{(1)} ] $=$+1$,
$ \exp [ -i \theta_1^{(2)} ] $=$-1$,
$ \exp [ -i \theta_1^{(3)} ] $=$-1$,
and thus
\begin{equation}
t_1^{(1)} < 0 \;, \ \ t_1^{(2)} > 0 \;, \ \ t_1^{(3)} > 0 \;.
\end{equation}
Obviously, these results are consistent with
the relation (\ref{eq:closed_path}).
For $ P $=2, we first fix the phase of $\mid \Psi(u^{(\xi)}) \rangle$
for the centroid configuration
$\xi\equiv( \xi_1,\xi_2 )$=((0,0),(1,0)).
Rotating this $\mid \Psi(u) \rangle$ by angle $\pi/2$, we define the
$\mid \Psi^{({\rm ref})}(u^{(\zeta)}) \rangle$
for $\zeta\equiv(\zeta_1,\zeta_2 )$=((0,0),(0,1)).
The $\mid \Psi^{({\rm ref})}(u^{(\zeta)}) \rangle$
for the other 1st neighbor pair configurations $\zeta$
are defined by applying translation operations
to either $\mid\Psi(u^{(\xi)})\rangle$ or to its rotated version
$R(\pi/2)\mid\Psi(u^{(\xi)})\rangle$.
The resulting Berry phase factors for $ P $=2 are summarized
in Fig.~\ref{fig:open_path}(b).
The first two lines imply that
$ \exp [ -i \theta_2^{(2)} ] $=$-1$,
$ \exp [ -i \theta_2^{(3)} ] $=$-1$,
and thus
\begin{equation}
t_2^{(2)} > 0 \;, \ \ t_2^{(3)} > 0 \;.
\end{equation}
The last two lines in Fig.~\ref{fig:open_path}(b) imply,
by comparison to the first two lines, that available
rotations would generate the same
$\mid \Psi^{({\rm ref})}(u^{(\zeta)}) \rangle$
as the translations.
The 1st-neighbor hops $ t_2^{(1a)} $ and $ t_2^{(1b)} $ are
positive or negative for the processes indicated by the dashed
and full bonds, respectively, of Fig.~\ref{fig:parity}(a),
as discussed above.
\section{BERRY PHASES AND QUANTUM NUMBERS}
\label{sec:phase}
Using the effective Hamiltonian
(\ref{eq:effective_Hamiltonian}) with parameters
$ t_1^{(1)} $, $ t_1^{(2)} $, $ t_1^{(3)} $,
$ t_2^{(1a)} $, $ t_2^{(1b)} $, $ t_2^{(2)} $, $ t_2^{(3)} $
(with signs determined above), and $ V_{\rm P} $,
we can now calculate the low-energy eigenstates
for the $P=1$ and $P=2$ polaron systems.
In the case $ P $=1, the dispersion relation from $H_{\rm P}$
is given by
\begin{eqnarray}
\epsilon_1({\bf p}) & = &
2 t_1^{(1)} [ \cos p_x + \cos p_y ]
+2 t_1^{(2)} [ \cos(p_x + p_y) \nonumber \\
&+& \cos(p_x - p_y) ]
+2 t_1^{(3)} [ \cos(2p_x) + \cos(2p_y) ]
\;.\label{eq:one_polaron_dispersion}
\end{eqnarray}
As mentioned in Sec.~\ref{sec:instanton}, on finite lattices,
$t_1^{(1)}$ is smaller than the 2nd and 3rd neighbor $t$'s
for $P=1$. As the cluster size increases, the overlap between
the two minimum-$W$ wavefunctions connected by the
$ t_1^{(1)} $ process, $\mid \Psi(u^{(\xi)}) \rangle$ and
$\mid \Psi(u^{(\zeta)}) \rangle$ for a 1st neighbor bond
$(\xi,\zeta)$, becomes small. Then, the potential energy
$W(u)$ would develop a higher barrier for the 1st neighbor hop,
due to $W_1(u)$, so that $t_1^{(1)}$ would become even
smaller.
Allowing for arbitrary values of $ t_1^{(2)} $ and
$ t_1^{(3)} $ but $t_1^{(1)}=0$, the one-polaron band-minimum is
located at momentum ${\bf p} = (\pi/2,\pi/2)$
for $ \mid t_1^{(2)} \mid < 2 t_1^{(3)} $,
at $(\pi,0)$
for $ \mid t_1^{(2)} \mid > 2 t_1^{(3)} $ and $ t_1^{(2)}>0 $,
and at $(0,0)$ and $(\pi,\pi)$
for $ \mid t_1^{(2)} \mid > 2 t_1^{(3)} $ and $ t_1^{(2)}<0 $,
as shown in Fig.~\ref{fig:phase}(a).
For the physically relevant signs implied by the Berry phase
factors,
$ t_1^{(2)}, t_1^{(3)} > 0 $, the momentum
of the one-polaron band-minimum is thus at $(\pi/2,\pi/2)$ or
$(\pi,0)$ which lies on the Fermi surface of the noninteracting
nearest-neighbor tight-binding band model at half filling.
The one-polaron bandwidth is given by
\begin{equation}
B_1 = \left\{ \begin{array}{lll}
4 t_1^{(2)} + 8 t_1^{(3)} & {\rm for} &
0< t_1^{(2)}<2 t_1^{(3)} \;, \\
8 t_1^{(2)} & {\rm for} &
0<2 t_1^{(3)}< t_1^{(2)} \;.
\end{array} \right.
\end{equation}
For the cluster geometries studied here
($N = \sqrt{8} \times \sqrt{8}$,
$N = \sqrt{10} \times \sqrt{10}$ and $N = 4 \times 4$) and with
only nearest-neighbor terms ($t$, $J$) included in the
original Hamiltonian (\ref{eq:model}),
certain ``accidental'' symmetries exist which cause
$ t_P^{(2)} = t_P^{(3)} $. As a consequence,
the band-minimum is at $(\pi/2,\pi/2)$, and the eigenvalues
of the inverse effective mass tensor at this point are
\begin{equation}
(m_1^{-1})_r = 8 t_1^{(3)} + 4 t_1^{(2)} = 12 t_1^{(2,3)}
\;,
\end{equation}
\begin{equation}
(m_1^{-1})_\phi = 8 t_1^{(3)} - 4 t_1^{(2)} = 4 t_1^{(2,3)}
\;,
\end{equation}
where the subscript $r$ is for the (1,1) direction and
$\phi$ is for the (1,$-1$) direction.
If we include finite and negative $ t_1^{(1)} $ (representing, e.g.,
the $ N \rightarrow \infty $ limit at finite, fixed hole density,
rather than at fixed hole number $P=1$), then
the band-minimum is shifted by $ t_1^{(1)} $ from $(\pi/2,\pi/2)$
to some point $(p,p)$ with $p<\pi/2$ which would
fall on the Fermi surface
of the noninteracting band model at corresponding filling.
For $ P $=2, we first consider the tightly-bound pair limit,
$ V_{\rm P} \gg \mid t_2^{(\nu)} \mid $, where we can approximate
the polaron pair ground state by including only nearest-neighbor
pair configurations, thus retaining only the $ t_2^{(2)} $ and
$ t_2^{(3)} $ matrix elements of $H_{\rm P}$.
The pair dispersion relations are then given by
\begin{eqnarray}
\epsilon_2^\pm({\bf p}) && = - V_{\rm P}
+ t_2^{(3)} ( \cos p_x + \cos p_y ) \nonumber \\
\pm && \left[
t_2^{(3)2} ( \cos p_x \! - \! \cos p_y )^2
+ ( 4 t_2^{(2)} \cos \frac{ p_x }2 \cos \frac{ p_y }2 )^2
\right]^{1/2}
\;. \nonumber \\
\label{eq:two_polaron_dispersion}
\end{eqnarray}
Allowing for arbitrary values of $ t_2^{(2)} $ and $ t_2^{(3)} $,
the pair wave function in the nearest-neighbor pair approximation
for $ \mid t_2^{(2)} \mid > t_2^{(3)} $ has $ d_{x^2-y^2} $-wave
symmetry if $ t_2^{(2)} > 0 $, and $ s $-wave symmetry if
$ t_2^{(2)} < 0 $, and, in either case, total momentum
${\bf p}=(0,0)$, as shown in Fig.~\ref{fig:phase}(b), at the
band-minimum.
For $ \mid t_2^{(2)} \mid < t_2^{(3)} $, the pair ground states
are multiply degenerate:
the horizontal pair (with pair axis parallel to the $x$ axis)
with total momentum ${\bf p}=(\pi,p_y)$
for arbitrary $\mid p_y \mid \leq \pi$, and
the vertical pair (with pair axis parallel to the $y$ axis)
with total momentum ${\bf p}=(p_x,\pi)$
for arbitrary $\mid p_x \mid \leq \pi$ all have the same energy.
The two-polaron bandwidth is given by
\begin{equation}
B_2 \equiv
\max_{\bf p}\epsilon^-_2({\bf p}) -
\min_{\bf p}\epsilon^-_2({\bf p})
= 4 \mid \mid t_2^{(2)} \mid - t_2^{(3)} \mid
\;.
\label{eq:b2}
\end{equation}
If we take account of 1st-, 2nd-, and 3rd-neighbor pair
configurations, including also the 1st-neighbor hopping terms,
$ t_2^{(1a)} $ and $ t_2^{(1b)} $, in second-order
perturbation theory, the initially degenerate energy
along ${\bf p}=(\pi,p_y)$,
\begin{equation}
\epsilon^-_2(\pi,p_y) = - V_{\rm P} -2 t_2^{(3)}
\;,
\end{equation}
is lowered by
\begin{equation}
\delta \epsilon^-_2(\pi,p_y) =
- \frac{4( 2 t_2^{(1a)2} \cos^2\frac{p_y}2 + t_2^{(1b)2}
)}{V_{\rm P} +2 t_2^{(3)}} \;.
\label{eq:degeneracy_1}
\end{equation}
It is reasonable for the $ t_2^{(1a)} $-term to favor
$ p_y = 0 $ because the process of
$(\xi_1,\xi_2)$=$((0,0),(1,0)) \rightarrow ((0,0),(1,1))$
and the process of $((0,0),(1,1)) \rightarrow ((0,1),(1,1))$,
for example, are in phase, as shown in Fig.~\ref{fig:parity}(a).
The ground states are still doubly degenerate:
the horizontal pair with ${\bf p}=(\pi,0)$ and
the vertical pair with ${\bf p}=(0,\pi)$, which
would correspond to $ p_x $- and $ p_y $-wave though
they are total-spin-singlets.\cite{YZS1}
For $ t_2^{(2)}, t_2^{(3)} > 0 $ implied by the Berry phase
factors, we thus get either $ d_{x^2-y^2} $- or
$ p_{x(y)} $-pairing symmetry with total momentum
${\bf p}=(0,0)$ or ${\bf p}=(\pi,0)[(0,\pi)]$, respectively.
The accidental symmetries for our finite cluster geometries
(in the absence of longer-range
terms in the original Hamiltonian (\ref{eq:model}) studied here)
lead to $ t_2^{(2)} = t_2^{(3)} $, which is exactly
on the $ d $-$ p $ phase boundary
where $ B_2 $ vanishes due to a frustration effect.\cite{Trugman}
So, the energy $\epsilon^-_2({\bf p})$ is independent of ${\bf p}$.
If we take account of 1st-, 2nd-, and 3rd-neighbor pair
configurations again, in second-order perturbation theory, the
initially degenerate energy on the $ d $-$ p $ phase boundary,
\begin{equation}
\epsilon^-_2({\bf p}) \mid_{ t_2^{(2)} = t_2^{(3)} } =
-V_{\rm P} -2 t_2^{(2,3)}
\;,
\end{equation}
is lowered by
\begin{equation}
\delta \epsilon^-_2({\bf p}) \mid_{ t_2^{(2)} = t_2^{(3)} } =
- \frac{f({\bf p})}{V_{\rm P} +2 t_2^{(2,3)}}
\label{eq:degeneracy_2}
\end{equation}
with
\begin{eqnarray}
f({\bf p}) &&= 4 t_2^{(1a)2} (2+\cos p_x+\cos p_y) \nonumber\\
&&+ 4 t_2^{(1b)2} \frac{1-\cos p_x \cos p_y}{2+\cos p_x+\cos p_y}
\;. \label{eq:degeneracy_2a}
\end{eqnarray}
Then the ground state has $ d_{x^2-y^2} $ symmetry with
${\bf p}=(0,0)$ for $\sqrt{2} \mid t_2^{(1a)} \mid >
\mid t_2^{(1b)} \mid$ and $ p_{x(y)} $-wave with
${\bf p}=(\pi,0)$ [$(0,\pi)$] otherwise.
For the $ N $=4$ \times $4 Holstein-$ tJ $ cluster with
periodic boundary conditions, an accidental symmetry
leads to $ \mid t_2^{(1a)} \mid = \mid t_2^{(1b)} \mid $
and thus $ d_{x^2-y^2} $-pairing symmetry.
It is reasonable for the $ t_2^{(1a)} $-term to favor
the $ d_{x^2-y^2} $-wave state because the process of
$(\xi_1,\xi_2)=((0,0),(1,0)) \rightarrow ((0,0),(1,1))$
and the process of $((0,0),(1,1)) \rightarrow ((0,0),(0,1))$,
for example, have opposite signs.
Also, it is reasonable for the $ t_2^{(1b)} $-term to favor
the $ p_{x} $-wave state because the process of
$(\xi_1,\xi_2)=((0,0),(1,0)) \rightarrow ((0,0),(2,0))$
and the process of $((0,0),(2,0)) \rightarrow ((1,0),(2,0))$,
for example, have opposite signs,
as shown in Fig.~\ref{fig:parity}(a).
Once again we note that the 2nd-neighbor polaron pair
configuration contributes to the polaron pair wave function
of $ d_{x^2-y^2} $ symmetry.
\section{EFFECTIVE HOPPING AND ATTRACTION}
\label{sec:effective}
We have seen how total momenta and internal symmetries of
few-hole-polaron states are determined by the signs and
relative magnitudes of the effective
polaron tunneling matrix elements.
In this section, we show numerical estimates of them with
effective polaron nearest-neighbor attraction and effective
pair binding energy to see the energy scales of polaron dynamics.
The relative energy scale of kinetic energy to interaction
strength is controlled by the phonon frequency in
the original Hamiltonian (\ref{eq:model}).
It is noted again that we use a constrained lattice dynamics
approach and exactly solve the lattice Schr\"odinger equation
corresponding to the effective action (\ref{eq:s_eff})
for the lattice-displacement configurations constrained to
the linear tunneling path of the respective hop,
as described at the end of Sec.~\ref{sec:instanton}.
The effective lattice potentials are based on Lanczos calculations
on finite clusters with periodic boundary conditions.
The numerical results should be regarded as very rough
order-of-magnitude estimates only.
The nearest-neighbor attraction $ V_{\rm P} $ is calculated
according to the formula (\ref{eq:effective_attraction}).
The pair binding energy $ \Delta $ is estimated in the
nearest-neighbor pair approximation, according to
\begin{equation}
\Delta = 2 \epsilon_1({\bf p}_1^{\rm (min)}) -
\epsilon_2^-({\bf p}_2^{\rm (min)})
\;,
\label{eq:delta_1}
\end{equation}
where $ \epsilon_1({\bf p}) $ and $ \epsilon_2^-({\bf p}) $ are
defined in Eq.~(\ref{eq:one_polaron_dispersion}) (with
$ t_1^{(1)} $=0) and Eq.~(\ref{eq:two_polaron_dispersion}),
respectively, measured relative to the $P=0$ groundstate energy, and
$ {\bf p}_P^{\rm (min)} $ are the respective (thus different) momenta
at the band minima discussed above.
Note that the sign of $\Delta$ is so defined that
$\Delta>0$ signifies a net attraction, $\Delta<0$ repulsion.
Figure~\ref{fig:parameter}(a) shows the logarithm of the
dominant 2nd- and 3rd-neighbor hopping amplitudes
$ t_P^{(2)} $ and $ t_P^{(3)} $ for $ P $=1, 2
in the Holstein-$ tJ $ model
on an $ N = \sqrt{8} \times \sqrt{8} $ cluster.
As expected in a polaronic system,\cite{Holstein} all
$ t_P^{(\nu)} $ are suppressed, roughly exponentially,
with increasing $ E_{\rm P}/\Omega $ and strongly reduced
compared to the bare electronic $ t $.
However, for $ E_{\rm P} $ near $ E_{\rm P}^{\rm (crit)} $,
the $ t_P^{(\nu)} $ can become comparable
to the phonon energy scale $ \Omega $.
For $ P $=2, the proximity of the second, static polaron
strongly enhances the amplitudes $ t_2^{(2)} $ and $ t_2^{(3)} $
relative to $ t_1^{(2)} $ and $ t_1^{(3)} $.
It is worth noting that this effect occurs
only in the presence of
strong electron correlations where bipolaron
formation is prevented by the strong on-site Coulomb
repulsion. By contrast, this effect
never occurs in ordinary polaronic systems with the
electron-phonon interaction $E_{\rm P}$ larger than the local
Coulomb repulsion. In the latter case small bipolarons will
form \cite{AlRa} which are much heavier than polarons. To generate
the above-described delocalization
(and hence mobility!)
it is essential to keep the two polarons spatially separated
by strong enough on-site Coulomb effects.
The accidental symmetries leading to $ t_P^{(2)} = t_P^{(3)} $
will be lifted on larger lattices and, more importantly, by
inclusion of longer-range couplings, such as 2nd-neighbor
hopping $ t' $, Eq.~(\ref{eq:h_t2}), and extended Coulomb repulsion
$ V_{\rm C} $, Eq.~(\ref{eq:h_lc}),
in the original EP Hamiltonian (\ref{eq:model}),
as will be shown below.
Due to the exponential dependence of the delocalization
matrix elements $ t_P^{(\nu)} $
on the lattice potential parameters, such additional couplings
can substantially affect the magnitudes of the $ t_P^{(\nu)} $
parameters, without necessarily altering the Berry phase factors
or the predominance of the 2nd- and 3rd-neighbor hopping terms
($t_P^{(2,3)}>t_P^{(1)}$)
and their two-polaron enhancement ($t_2^{(\nu)}>t_1^{(\nu)}$).
The Berry phase factors should be a robust feature
of our model, since they reflect the topological properties of
the relevant tunneling paths relative to certain singular manifolds
of the lattice action in $u$-space.
Figure~\ref{fig:parameter}(b) shows the nearest-neighbor attraction
$ V_{\rm P} $ and the pair binding energy $ \Delta $, where
the latter quantity is given by
\begin{equation}
\Delta = V_{\rm P} +2 t_2^{(2,3)} -8 t_1^{(2,3)}
\label{eq:delta_2}
\end{equation}
for $t_P^{(2)}=t_P^{(3)}$, using (\ref{eq:delta_1}).
Since two self-localized nearest-neighbor holes mutually inhibit
their delocalization, the $ t $-term in the original Hamiltonian
(\ref{eq:model}) gives a repulsive contribution to $ V_{\rm P} $:
in the parameter range shown in the figure,
$ V_{\rm P} < 0.342J $ ($ 0.316J $) is substantially reduced
compared to $ V_{\rm P}(t=0) = 1.00J $ ($ 0.926J $) on
$ N = \sqrt{8} \times \sqrt{8} $ ($ \sqrt{10} \times \sqrt{10}$)
sites in the $ t \rightarrow 0 $ limit.
Compared to the $ tJ $ model, $ V_{\rm P} $ can be larger or
smaller: self-localization reduces the effective polaron hopping
processes, giving an attractive contribution, and it is more
effective in the one-hole state than in the two-hole state,
giving a repulsive contribution.
The binding energy $ \Delta $ is enhanced by the two-polaron hopping
amplitudes $ t_2^{(2,3)} $, but it is smaller, in most of the
parameter range shown in the figure, than $ V_{\rm P} $
due to the restricted hopping processes for the polaron pair and
due to the non-negligible $ t_1^{(2,3)} $-term for large $ \Omega $.
In a more realistic theory, the possible competition between
polaron pairing and phase separation\cite{phase_sep} would need to be
considered for finite density of holes.
In order to see a finite size effect, we have calculated
the effective model parameters
on $ N = \sqrt{10} \times \sqrt{10} $ sites (not shown)
to compare with those on $ N = \sqrt{8} \times \sqrt{8}$ sites above.
We find no qualitative difference between them.
In the parameter range shown in the figure, the values of
$ t_P^{(\nu)} $ are different by a factor of two at most, but
these values are rough order-of-magnitude estimates in any case.
The values of $ V_{\rm P} $ for $ N = \sqrt{10} \times \sqrt{10} $
are smaller by a factor of 0.8--0.9.
For the Holstein-Hubbard model, we find results
(Fig.~\ref{fig:Hubbard}) quite similar to those shown above.
However, the values of $ V_{\rm P} $ are only 30\% of those
in the Holstein-$ tJ $ model, which is reminiscent of the fact
that the hole binding energy is larger for the $ tJ $ model
than for the Hubbard model.
Furthermore, the values of $ t_2^{(2,3)} $ are smaller than
those of the Holstein-$ tJ $ model for $ \Omega < 0.2t $,
and the values of $ t_1^{(2,3)} $ are larger by a factor of
1.6--3.7 in the parameter range shown in the figure.
All these results make the pair binding energy smaller
in the Holstein-Hubbard model.
For large $ \Omega $, the polaron pair becomes unbound,
though our results are based on the adiabatic approximation
and the nearest-neighbor pair approximation so that they
are less reliable for large $ \Omega $.
We now turn to the effects of 2nd neighbor
electron hybridization and
long-range Coulomb couplings which lift
the accidental finite-cluster degeneracy,
$ t_P^{(2)} = t_P^{(3)} $, and thus shift the system
off the $ d $-$ p $ phase boundary for $ P $=2, already
in the absence of $ t_2^{(1a,1b)} $-processes.
The 2nd-neighbor electron hopping term
in the original Hamiltonian (\ref{eq:model})
enhances the 2nd-neighbor hopping $ t_2^{(2)} $,
lowers the 3rd-neighbor one $ t_2^{(3)} $, and thus
favors the $ d_{x^2-y^2} $-wave symmetry if $ t' $ is positive
by the definition in Sec.~\ref{sec:model}
(Fig.~\ref{fig:2nd_n_hopping_plus}), and the effects are
opposite if $ t' $ is negative
(Fig.~\ref{fig:2nd_n_hopping_minus}).
Note that, in the noninteracting tight-binding model,
a positive $ t' $ raises the energy of the ${\bf p}$=($\pi$,0) state
[thus the energy of the ${\bf p}$=($\pi/2$,$\pi/2$) state is relatively
lowered] and makes the Fermi surface convex.
A $t'$-term which helps the 2nd-neighbor electron hopping
also helps the 2nd-neighbor polaron hopping.
The long-range repulsion term enhances $ t_2^{(3)} $ more than
$ t_2^{(2)} $, so that it favors the $ p_{x(y)} $-wave symmetry
(Fig.~\ref{fig:long_range_Coulomb}).
This can be understood if we recall the second-order perturbation
theory with respect to $ t_2^{(1a,1b)} / V_{\rm P} $.
The $ V_{\rm C} $-term raises the energy of the intermediate
2nd-neighbor pair favoring the $ d_{x^2-y^2} $-wave symmetry,
compared to that of the intermediate 3rd-neighbor pair favoring
the $ p_{x(y)} $-wave symmetry.
Note that $ V_{\rm C} $ enhances both of the $ t_2^{(2)} $ and
$ t_2^{(3)} $ processes.
This happens because the lattice distortion and thus the localization
potential is weakened by $ V_{\rm C} $.
If $V_{\rm C}$ is too strong, however, it may overcome
the nearest-neighbor attraction and the polaron pairing
will then be suppressed altogether. This will be discussed
further in the next section.
\section{POLARON LIQUIDS AND THE CUPRATE SUPERCONDUCTORS}
\label{sec:p_liquid}
To the extent that the qualitative features
of the above-discussed effective Hamiltonian
(\ref{eq:effective_Hamiltonian})
and the resulting tunneling and pairing dynamics
remain intact at finite hole doping concentrations,
the foregoing results have some potentially
interesting consequences for the physical
properties of the polaron liquid formed
at finite polaron densities. In the present section,
we will speculate on some of these properties
and compare to experimental observations
in the cuprate high-$T_c$ superconductors.\cite{YZS}
If the above-discussed polaron pair state
remains stable and delocalized at finite hole doping, then
formation of a superconducting
polaron pair condensate can occur
at low enough temperatures.
The foregoing discussion has focussed primarily
on the tightly-bound-pair limit where such a condensate
would be formed via Bose condensation of the {\it pre-existing}
polaron pairs. However, the qualitative $ \Omega $-dependences of
the delocalization energies $ t_P^{(\nu)} $ and
of the pairing potential $V_{\rm P}$,
shown in Fig.~\ref{fig:parameter}
suggest that with increasing $ \Omega $ (and fixed electronic
parameters $ t $, $ J $ and $ E_{\rm P} $), such a condensate
may exhibit a crossover from
tightly-bound-pair to a BCS-like,
extended-pair behavior: For small $\Omega$, the delocalization
matrix elements $t_2^{(\nu)}$ and resulting
polaron pair bandwidth become small compared
to the pairing potential $V_{\rm P}$.
Hence tightly bound pairs will form,
as described above, with a pair wavefunction
extending only over 1--2 lattice constants.
For large $\Omega$ on the other hand, the
polaron bandwidths ($B_1$ and $B_2$)
can become comparable or larger than the pairing
potential, thus leading to a BCS-like extended pair state,
with a pair wavefunction extending over several/many lattice constants.
In the tightly-bound-pair regime, the Bose condensation
$ T_c $ is controlled by the pair density $x_{\rm pair}$
and the pair bandwidth $B_2$, that is, roughly
\begin{equation}
T_c\sim x_{\rm pair} B_2
\label{eq:t_c}
\end{equation}
where $B_2$ is the pair bandwidth (\ref{eq:b2}) and
\begin{equation}
x_{\rm pair}={1\over2}(1-\langle n\rangle) = {1\over2} x
\label{eq:x_pair}
\end{equation}
is the pair concentration, i.e.,
half of the hole concentration $x$.
$B_2(\Omega)$, and hence $T_c$, decreases
with decreasing $\Omega$.
In the BCS-like extended-pair regime,
$T_c$ is controlled by the pair binding energy
$ \Delta $ which decreases
with increasing delocalization energy and hence with increasing
$\Omega$. As a consequence, there must
exist, somewhere in the cross-over regime between
the tightly-bound-pair and the BCS (extended-pair) limits,
an optimal phonon frequency $ \Omega_o $ where
the transition temperature $ T_c(\Omega) $
is maximized. $\Omega_o$ is roughly
determined by the condition
\begin{equation}
B_2(\Omega_o) \sim V_{\rm P}
\;,
\label{eq:omega_o}
\end{equation}
and the maximum possible $T_c$ (as a function of $\Omega$!),
estimated by extrapolation of (\ref{eq:t_c})
from the tightly-bound-pair side, is of order
\begin{equation}
T_{co}\equiv T_c(\Omega_o)
\sim x_{\rm pair} B_2(\Omega_o)
\sim x_{\rm pair} V_{\rm P}
\;,
\label{eq:t_co}
\end{equation}
where $B_2(\Omega)$ is the polaron pair bandwidth,
Eq.~(\ref{eq:b2}),
as a function of phonon frequency $\Omega$.
One crucial, experimentally observable difference
between the tightly-bound- and
the extended-pair condensate is the relation between pair
formation and superconducting transition: In the tightly-bound-pair
regime the pairs, and hence the pairing gap $\Delta$
in the excitation spectrum,
can be {\it preformed}.
That is, the polaron pairs and the
energy gap for pair breaking exist
already at temperatures $T\sim\Delta$ which could
be well above $T_c$, provided that
$\Delta\gg T_c$. By contrast, in the extended-pair BCS-like regime
we expect the pair formation to coincide with the superconducting
transition, that is, the pairing gap should be observable only
at temperatures $T$ below $T_c$ and should vanish at $T_c$.
The existence of such an optimum phonon frequency
implies that $T_c$ exhibits a vanishing
isotope exponent $\alpha$ when $\Omega=\Omega_o$. To show this,
we note that the isotopic mass dependence enters
into the theory only via the phonon frequency $\Omega$, if the
electron-phonon Hamiltonian is parametrized,
as in (\ref{eq:e_pol}) and (\ref{eq:omega}),
in terms of $E_{\rm P}$ and $\Omega$, since electron-phonon
potential constants ($C$) and
harmonic restoring force constants ($K$), and hence $E_{\rm P}$,
are of purely electronic origin, i.e., do {\it not}
depend on atomic/isotope masses. Using $\Omega\propto M^{-1/2}$,
from (\ref{eq:omega}), we conclude that
\begin{equation}
\alpha \equiv -{\partial \log T_c \over \partial \log M}
\Big|_{\rm el}
={1\over2}{\partial \log T_c \over \partial \log \Omega}
\Big|_{\rm el}
\end{equation}
which vanishes at the $T_c$-maximum
$
\Omega\!=\!\Omega_o
$.
The notation $\dots|_{\rm el}$ here means that the derivatives are
to be taken with all purely electronic model parameters ($t$, $U$, $J$,
$E_{\rm P}$, etc.) held constant. The $T_c$-maximum at $\Omega_o$
also implies that $\alpha$
is positive in the tightly-bound-pair regime $\Omega_o>\Omega$,
but negative in the extended-pair regime $\Omega_o<\Omega$.
The vanishing of $\alpha$ at
$\Omega=\Omega_o$ does however {\it not} imply that $\alpha$
is generically a small number. Quite to the contrary,
because of the strong $\Omega$-dependence of the
polaron bandwidth parameters,
we should expect $\alpha$ to attain quite substantial magnitudes,
with $|\alpha|\sim{\cal O}(1)$, as the system is tuned away
from the optimal phonon frequency, i.e., when $\Omega\neq\Omega_o$.
It is tempting to compare the foregoing features of a finite-density
polaron liquid to the observed properties of the cuprates.
The doping-dependence of the superconducting and normal-state
properties of the cuprates is, in some respects, very much reminiscent
of a cross-over from tightly-bound-pair to BCS/extended-pair
behavior: In the underdoped cuprates, there is now a substantial
body of evidence suggesting that the superconducting gap is
pre-existing, in the form of a ``pseudo-gap'', at temperatures
well above $T_c$.\cite{pseudo_gap_exp}
With increasing hole doping concentration $x$,
$T_c$ approaches a maximum, while the pseudo-gap above $T_c$ is
gradually suppressed, and, in close proximity to the optimal doping
concentration $x_o$, the pseudo-gap above $T_c$ vanishes.
Well inside the overdoped regime $x>x_o$, there is no detectable
pseudo-gap and $T_c$ rapidly decreases with increasing $x$.
The isotope exponents $\alpha$ in the underdoped cuprates
are typically quite large in magnitude, of order of the classical
BCS value $\alpha_{\rm BCS}=\frac{1}{2}$ or larger. However, in
contrast to conventional BCS-type phonon-mediated superconductors,
$\alpha$ can be very sensitive to changes in doping and other
system properties such as impurity concentration and crystal
structure. With increasing hole-doping concentration,
the observed, usually positive oxygen isotope exponent $\alpha$
decreases and becomes very small, typically $<0.05$, at the optimal
doping concentration $x_o$.\cite{isotope_exp}
It is presently not clear whether $\alpha$
changes its sign for $x>x_o$. Negative $\alpha$-values have been
observed in copper isotope substitutions on less than optimally doped
cuprate materials.\cite{Franck_Cu}
In comparing these experimental results to the foregoing
theoretical picture of a polaron liquid, it is
important to note that, experimentally, the $T_c$-maximum
and the surmised cross-over from tightly-bound-pair
to extended-pair BCS-like behavior is observed as a function of doping
concentration $x$, whereas, in our above theoretical considerations,
we have discussed the cross-over as a function of phonon frequency
$\Omega$. To see how such a cross-over could arise in our
model as a function of doping, we need to consider
the doping dependence of the polaron delocalization matrix
elements $t_P^{(\nu)}$.
As indicated in
Figs.~\ref{fig:parameter}--\ref{fig:long_range_Coulomb},
the polaron delocalization
matrix elements, and hence the polaron pair bandwidth $B_2$
are rapidly increasing functions of $\Omega$.
At finite doping, these delocalization matrix elements
will also become dependent on the hole doping concentration
$x=1-\langle n\rangle$ by the following mechanism: As the polaron
density increases, the localized wavefunctions of nearby holes will
begin to overlap and the holes will begin to mutually
screen out each other's tunneling barriers. This effect can be
clearly seen in comparing the 1- and 2-hole results
in Fig.~\ref{fig:parameter}.
For $P=2$, the mere proximity of the second, static polaron
strongly enhances the tunneling matrix element of the first,
moving polaron, hence $t_2^{(\nu)} > t_1^{(\nu)}$ for $\nu=2,3$.
Treated at the mean-field level, at finite polaron density, this
tunneling enhancement effect will cause the (mean-field average)
tunneling matrix elements to increase with the hole doping
concentration. Thus the effective polaron
pair bandwidth $B_2=B_2(\Omega,x)$
becomes a strongly increasing function
of the hole doping concentration $x$.
According to the cross-over criterion (\ref{eq:omega_o})
it may then be possible to drive the
polaron liquid from the tightly-bound-pair into the extended-pair
regime by changing either the phonon frequency $\Omega$
or the doping concentration $x$, if $V_{\rm P}$ is
only weakly dependent on
$\Omega$ and $x$. Another way of stating the same result is
to say that the optimal phonon frequency $\Omega_o=\Omega_o(x)$,
according to (\ref{eq:omega_o}), is a decreasing
function of the hole doping concentration $x$.
The underdoped region corresponds to the
tightly-bound pre-existing-pair
regime in this picture; the overdoped region is identified with
the extended-pair BCS-like regime.
The superconducting transition temperature $T_c$
as a function of $x$ reaches a maximum at an optimal doping
concentration $x_o$ not too far from the concentration $x_\Omega$
where $\Omega_o(x_\Omega)=\Omega$ and the isotope exponent vanishes.
Notice here that the point $x_o=x_o(\Omega)$
[where $T_c(\Omega,x)$ reaches its
maximum as a function of $x$ at fixed $\Omega$] need not
exactly coincide with the point $x_\Omega$ [where the optimal
phonon frequency $\Omega_o(x)$ equals the actual
phonon frequency $\Omega$].
At sufficiently large hole doping the polaron-polaron wavefunction
overlap and the mutual screening of the hole-localizing potential
wells $Cu$ may become so strong that the holes become unbound,
that is, the polarons become unstable towards
forming free carriers. This finite-density polaron unbinding
can be regarded as analogous to the Mott delocalization
transition in moderately doped semi-conductors.
The primary difference is that the Mott transition
in semi-conductors involves the
screening of localizing potential wells due to static impurities
whereas, in the present case, the localizing potential wells
are due to local lattice distortions which are induced,
via the EP coupling, by the polaronic holes themselves.
In the adiabatic potential $W(u)$, this unbinding
will manifest itself in the (gradual or abrupt) disappearance
of local minimum configurations $u^{(\xi)}$. Whether, in the
thermodynamic limit, this occurs as a sharp transition
or as a continuous cross-over is presently unclear and
needs further study \cite{Roeder}.
The nature of the polaron unbinding and the
characteristic concentration $x_u$ where the unbinding
occurs will also be influenced by the
long-range Coulomb interaction $V_{\rm C}$ and, in more general
EP models, by the spatial range of the EP interaction.\cite{EmHo}
If the optimal polaronic doping concentrations $x_o$ and $x_\Omega$
are close to the polaron unbinding concentration $x_u$,
the polaron unbinding will likely dominate the cross-over into the
extended pair regime: In this scenario ($x_u\cong x_o,x_\Omega)$,
the cross-over from under- to overdoping
takes the system directly from the tightly-bound polaron pair
liquid into a BCS-like superconductor of extended pairs
of non-polaronic carriers. The effective mass of the non-polaronic
carriers in the overdoped regime is much less enhanced by the
electron-phonon coupling and, more importantly, the
mass enhancement is independent of the isotopic mass of the ions.
The latter is suggested by the conventional
weak-coupling electron-phonon theory where the mass enhancement factor
is given by $(1+\lambda_z)$ and the Eliashberg parameter
$\lambda_z$ is independent of the isotope mass.
If the pairing attraction is of predominantly
electronic (i.e., non-phonon) origin, one will then obtain a very
small isotope exponent $|\alpha|\ll 1$ throughout the overdoped regime
$x>x_u$.
Thus, the overall magnitude of $\alpha$ can
serve as a distinguishing feature between extended pairs
of polaronic and non-polaronic carriers in the overdoped regime.
In the former scenario, already described above, $\alpha(x)$ changes
sign near optimal doping, but $|\alpha|$ well inside the
overdoped regime can become as large
as in the underdoped regime, reflecting the fact
that the underlying pair constituents are still single-hole polarons.
By contrast, in the latter (unbound carrier) scenario,
$|\alpha|$ becomes small in the overdoped regime,
without necessarily incurring a sign change in $\alpha$,
reflecting the non-polaronic nature of the pair constituents.
Further experimental studies of the isotope exponent in the
overdoped cuprate systems would be desirable.
The foregoing features of the underdoped polaron liquid model
and its cross-over into the overdoped regime exhibit strong
similarities with the observed pairing symmetry and
doping dependences of $T_c$, isotope exponent and pseudo-gap
in the cuprates. However, in its present form, the model
also suffers from several potential drawbacks which
arise from the {\it small}-polaron character of the self-localized
hole. Small-polaron formation
necessarily implies bandwidths $B_1$ and $B_2$
which cannot be substantially larger than the
phonon energy scale $\Omega$, as shown in
Figs.~\ref{fig:parameter}--\ref{fig:long_range_Coulomb}.
As a consequence, small-polaron carriers may be
easily localized by disorder and/or long-range Coulomb interaction
effects. Also, by Eq.~(\ref{eq:t_co}), the overall magnitude
of the optimal $T_{co}\sim \frac{1}{2}x B_2\lesssim \frac{1}{2}x\Omega$
cannot exceed some fraction of $\Omega$. With $x\sim 0.10-0.20$
and $\Omega\lesssim 1000 K$\cite{phonon_exp},
this upper limit on $T_{c}$ is of
order $50-100 K$ in the cuprates and it is reached
if $E_{\rm P}$ just barely exceeds $E_{\rm P}^{({\rm crit})}$.
For substantially larger $E_{\rm P}$, $B_2$ and $T_{c}$ are rapidly
(exponentially!) suppressed with $E_{\rm P}$. It is not clear from
the experimental data, whether observed carrier mobilities,
effective masses and $T_c$'s in the underdoped cuprates actually
exhibit such a strong sensitivity to changes in $E_{\rm P}$
and/or to disorder or long-range Coulomb interactions.
The foregoing limitations of the small-polaron system can
ultimately be traced back to the short-range nature of the
assumed Holstein EP coupling in our model. Scaling arguments
show that, at the level of the 0th order adiabatic approximation
in spatial dimensions $D\geq2$, short-range
EP models are subject to a dichotomy whereby single carriers either
form small polarons, if $E_{\rm P}$ exceeds a certain threshold
$E_{\rm P}^{({\rm crit})}>0$, or they do not form polarons at all, if
$E_{\rm P}<E_{\rm P}^{({\rm crit})}$ \cite{emin_holstein_scaling}.
By contrast, in systems with additional
longer-range EP couplings, such as
the Fr\"ohlich model\cite{emin_holstein_scaling,emin_large_pol},
as well as
in 1D short-range EP models\cite{ScHo,turkevich},
it is possible to form large polarons at arbitrarily weak
$E_{\rm P}$, i.e., with $E_{\rm P}^{({\rm crit})}=0$.
It has been argued \cite{emin_large_pol} that large-polaron
and large-bipolaron models can remedy some of the above described
deficiencies of the small-polaron picture, while retaining
most of the desirable physical features described above.
Thus, in a large-polaron model, there is still preformed pair
formation above the superconducting $T_c$ in the underdoped regime;
and the carrier bandwidth is still strongly reduced and dependent
on isotope mass.
Also, the possibility of a cross-over to a BCS-type free carrier
superconductor, as a function of doping, via polaron unbinding,
is retained in a large-polaron theory.
However the dependence of the bandwidth and $T_c$ on EP coupling
strengths $E_{\rm P}$ and on phonon frequencies $\Omega$ is only
algebraic, rather than exponential, and the overall magnitude of
the large-polaron bandwidths can become substantially larger
than in a small-polaron model, thus allowing for larger
$T_c$'s. The proposed large-polaron theories
studied so far \cite{emin_large_pol}
have been based on phenomenological continuum models which of course
cannot reproduce lattice-related features, such as the location
of band minima and pairing symmetries discussed above for our
2D lattice model. It will therefore be of interest
to extend our present work to lattice models with longer-range
EP couplings. Such future studies should explore
the possibility of large-polaron formation and the bandstructure
and pair wavefunction symmetry of large-polaron pairs.
Another critical problem in the above described polaron models
is the inclusion of long-range Coulomb effects.
Rough estimates based on a point charge model and measured
long-wavelength dielectric constants \cite{SGEH} suggest that
$V_{\rm C}/t$ in the cuprates could be as large as $1-2$,
if only the electronic contribution to the dielectric screening,
that is, only $\epsilon_\infty$,
is taken into account. If additional screening from phonons,
i.e., $\epsilon_0$, is included, the estimated
$V_{\rm C}/t$ is reduced to $0.15-0.3$.
The former, $V_{\rm C}/t\sim 1-2$, would be sufficient to
completely suppress the polaron pairing attraction in a system
containing only two isolated holes, that is, in the limit of vanishing
polaron density. The latter, $V_{\rm C}/t\sim0.15-0.3$,
may be overcome by the AF-mediated 1st neighbor attraction,
but the net attraction strength would still be substantially reduced
by $V_{\rm C}$.\cite{phase_sep,tj_model}
The suppression of extended pairing states,
such as $d_{x^2-y^2}$ pairing, by the long-range part of the Coulomb
interaction is a common problem in all extended pairing models
which are currently under investigation. Recent studies of
the metallic (in addition to insulating dielectric!) screening
of the extended Coulomb potential at finite
doping density\cite{SGEH,esirgen_et_al}
have suggested that the screened Coulomb potential
becomes substantially reduced, or even attractive,
at doping concentrations $x\sim 0.1-0.2$.
However the foregoing studies are based
on weak coupling or diagrammatic approaches which do not include
polaronic strong-EP effects. It therefore remains
to be seen whether metallic screening in a finite-density
polaron liquid will be sufficient to ``rescue'' the AF-driven
pairing attraction from the repulsive long-range Coulomb forces.
It is also worth re-emphasizing \cite{emin_large_pol}
the strong phonon contribution to the dielectric screening
in the cuprates,
as evidenced by the large measured dielectric constant ratio
$\epsilon_0/\epsilon_\infty\gtrsim 6$\cite{emin_large_pol,SGEH}.
This phonon contribution, which acts to reduce long-range
Coulomb forces, can be equivalently regarded as a
long-range attraction, mediated by long-range
(dipolar) EP interactions. This long-range EP interaction
is in fact a primary agent causing
(bi-)polaron formation in the above-cited
\cite{emin_large_pol} phenomenological
large-polaron models. It is therefore quite conceivable
that, in a realistic model of the cuprates, both AF-
{\it and} EP-mediated attractions contribute to the overall
pairing potential and that the EP contribution may even be the
predominant one.
\section{SUMMARY}
\label{sec:summary}
In conclusion, we have developed a treatment of polaron
tunneling dynamics on the basis of a path integral
formulation of the adiabatic approximation.
The adiabatic treatment of polaron tunneling
has been tested by comparison to exact numerical results
for a two-site Holstein system. The break-down of the
adiabatic approach in the
anti-adiabatic regime has been discussed and the resulting
limitations of applicability for
long-range polaron tunneling processes in lattice models
have been identified.
Using a combination of path integral,
many-body tight-binding and exact diagonalization
techniques, we have then explored the Berry phases and effective
matrix elements for single- and two-polaron tunneling, the two-polaron
quasiparticle statistics, effective two-polaron interactions,
and polaron pairing states in the 2D Holstein-$ tJ $ and
Holstein-Hubbard models near half filling.
The effect of 2nd neighbor electron hybridization
and long-range Coulomb repulsion has also been studied.
Due to the AF spin correlations, single-polaron hopping is
dominated by {\em intra\/}-sublattice 2nd- and 3rd-neighbor
processes.
These processes are strongly enhanced by close proximity of
a second polaron.
The Berry phases imply either $ d_{x^2-y^2} $- or
$ p_{x(y)} $-wave pair symmetries and effective spin-1/2-fermion
quasiparticle statistics of dopant-induced polaron carriers.
For the Holstein-$ tJ $ and Holstein-Hubbard models
on the 8-, 10-, and 16-site clusters, the $d_{x^2-y^2}$-wave
state is stable for two polarons. The second neighbor hopping
$H_{t'}$ favors the $d_{x^2-y^2}$-wave pair for $t'>0$, while
the long-range Coulomb repulsion $H_{\rm lc}$ favors the
$ p_{x(y)} $-wave pair.
The strong on-site Hubbard-$U$ Coulomb repulsion plays a crucial
role in the formation of these pairing states.
By keeping the electrons spatially
separated and preventing on-site bi-polaron formation the Hubbard-$U$
interaction acts, effectively, to greatly enhance the
polaron tunneling bandwidths and, hence, their mobility,
in the nearly $\frac{1}{2}$-filled regime.
For a hypothetical superconducting polaron pair condensate,
our results imply qualitative doping dependences
of the isotope effect, $ T_c $ and pseudo-gap
which are similar to those observed in the cuprates.
Potential limitations of the present polaron model,
arising from the short-range nature of the assumed EP coupling,
have been pointed out. Further studies to
include longer-range EP couplings, in combination with
extended Coulomb interactions, have been outlined.
\acknowledgments
One of us (HBS) would like to thank D. Emin, J.P. Franck,
K. Levin and M. Norman for helpful discussions.
This work was supported by Grant No. DMR-9215123
from the National Science Foundation
and a Grant-in-Aid for Scientific Research on Priority Area
``Nanoscale Magnetism and Transport''
from the Ministry of Education, Science, Sports and Culture, Japan.
Computing support from UCNS at the University of Georgia is
gratefully acknowledged.
\section{Introduction}
Stochastic models are used in macroevolution to obtain phylogenetic trees from the molecular data of extant species.
In these models, it is common to assume that in the distant past there was a common ancestor from which the extant species evolved forward in time, according to a prior random process with a given number of species. The time from the origin of the evolutionary process to the present often is inferred from the data. Hence, the method to obtain a phylogenetic tree relies on simulation of branching processes and likelihood calculation on the reconstructed phylogenetic tree. Then, the shapes of such phylogenetic trees can be used to make some inference on the diversification properties of the evolutionary process, see \cite{gernhard2008conditioned, AmauryTanjiaTPB2013, morlon2011reconciling, stadler2011inferring}.
The central problem in the above method is to pre-specify the unknown random process describing the fluctuation of the species number over time. As an attempt to avoid this problem, \textit{genealogical point processes} were introduced in \cite{lea2004}, based on a binary branching process conditioned on its current population size. In accordance with the biological literature where models of evolution are made on each level of taxonomy, we will use the branching terminology (individuals, births, deaths) instead of the macroevolution terms (species, speciation, extinction).
Genealogical point processes were extended to consider branching processes with non-binary offspring distribution in \cite{amaurylea2013}. Namely, therein \textit{coalescent point processes} (CPPs for short) are defined to describe backwards in time the genealogy of an extant population evolving as a Galton Watson tree. These processes have been widely used because they lead to extremely fast computation of tree likelihoods, see \cite{AmauryTanjiaTPB2013}. Moreover, according with a recent work by Lambert and Schertzer \cite{AmauryEmmanuel2019}, it is possible to recover the genealogy of the Kingman coalescent by
taking a sparse sample from a CPP.
Coalescent Point Processes were studied for different types of branching processes. For example, in \cite{amaurylea2013}, they analyzed Galton-Watson processes and continuous state branching processes. An extension to multi-type Galton-Watson processes was done by Popovic and Rivas \cite{LeaRivas2014}. In this paper, we aim to define a CPP for single-type Galton-Watson processes in varying environment.
Consider an infinitely old branching tree, where individuals in the same generation have the same offspring distribution but these distributions vary along the generations. In other words, consider an infinitely old Galton-Watson tree in varying environment.
Our main result is to describe the genealogy of an arbitrary large population at the present time (generation 0), originated at an unspecified arbitrary large time in the past.
In order to do it, we are going to use a representation that labels individuals at each generation in such a way that the lines of descent do not intersect. In other words, we are going to have a monotone planar embedding tree where the $i$-th individual in the $n$-th generation is represented by $(n,i)$ for any $n\in \mathbb{Z}_{-}:=\{0,-1,-2,\dots\}$ and $i\in \mathbb{N}:=\{1,2,\dots\}$.
Individuals at the present generation will be denoted simply by $i$ instead of $(0,i)$.
For every $i\geq 1$, consider $A_i$ the coalescent time between individuals $i$ and $i+1$. The genealogy of the current generation backwards in time is uniquely determined by the \textit{coalescent point process} $(A_i,i\geq 1)$, as we will see below. In general, this process is not Markovian and its law is difficult to characterize. A natural goal is to find a Markov process such that it has the minimal amount of information to reconstruct $(A_i,i\geq 1)$. If the offspring distributions do not vary along the generations, Lambert and Popovic \cite{amaurylea2013} constructed two processes $(B_i, i\geq 1)$ and $(D_i, i\geq 1)$ such that for every $i$, $A_i$ is the first non-zero entry of $B_i$ or $D_i$, respectively. The process $(D_i, i\geq 1)$ is Markovian but it has a lot of repetitive information. The process $(B_i, i\geq 1)$ is constructed from $(D_i, i\geq 1)$ by removing \textit{some} information. They claimed that $(B_i, i\geq 1)$ contains the minimal amount of information needed to construct $(A_i, i\geq 1)$ while remaining Markov. However, they had a mistake in their proof and, as can be seen in Example \ref{counterexample}, in general it is not Markovian.
The main contribution of this paper is to propose a new Markov process $(B_i, i\geq 0)$ that reaches the goal of reconstructing the genealogy with the minimal information. Furthermore, the construction is valid for Galton-Watson processes in varying environment.
We propose a vector-valued process $(B_i, i\geq 0)$ constructed in the following recursive way. We define $B_0=\emptyset$, the empty vector.
For any fix individual $i$ we follow its ancestral line (the so called spine). For every $n\in \mathbb{N}$, we consider the subtree attached to the spine with root at generation $-n$. We denote by $B_i(n)$ the number of children of the root at the right hand side of the spine, whose descendants are alive at the present generation. Then, we define the vector $B_i=(B_i(1),\dots, B_i(l_i))$, where its length $l_i$ is the maximum between $l_{i-1}$ and the first $n$ such that $B_i(n)\neq 0$. By construction, we can see that $l_i$ is
the coalescent time between individuals $1,2,\dots, i+1$. In addition, $A_i$ is the first non-zero entry
of $B_i$. See Figure \ref{fig: figure-label}.
\begin{table}[ht!]
\begin{minipage}[b]{0.25\linewidth}
\centering
\resizebox{\textwidth}{!}{%
\begin{tabular}[!htbp]{| c | c | l |}
\hline
$i$ & $A_i$ & \hspace{0.6cm} $B_{i}$ \\ [.25 ex]
\hline
$1$ & $1$ & $(1)$ \\
$2$ & $2$ & $(0, 2)$ \\
$3$ & $1$ & $(1, 1)$ \\
$4$ & $2$ & $(0,1)$ \\
$5$ & $1$ & $(2,0)$ \\
$6$ & $1$ & $(1,0)$ \\
$7$ & $4$ & $(0, 0, 0, 1)$ \\
$8$ & $3$ & $(0, 0, 1, 0)$ \\
$9$ & $5$ & $(0, 0, 0, 0,1)$ \\
$10$ & $1$ & $(1, 0, 0,1, 0)$ \\
$11$ & $4$ & $(0, 0, 0,1, 0)$ \\ [.25 ex]
\hline
\end{tabular}
}
\label{table:student}
\end{minipage}
\hspace{-.5cm}
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[width=14cm]{arbol.pdf}
\end{minipage}
\captionof{figure}{A Galton-Watson tree in varying environment and its processes $(A_i, i\geq 1)$ and $(B_i, i\geq 1)$. We use colors to represent the different subtrees attached to the spine of individual $1$ whose descendants are alive at the present generation. The length of the vector $B_i$ is the height of the subtree attached to the $1$-th spine that contains individual $i+1$.}
\label{fig: figure-label}
\end{table}
If the environment is constant, Lambert and Popovic \cite{amaurylea2013} showed that the process $(A_i, i\geq 1)$ is Markovian only when the offspring distribution is linear fractional.
If the environment is not constant but still linear fractional, we show in Proposition \ref{Prop Linear} that $(A_i, i\geq 1) $ is a sequence of independent and identically distributed random variables and we obtain its distribution.
The remainder of the paper is structured as follows. In Section \ref{preliminares}, we present Galton-Watson processes in varying environment and some related properties. The definitions of $(A_i, i\geq 1)$ and $(B_i, i \geq 1)$ are given in Section \ref{main}. In the same section, we present our main theorem: $(B_i, i \geq 1)$ is a Markov process which has the minimal amount of information in order to construct $(A_i,i\geq 1)$. We give the Counterexample \ref{counterexample} and explain why a process with less information than $(B_i, i \geq 1)$ would not be Markov. In addition, if the offspring distributions are linear fractional we show that the process $(A_i, i\geq 1)$ is Markovian and we give explicitly its distribution. Finally, Section \ref{proofs} is devoted to the proofs.
\section{Preliminaries}\label{preliminares}
Galton-Watson processes in varying environment may be considered as a model for the development of the size of a population where individuals reproduce independently with offspring distribution potentially changing among generations. To be precise, a \textit{varying environment} is a sequence $\mathcal{E}=(e_n,n\geq 1)$ of probability measures on $\mathbb{N}_0:=\{0,1,\dots\}$. Let $Z_n$ be the population size at generation $n$.
The process $Z=(Z_n,n\geq 0)$ is a Markov chain defined recursively as
\[
Z_0=1\qquad \mbox{and} \qquad Z_{n+1}= \sum_{i=1}^{Z_n} \xi_i^{(n)}, \quad n\geq 0,
\]
where $\xi_i^{(n)}$ denotes the offspring of the $i$-th individual living at generation $n$ and it has distribution $e_{n+1}$. We assume that the variables $(\xi^{(n)}_i,i\geq 1, n\geq 1)$ are all independent. The process $(Z_n,n\geq 0)$ is called a \textit{Galton Watson process in varying environment} $\mathcal{E}$, for short GWVE.
Let $f_n$ be the generating function of $e_n$. For each $0\leq m< n$ and $s\in[0,1]$, we define
\begin{equation}\label{eq:fmn}
f_{m,n}(s):=f_{m+1}\circ \cdots \circ f_n(s), \qquad \mbox{and}\qquad \qquad f_{n,n}(s):=s,
\end{equation}
where $\circ$ denotes the composition. Note that for every $s\in[0,1]$ and $0\leq m<n$,
\begin{equation}\label{eq:fgpmn}
f^\prime_{m,n}(s)=\prod_{\ell =m+1}^n f^\prime_\ell (f_{\ell,n}(s)),\qquad \mbox{and}\qquad f^\prime_{n,n}(s)=1.
\end{equation}
According to Kersting and Vatutin \cite{gotzvatutin}, the generating function of $Z_n$ is equal to $f_{0,n}$. Namely,
\begin{equation*}
\mathbb{E}(s^{Z_n})=f_{0, n}(s), \qquad 0\leq s\leq1, \, \, n\geq 1.
\end{equation*}
Let us denote by $\theta:= \min\{ n\geq 1: Z_n=0\}$ the extinction time. Then,
\begin{equation}\label{eq:survival}
\mathbb{P}(\theta> n)=\mathbb{P}(Z_n\neq 0)=1 - f_{0,n}(0).
\end{equation}
We define $\zeta_n$ as the number of individuals at generation one having alive descendants at generation $n$. In particular, $\zeta_n$ has the law of
\begin{equation}\label{eq:lawzetan}
\sum_{i=1}^{\xi^{(0)}} \epsilon_i ^{(n)},
\end{equation}
where $\xi^{(0)}$ has distribution $e_1$ and $(\epsilon_i^{(n)}, i\geq 1)$ is a sequence of independent Bernoulli random variables with parameter $1 - f_{1,n}(0)$, independent of $\xi^{(0)}$.
We also define $\eta_n$ as the random variable distributed as $\zeta_{n}-1$ conditional on $\{\zeta_n\neq 0\}$. Note that for every $k\geq 0$,
\begin{equation}\label{eq:lawetan}
\mathbb{P}(\eta_n=k) = \mathbb{P}( \zeta_n=k+1 \mid \zeta_n \neq 0 )
= \frac{( 1- f_{1,n}(0) )^{k+1}f_1^{(k+1)} (f_{1,n}(0))}{(k+1)! (1 - f_{0,n} ( 0 ))},
\end{equation}
where $f^{(k+1)}_1$ denotes the $(k+1)$-th derivative of $f_1$. Indeed, we use
\[
\mathbb{E}\left(\xi^{(0)}(\xi^{(0)}-1)\cdots (\xi^{(0)}-k)s^{\xi^{(0)}-k+1}\right)=f^{(k)}_1(s)
\]
to obtain
\begin{align*}
\mathbb{P}(\zeta_n=k+1)
& = \sum_{i=1}^\infty \mathbb{P}( \zeta_{n}=k+1 \mid \xi^{(0)}=i) \mathbb{P}(\xi^{(0)}=i) \\
& = \sum_{i=1}^\infty \binom{i}{k+1} (1 - f_{1,n}(0))^{k+1} (f_{1,n}(0))^{i-k-1} \mathbb{P}(\xi^{(0)}=i) \\
& =\frac{(1 - f_{1,n}(0))^{k+1} }{ (k+1)! } f_1^{(k+1)}( f_{1,n}(0) ).
\end{align*}
Similarly,
\begin{align*}
\mathbb{P}( \zeta_n=0)
& = \sum_{i=0}^\infty ( f_{1,n}(0) )^i q_1(i)
= f_1( f_{1,n}(0) ) =f_{0,n} ( 0 ).
\end{align*}
We complete the section with an example.
\begin{example}\label{Ex linear1} An offspring distribution $\xi$ that satisfies
\[
\mathbb{P}(\xi =0)=1-r \qquad \mbox{and} \qquad \mathbb{P}(\xi =k) =rpq^{k-1}, \quad k\neq 0,
\]
where $0<q<1$, $p=1-q$ and $0\leq r \leq 1$, is called \textit{linear fractional}. Special cases are $r=1$, where we obtain a geometric distribution $G$; and $r=0$, where we obtain a Dirac measure $\delta_0$. If $0<r<1$, it is a mixture of both, i.e. $\xi=rG+(1-r)\delta_0$. In this case, its generating function is given by
\[
f(s)=1-r \frac{1-s}{1-qs}, \qquad s\in [0,1].
\]
Thanks to the identity
\begin{equation*}
\frac{1}{1-f(s)}=\frac{1}{1-f'(1)(1-s)} + \frac{ f''(1)}{2f'(1)^2}, \qquad s\in[0,1],
\end{equation*}
we can see that a linear fractional distribution is characterized by its mean $f'(1)=r/p$ and its normalized second factorial moment $f''(1)/f'(1)^2=2q/r$.
We say that a \textit{varying environment} $\mathcal{E}=(e_n,n \geq 1)$ is \textit{linear fractional} if and only if every $f_n$ is linear fractional with parameters $r_n$ and $p_n$. According to \cite[Chapter 1]{gotzvatutin}, the generating function $f_{m,n}$ is again linear fractional with mean
\[
f_{n,n}'(1)=1,\qquad f_{m,n}'(1)=f_{m+1}'(1)\cdots f_n'(1) = \frac{r_{m+1} \cdots r_n}{p_{m+1} \cdots p_n}, \qquad m<n,
\]
and normalized second factorial moment \ $f_{n,n}''(1)/f_{n,n}'(1)^2=0$; and for $m<n$,
\[
\frac{f_{m,n}''(1)}{f_{m,n}'(1)^2}
=\frac{f_{m+1}''(1)}{f_{m+1}'(1)^2}+\underset{k=m+2}{\overset{n}{\sum}} \frac{1}{f_{m+1}'(1)\cdots f_{k-1}'(1)}\frac{f_k''(1)}{f_k'(1)^2} =2\left(\frac{q_{m+1}}{r_{m+1}}+\underset{k=m+2}{\overset{n}{\sum}} \frac{ p_{m+1} \cdots p_{k-1} q_k}{r_{m+1} \cdots r_{k-1}r_k}\right).
\]
Consider a Galton-Watson process with the previous environment $(Z_n:n\geq 0)$. Then, $Z_n$ is linear fractional with generating function $f_{0,n}$. Furthermore, by using \eqref{eq:lawetan} and
\[
f_1^{(k+1)}(s) = \frac{(k+1)! r_1(1-q_1)q_1^{k} }{ (1-q_1 s)^{k+2} },
\]
we can prove that $\eta_{n}$ has geometric distribution. More precisely,
\begin{equation*}
\begin{split}
\mathbb{P}(\eta_n=k) =
& \left( \frac{1-q_1}{1-q_1 f_{1,n}(0)} \right) \left( \frac{q_1 (1 - f_{1,n}(0) )}{1-q_1 f_{1,n}(0)} \right)^k,\qquad k\geq 0.
\end{split}
\end{equation*}
In other words, $\mathbb{P}(\eta_1=k)=(1-q_1)q_1^k,$ for $ k\geq 0$; and for $n\geq 2$
\begin{equation*}
\begin{split}
\mathbb{P}(\eta_n=k) =
&
\left( \frac{ 1 + \underset{i=2}{\overset{n}{\sum}}
\frac { q_i r_{i+1} \cdots r_n }{ p_{i} p_{i+1}\cdots p_n} }{ 1 + \underset{i=1}{\overset{n}{\sum}}
\frac { q_ir_{i+1} \cdots r_n }{ p_{i} p_{i+1}\cdots p_n} } \right)
\left( \frac{ \frac { q_1r_{2} \cdots r_n }{ p_{1}p_2 \cdots p_n} }{ 1 + \underset{i=1}{\overset{n}{\sum}}
\frac { q_i r_{i+1} \cdots r_n }{ p_{i} p_{i+1}\cdots p_n} } \right) ^k, \qquad k\geq 0,
\end{split}
\end{equation*}
with the agreement that
$ \tfrac{ q_n r_{n+1} \cdots r_n }{ p_{n} p_{n+1}\cdots p_n}=\tfrac{q_n}{p_n}$.
\end{example}
\section{Main results}\label{main}
In this section, we analyze the genealogy of a Galton-Watson tree in varying environment, starting with individuals in the present generation and going backwards in time. We label the generations with negative integers, where the present generation is denoted by $m=0$. Hence, the offspring distributions are now denoted by $\mathcal{E}=(e_m, m\in \mathbb{Z}_{-})$.
In the previous section, the generating functions $f_{m,n}$ defined in \eqref{eq:fmn} were a useful tool. We can extend these definitions to include $m\leq n \leq 0$.
As we explain in the introduction, we use a representation that labels individuals at each generation in such a way that lines of descent of the individuals do not intersect. The monotone planar embedding is indexed by $\mathbb{Z}_{-}\times \mathbb{N}$, where the pair $(m,i)\in \mathbb{Z}_{-}\times \mathbb{N}$ represents the $i$-th individual in generation $m$. Individuals at the present generation will be denoted by $i$ instead of $(0,i)$. The offspring of individual $(m,i)$ is denoted by $\xi^{(m)}_i$ and its distribution is $e_{m+1}$.
We endow the population with the following genealogy. Individual $(m,i)$ has mother $(m-1,j)$ if and only if
\[
\sum_{k=1}^{j-1} \xi_{k}^{(m-1)} < i \leq \sum_{k=1}^j \xi_{k}^{(m-1)}.
\]
If $Z^{(m,i)}(k)$ denotes the number of descendants of individual $(m,i)$ at generation $m+k$, then the process $(Z^{(m,i)}(k),0\leq k\leq -m)$ is a GWVE process with environment $(e_{m+1}, e_{m+2}, \dots, e_0)$ starting with one individual.
In particular, by \eqref{eq:survival}, the probability that an individual in generation $m$ has alive descendants at the present generation is $p_{m}=1-f_{m,0}(0)$. For an individual at generation $-m\in \mathbb{Z}_-$, we define $\zeta^{(m)}$ as the number of daughters who have alive descendants at generation zero. As a consequence of \eqref{eq:lawzetan},
\begin{equation*}
\zeta^{(m)}\overset{\mathcal{L}}{=}\underset{i=1}{\overset{Y}{\sum}}\epsilon_i,
\end{equation*}
where $Y\sim e_{-m+1}$ and the variables $\epsilon_i$ are Bernoulli with parameter $1-f_{-m+1,0}(0)$, all independent. We also define $\eta^{(m)}$ as
\begin{equation}\label{eq:etadistribucion}
\eta^{(m)}=\zeta^{(m)}-1 \qquad \mbox{ conditioned on } \{\zeta^{(m)}>0\}.
\end{equation}
In particular, by \eqref{eq:lawetan}
\begin{equation}\label{etanueva}
\mathbb{P}(\eta^{(m)}=0) = \frac{ ( 1- f_{-m+1,0}(0) )f_{-m+1}^\prime (f_{-m+1,0}(0))}{ 1 - f_{-m,0} ( 0 )}.
\end{equation}
Let $\mathfrak{a}_i(n)$ be the index of the ancestor of individual $i$ in generation $-n$. Our goal is to describe the law of the coalescent times $C_{i,j}$ of individuals $i$ and $j$
\[
C_{i,j}:=\min \{ n\geq 1: \mathfrak{a}_i(n)=\mathfrak{a}_j(n) \}, \qquad i,j\in \mathbb{N},
\]
with the agreement that $\min \emptyset = \infty$. We define
\[
A_i:=C_{i,i+1}, \qquad i\in\mathbb{N}.
\]
By construction, $C_{i,j}=\max\{ A_i,A_{i+1},\dots, A_{j-1}\}$, for any $i<j$. Therefore, $(A_i, i \geq 1)$ contains all the information about the genealogy of the current population. The sequence $\mathbf{A}:=(A_i, i \geq 1)$ is called the \textit{coalescent point process} of a Galton-Watson process in varying environment. If the environment is constant, this process was studied in \cite{amaurylea2013}.
The process $\mathbf{A}$ is simple to describe. Nevertheless, its distribution is not easy to find and in general is not Markovian, except for some special cases. This motivates us to construct an auxiliary process. For any fixed individual $i$, we follow its ancestral line (spine) and consider the subtrees with roots in the spine. Note that these roots are labeled by $\{(-n,\mathfrak{a}_i(n)), n\in\mathbb{N} \}$. At every subtree, we count the number of daughters of the root at the right hand side of the spine, whose descendants are alive at the present generation. To be precise, let
\[
\mathcal{D}_i(n):=\{ \text{daughters of } (-n,\mathfrak{a}_i(n)) \text{ with descendants in } \{ (0,j): j\geq i \} \}, \qquad n\geq 1, \ i\geq 1.
\]
Define
\[
D_i(n) = \# \mathcal{D}_i(n)-1, \qquad i,n\geq 1.
\]
It follows from the monotone planar embedding that
\begin{equation} \label{eq:AdeD}
A_i=\min\{n\geq 1: D_i(n)\neq 0\}, \qquad i\geq 1.
\end{equation}
We set $D_0(n):=0$ for all $n\geq 1$ and $A_0:=+\infty$. Consider the first generation $l_i$ such that an individual in the $i$-th spine is an ancestor of the present generation individuals $1, \dots ,i+1$,
\[
l_i=\min \{ n\geq 1: \mathfrak{a}_1(n)=\cdots=\mathfrak{a}_{i+1}(n) \}.
\]
We define our vector-valued process $(B_i, i\geq 0)$ as the restriction of $D_i$ to the first $l_i$ entries, i.e.
$B_i=(B_i(1), B_i(2), \dots, B_i(l_i))$ with
\[
B_i(k):=D_i(k), \qquad 1\leq k\leq l_i.
\]
We define $B_0=\emptyset$ and $l_0=0$. The length $l_i$ can be defined recursively in terms of $D_i$ as the maximum between $l_{i-1}$ and the first $n$ such that $D_i(n)\neq 0$,
\[
l_i=l_{i-1}\vee \min\{n: D_i(n)\neq 0\}.
\]
See Figure \ref{fig: figure-label}. From now on the process $(B_i, i\geq 0)$ will be called \textit{{coalescent point process with multiplicities}}. We point out that this process is the Markovian correction of the one defined by \cite{amaurylea2013}.
In the remainder of the paper we prove that $(B_i,i\geq 0)$ is a Markovian process and we characterize its transitions. To be precise, we consider
\[
\mathcal S = \bigcup_{m\in\mathbb{N}} \mathbb{N}^m
\]
with the convention that $\mathbb{N}^0=\emptyset$. For any $b=(b(1), b(2), \dots ,b(m)) \in \mathcal{S}$, we denote its length by $l(b)=m$ with the convention that $l(\emptyset)=0$. Let $\mathfrak{s}(b)$ be the first non-zero coordinate of $b$ with the convention that $\mathfrak{s}( b ) =+\infty$, if $b$ is a null vector or the empty set. We define the vector $b^*=(b^*(1), b^*(2), \dots ,b^*(m))$ where
\begin{equation*}
b^*(j)= b(j) - \mathbb{1}_{ \{j=\mathfrak{s}(b) \} },\qquad j\leq m,
\end{equation*}
with the convention $b^*=b$, if $b$ is a null vector or the empty set.
The following theorem provides the law of the process $(B_i,i\geq 0)$. Thanks to \eqref{eq:AdeD} and the definition of $l_i$,
\[
A_i=\mathfrak{s}(B_i) \qquad \text{and} \qquad l_i=C_{1,i+1} =A_1\vee\cdots\vee A_i, \qquad i\geq 1.
\]
\begin{theorem}\label{ThB}
The vector-valued process $(B_i,i\geq 0)$ is a Markov chain with $B_0=\emptyset$. Conditionally on the event $\{B_i=(b(1),\dots, b(\ell))\}$, the law of the vector $B_{i+1}=(B_{i+1}(1), B_{i+1}(2), \dots, B_{i+1}(l_{i+1}) )$ is given by the following transition probabilities
\begin{equation*}
B_{i+1}(m): =
\left\{
\begin{array}{ll}
\eta^{(m)} & \text{ if }\, 1\leq m < A_i\ \text{ or }\ \ell< m\leq l_{i+1} \\
b(m)-1 & \text{ if }\, m= A_i\\
b(m)& \text{ if }\, A_i < m \leq \ell ,
\end{array}
\right.
\end{equation*}
where $(\eta^{(m)}, m\geq 1 )$ is a sequence of independent random variables such that $\eta^{(m)}$ is distributed as \eqref{eq:etadistribucion} for each $m$, and
\begin{equation}\label{eq:LMarkov}
l_{i+1} = \ell\,\mathbb{1}_{ \{ \mathfrak{s}(B_i^*)<\infty \} }+ (\ell \vee \ell^\dagger) \,\mathbb{1}_{ \{ \mathfrak{s}(B_i^*)=\infty \} },
\end{equation}
where
\[
\ell^\dagger=\min\{k \in \{1,2,\dots , A_i -1\}\cup \{ \ell +1,\ell +2, \dots \} : \eta^{(k)} \neq 0\}.
\]
Moreover,
\[
\mathbb{P}(A_1>n)= \prod_{i=1}^n \mathbb{P}(\eta^{(i)}=0)=\frac{ f^{\prime}_{-n,0}(0) }{1-f_{-n,0}(0)}, \qquad n\geq 1.
\]
\end{theorem}
\begin{remark}
For any individual $(m,i)\in \mathbb{Z}_{-}\times \mathbb{N}$, the subtree $(Z^{(m,i)}(k),0\leq k\leq -m)$ is a GWVE tree with environment $\mathcal{E}=(e_{m+1}, e_{m+2},\dots, e_0)$. Then, the number of alive descendants at generation zero, $Z^{(m,i)}(-m)$, has generating function $f_{m,0}(s)$. For $n\geq 1$, we evaluate $f_{-n,0}(s)$ and $f^\prime_{-n,0}(s)$ at zero, to obtain that $\mathbb{P}(A_1>n)$ is equal to the probability that an individual in generation $-n$ has one alive descendant at generation $0$ conditionally on survival. In contrast, the variable $\eta^{(m)}+1$ has the same law as the number of children of an individual in generation $-m$ that have alive descendants at generation $0$ conditioned to be strictly positive.
\end{remark}
For every $i\geq 1$, we define the sequence $D_i=(D_i(n),n\geq 1)$ and $D_0$ as the null sequence. By similar arguments as in the above theorem, we can obtain the next corollary, which is an extension of \cite[Theorem 2.1]{amaurylea2013} to varying environment.
\begin{corollary}\label{ThD}
The sequence-valued process $(D_i,i\geq 0)$ is a Markov chain starting at the null sequence with transition probabilities
\begin{equation*}
\Big( D_{i+1}(m) \ \mid\ D_i = (d(n))_{ n\geq 1 } \Big) \ {=}
\left\{
\begin{array}{ll}
\eta^{(m)} & \text{ if } 1\leq m <A_i\\
d(m)-1& \text{ if }m=A_i\\
d(m) & \text{ if } A_i<m,
\end{array}
\right.
\end{equation*}
where $(\eta^{(m)}, m\geq 1 )$ is a sequence of independent random variables such that $\eta^{(m)}$ is distributed as \eqref{eq:etadistribucion} for each $m$.
\end{corollary}
Observe that the vector-valued process $(B_i, i\geq 0)$ can be written as a sequence of finite point measures. In other words, $B_0$ is the null measure and
\[
B_i=\underset{1\leq n\leq l_i}{\sum} B_i(n)\delta_n, \qquad i\geq 1,
\]
where $\delta_n$ is the Dirac measure in $n$.
Now, we provide a counterexample to illustrate that the process $(\widetilde{B}_i, i \geq 0)$ defined in \cite{amaurylea2013} may fail to be Markovian, and in particular, \cite[Theorem 2.2]{amaurylea2013} cannot hold.
The point measure-valued process $(\widetilde{B}_i, i \geq 0)$ encodes the relationship between the $i$-th spine and the $(i+1)$-th individual by recording \textit{some} future subtrees attached to the $i$-th spine, that are part of the ancestral lineage of individual $i+1$.
Intuitively, the level of the point mass records the time $n$ where a subtree with root at $(-n,\frak{a}_i(n))$ is created; and the multiplicity of the point mass records the number of children of $(-n,\frak{a}_i(n))$ at the right-hand side of the $i$-th spine with descendants in individuals $\{(0,j) : j\geq i+1\}$.
The construction of the process is as follows: $\widetilde{B}_0$ is the null measure. $\widetilde{B}_1$ has positive mass at position $A_1$ and its multiplicity records the number of children of individual $(-A_1, \frak{a}_1(A_1))$, with descendants in individuals $\{(0,j) : j\geq 2\}$.
Recursively, $\widetilde{B}_{i+1}$ will be updated from $\widetilde{B}_i$ by decreasing by one the mass at position $A_i$ (because the $(i+1)$-th spine was part of the children at the right hand side of the $i$-th spine) and possibly by adding a new mass at position $A_{i+1}$ with the respective multiplicity.
Formally, for any finite point measure $b=\sum_{n\geq 1}b(n)\delta_n$, we define the minimum of its support as
\[
\mathfrak{s}( b ) := \min\{n\geq 1: b(n)\neq 0\}.
\]
In addition, we define $b^\ast=b-\delta_{\mathfrak{s}( b )},$ with the convention that $\mathfrak{s}( b )=\infty$ and $b^\ast=b$ if $b$ is the null measure. Then, the process $\widetilde{B}_i$ is defined recursively as
\begin{equation*}
\widetilde{B}_{i+1} :=
\left\{
\begin{array}{ll}
\widetilde{B}_i^\ast+D_{i+1}(A_{i+1})\delta_{A_{i+1}} & \ \text{ if } A_{i+1}\neq \mathfrak{s}( \widetilde{B}_i ) \text{ and } A_{i+1}< \mathfrak{s}( \widetilde{B}_i^\ast ),\\
\widetilde{B}_i^\ast& \ \text{ otherwise.}
\end{array}
\right.
\end{equation*}
\begin{example}\label{counterexample}
Consider the process $(\widetilde{B}_i,i\geq 1)$ illustrated in Figure \ref{fig: figure-label}: \
$\widetilde{B}_1=\delta_1$, $\widetilde{B}_2=2\delta_2$, $\widetilde{B}_3=\delta_1+\delta_2$, $\widetilde{B}_4=\delta_2$, $\widetilde{B}_5=2\delta_1$, $\widetilde{B}_6=\delta_1$, $\widetilde{B}_7=\delta_4$, $\widetilde{B}_8=\delta_3$, $\widetilde{B}_9=\delta_5$, $\widetilde{B}_{10}=\delta_1$ and $\widetilde{B}_{11}=\delta_4$.
We will show that this process \textit{cannot be Markovian} by exhibiting that the value of $\widetilde{B}_{11}$ depends on both $\widetilde{B}_{9}$ and $\widetilde{B}_{10}$.
By the recursive construction, we observe that $\widetilde{B}_{10}=\delta_1$ implies that $\widetilde{B}_{11}=\widetilde{B}_{11}(n)\delta_n$ for some $n\geq 2$ with $\widetilde{B}_{11}(n)>0$. Besides, from
$\widetilde{B}_9=\delta_5$ we know that individual $(-5,\mathfrak{a}_9(5))$ has \textit{only} one daughter at the right hand side of the $9$-th spine, whose descendants are alive at the present generation. Moreover, since $A_{9}=5$ and $A_{10}=1$, we have that $(-5,\mathfrak{a}_9(5))=(-5,\mathfrak{a}_{10}(5))=(-5,\mathfrak{a}_{11}(5))$ and the only daughter is at both $10$-th and $11$-th spine. Therefore, $\widetilde{B}_{11}(5)=0$ and the previous $n$ cannot be 1 or 5. This implies that the value of $\widetilde{B}_{11}$ depends on $\widetilde{B}_{10}$ and additionally on $\widetilde{B}_{9}$.
\end{example}
Using a similar argument as in the previous example, we can show that every finite point measure-valued process $(E_i,i\geq 0)$ carrying less information than $(B_i,i\geq 0)$ would not be Markovian.

To end this section, we establish that if the offspring distributions are linear fractional, the coalescent point process $(A_i,i\geq 0)$ is Markovian and we give its distribution. When the environment is constant, this observation was done in \cite[Proposition 5.1]{amaurylea2013} and in \cite{rannala1997genealogy} in an alternative formulation.
\begin{proposition}\label{Prop Linear} Suppose that the environment $\mathcal{E}$ is linear fractional with parameters $r_m$ and $p_m$, for each $m\in \mathbb{Z}_-$. Then, the variables $(A_i,i\geq 1)$ are independent with common distribution
\begin{equation*}
\mathbb{P}(A_1>n)= \left( 1 + \underset{i=-n+1}{\overset{0}{\sum}}
\frac { (1- p_i )r_{i+1} \cdots r_0 }{ p_{i} p_{i+1}\cdots p_0} \right)^{-1}, \qquad n\geq 1.
\end{equation*}
with the agreement that $ \tfrac{ (1-p_0) r_{0+1} \cdots r_0 }{ p_{0} p_{0+1}\cdots p_0}=\tfrac{(1-p_0)}{p_0}$.
\end{proposition}
\section{Proofs }\label{proofs}
In this section we give the proofs of Theorem \ref{ThB}, Corollary \ref{ThD} and Proposition \ref{Prop Linear}.
\begin{proof}[Proof of Theorem \ref{ThB}]
Since $A_i$ is the coalescent time of individual $i$ and $i+1$, then
the $i$-th and $(i+1)$-th spines coincide for every generation $-m$ with $m\geq A_i$. This implies,
\begin{equation}\label{eq frak a}
\mathfrak{a}_i(m) \neq\mathfrak{a}_{i+1}(m) \quad \text{for all } m< A_i \qquad
\text{and} \qquad \mathfrak{a}_i(m) =\mathfrak{a}_{i+1}(m) \quad \text{for all } m\geq A_i.
\end{equation}
Recall that $B_i(m)$ counts the number of children of $(-m, \mathfrak{a}_i(m))$ at the right hand side of the $i$-th spine, whose descendants are alive at the present generation. Therefore, $B_i(m)=B_{i+1}(m)$ for all $A_i < m \leq l_i = \ell$. Since the $(i+1)$-th spine before generation $-A_i$ is at the right hand side of the $i$-th spine, we have $B_{i+1}(A_i) = B_i(A_i) -1$.
To prove the missing cases we define
\[
E_i:=(B_j(n): j\leq i , n\leq l_j).
\]
For any $n,j\geq 1$ we denote by $\tau(n,j)$ the subtree with root at $(-n,\mathfrak{a}_j(n))$. Since $(0,j)$ is a descendant of $(-n,\mathfrak{a}_j(n))$, we observe that $\tau(n,j)$ has the same distribution as a GWVE tree with environment $\mathcal{E}=(e_{-n+1}, \dots, e_0)$ conditioned to survive $n$ generations. We now consider the smallest index among all the descendants of individual $(-n,\mathfrak{a}_j(n))$ at the present generation,
\[
I(n,j) :=\min\{k\leq j : \mathfrak{a}_k(n) =\mathfrak{a}_j(n) \}, \quad j,n\geq 1.
\]
By the branching property, for every fixed $j,n\geq 1$, the subtrees with roots at the daughters of individual $(-n,\mathfrak{a}_j(n))$ are independent. We can say something stronger: consider the roots that are outside the $j$-th spine, i.e.\ let
\[
R_j:=\{\mbox{daughters of individual } (-n,\mathfrak{a}_j(n)): n\geq 1\}\setminus \{(-n,\mathfrak{a}_j(n)): n\geq 0\}, \qquad j\geq 1.
\]
Then, by the branching property, for every fixed $j\geq 1$, all the subtrees with roots in $R_j$ are independent. In particular, the subtree $\tau(A_i-1,i+1)$ is independent of $\tau(A_i-1,i)$ and it is also independent of the subtrees with roots in $(-k,r)\in R_i$ such that $A_i-1\leq k\leq \ell -1$. See Figure \ref{fig:1}.
Therefore, conditionally on $E_i$, $\tau(A_i-1,i+1)$ is still a GWVE tree with environment $\mathcal{E}=(e_{-A_i+2}, \dots, e_0)$ conditioned to survive $A_i-1$ generations.
\begin{figure}[!htbp]
\begin{subfigure}[b]{0.5\textwidth}
\includegraphics[width=1.15\textwidth]{caseA}
\caption{ $1\leq m <A_i$.}
\label{fig:1}
\hspace{-.75cm}
\end{subfigure}
\begin{subfigure}[b]{0.5\textwidth}
\includegraphics[width=0.9\textwidth]{caseB}
\caption{$\ell< m\leq L_{i+1}$.}
\label{fig:2}
\end{subfigure}
\caption{In (a) the blue subtree $\tau(A_i-1,i+1)$ is independent of the red subtree $\tau(A_i-1,i)$ and it is also independent of the green subtrees. In (b) the subtrees with roots at $R_i^\uparrow$ (green or blue trees) are independent of the subtrees with roots at $R_i^\downarrow$ (purple subtrees). In addition, individual
$(0,i+1)$ is the only descendant at generation 0 of the subtree $\tau(A_i-1,i+1)$.}
\end{figure}
Observe that conditionally on $E_i$, $I(-A_{i}+1, \mathfrak{a}_{i+1}(A_i-1)) =i+1$. Then, conditionally on $E_i$, for every $1\leq m <A_i$,
\begin{equation*}
\begin{split}
\mathcal{D}_{i+1}(m)
& = \{ \text{daughters of } (-m,\mathfrak{a}_{i+1}(m)) \text{ with descendants in } \{ (0,j): j\geq i+1 \} \} \\
& = \{ \text{daughters of } (-m,\mathfrak{a}_{i+1}(m)) \text{ with descendants in } \{ (0,j): j\geq 1 \} \}.
\end{split}
\end{equation*}
It follows that for every $1\leq m <A_i$, the variable $D_{i+1}(m)$ conditioned to $E_i$ has the same distribution as $\eta^{(m)}$ given in \eqref{eq:etadistribucion}.
If $B_{i+1}(m)\neq 0$ for some $1\leq m \leq \ell$, we stop the procedure and take $l_{i+1}=\ell$. Otherwise, we need to extend the length of the vector $B_{i+1}$.
Note that the subtrees with roots in $R_i^{\uparrow}:=\{(-k,r)\in R_i: k<\ell\}$ are independent of the subtrees with roots in $R_i^{\downarrow}:= \{(-k,r)\in R_i: k\geq\ell\}$. Hence, given $E_i$, every subtree with roots in $(-m,r)\in R_i^{\downarrow}$, is still a GWVE tree with environment $\mathcal{E}=(e_{-m+1}, \dots, e_0)$. See Figure \ref{fig:2}.
Conditionally on $E_i$ we have
\[
\mathfrak{a}_{i+1}(m)=\mathfrak{a}_{i}(m) = \cdots =\mathfrak{a}_{1}(m), \qquad m\geq \ell.
\]
Then, $\{(0,j): j\leq i+1 \}$ are descendants of $(-m+1, \mathfrak{a}_{i+1}(m-1) )$ and $(-m+1, \mathfrak{a}_{i+1}(m-1) )$ is a daughter of $(-m, \mathfrak{a}_{i+1}(m) )$. This implies,
\begin{equation*}
\begin{split}
\mathcal{D}_{i+1}(m)
& = \{ \text{daughters of } (-m,\mathfrak{a}_{i+1}(m)) \text{ with descendants in } \{ (0,j): j\geq i+1 \} \} \\
& = \{ \text{daughters of } (-m,\mathfrak{a}_{i+1}(m)) \text{ with descendants in } \{ (0,j): j\geq 1 \} \}.
\end{split}
\end{equation*}
It follows that for every $ m>\ell $, the distribution of $D_{i+1}(m)$ conditioned to $E_i$ is again given by \eqref{eq:etadistribucion}.
Finally, we stop at the first generation $-m$ such that $D_{i+1}(m)\neq 0$. In other words, given $E_i$ the transitions of $l_i$ are given by \eqref{eq:LMarkov}. Therefore, the Markov property holds. Recall that all the subtrees with roots in $R_{i+1}$ are independent. This implies that the variables $\eta^{(m)}$ are independent.
For the law of $A_1$ we use equations \eqref{etanueva} and \eqref{eq:fgpmn} to obtain
\begin{align*}
\mathbb{P}(A_1>n) & = \prod_{i=1}^n \mathbb{P}(\eta^{(i)}=0) \\
& = \prod_{i=1}^n \frac{1- f_{-i+1,0}(0) }{1- f_{-i,0}(0)} f^\prime_{-i+1} (f_{-i+1,0}(0) ) \\
& = \frac{ f^{\prime}_{-n,0}(0) }{1-f_{-n,0}(0)}.
\end{align*}
\end{proof}
Note that the process $(B_i, i\geq 0)$ was constructed by taking some information from the sequence-valued process $(D_i, i\geq 0)$. Then, the proof of Corollary \ref{ThD} is quite similar to the previous one. We repeat the same arguments, except that it is not necessary to stop at time $l_i$. We only explain the differences.
\begin{proof}[Proof of Corollary \ref{ThD}]
By the equation \eqref{eq frak a}, we can see that $D_i(m)=D_{i+1}(m)$ for all $m> A_i$ and $D_i(A_i)=D_{i+1}(A_i)-1$. For $m<A_i$, we observe that the subtree $\tau(A_i-1,i+1)$ is independent of $\tau(A_i-1,i)$ and it is also independent of the subtrees with roots in $(-k,r)\in R_i$ such that $A_i-1\leq k$. Then, by following the same argument as in the proof of Theorem \ref{ThB}, we get the result.
\end{proof}
Finally, we give the proof of Proposition \ref{Prop Linear}.
\begin{proof}[Proof of Proposition \ref{Prop Linear}]
Observe that for every $n\geq 1$, the variable $\eta^{(n)}\overset{\mathcal{L}}{=}\eta_n$ with environment $\mathcal{E}_n=( e_{-n+1}, e_{-n+2}, \dots , e_{0})$ as in Example \ref{Ex linear1}. In particular, $\eta^{(n)}$ has geometric distribution (modeling the number of failures until the first success) with success probability $\lambda_1:=p_0$ and for $n\geq 2$,
\begin{equation}\label{eq:failure prob}
\lambda_n:=\left( 1 + \underset{k=-n+2}{\overset{0}{\sum}}
\frac { (1- p_k )r_{k+1} \cdots r_0 }{ p_{k} \cdots p_0} \right)\left( 1 + \underset{k=-n+1}{\overset{0}{\sum}}
\frac { (1- p_k )r_{k+1} \cdots r_0 }{ p_{k} \cdots p_0} \right)^{-1}.
\end{equation}
with the agreement that $ \tfrac{ (1-p_0) r_{0+1} \cdots r_0 }{ p_{0} p_{0+1}\cdots p_0}=\tfrac{(1-p_0)}{p_0}$.
By induction on $i\geq 1$, we are going to prove the following statement:
\begin{description}
\item[($H_i$)]\label{H_i} The random variables $(D_i(n),n\geq 1)$ are independent, geometrically distributed with success probability $\lambda_n$ given in \eqref{eq:failure prob}. In addition, they are independent of $(A_0,\dots, A_{i-1})$.
\end{description}
Suppose that $(H_i)$ is true for every $i\geq 1$. Then, by equation \eqref{eq:AdeD}, we can see that $A_i$ is independent of $(A_0,\dots, A_{i-1})$ and it is distributed as $A_1$. In particular,
\begin{equation*}
\mathbb{P}(A_i>n)= \prod_{\ell=1}^n \mathbb{P}(\eta^{(\ell)}=0)= \left( 1 + \underset{k=-n+1}{\overset{0}{\sum}}
\frac { (1- p_k)r_{k+1} \cdots r_0 }{ p_{k} \cdots p_0} \right)^{-1}, \qquad n\geq 1.
\end{equation*}
The claim holds once we prove that $(H_i)$ is true for every $i\geq 1$. $(H_1)$ holds by Corollary \ref{ThD}. Now, we assume $(H_i)$ and we will prove that $(H_{i+1})$ is also true by conditioning on the value of $A_i$. Suppose that $A_i=h$. We now apply the transition probabilities from Corollary \ref{ThD}. Note that $D_i(n)=D_{i+1}(n)$ for all $n>h$. Then, by hypothesis $(H_i)$, the variables $(D_{i+1}(n), n>h)$ are independent, geometrically distributed with parameters $\lambda_n$ and also independent of $(A_0,\dots, A_{i-1})$. For $n=h$, by hypothesis, $D_{i+1}(h)=D_i(h)-1$ is independent of $(D_i(n),n> h)$ and independent
of $(A_0,\dots, A_{i-1})$. In addition, by \eqref{eq:AdeD}, $D_i(h)$ is a geometric variable with parameter $\lambda_h$ conditioned to be strictly positive. Then,
\[
\mathbb{P}(D_{i+1}(h)=k)=\frac{\mathbb{P}(\eta^{(h)}=k+1)}{\mathbb{P}(\eta^{(h)}>0)}=\lambda_h(1-\lambda_h)^k,\qquad k\geq 0,
\]
is a geometric random variable with parameter $\lambda_h$.
Finally, the variables $(D_{i+1}(n), n<h)$ are \textit{new} independent geometric random variables with parameters $\lambda_n$ (therefore, they are independent of $(D_{i+1}(n), n\geq h)$ and $(A_0,\dots, A_{i-1})$).
In other words, conditionally on $\{A_i=h\}$, the variables $(D_{i+1}(n), n\geq 1)$ are independent with geometric distributions of parameters $\lambda_n$ and independent of $(A_0,\dots, A_{i-1})$. It remains to sum over $h$ to obtain the result $(H_{i+1})$.
\end{proof}
\bibliographystyle{amsplain}
|
1,314,259,995,005 | arxiv | \section{Introduction}
Since the first discovery of $\Lambda$-hypernuclei by observing
cosmic-rays in emulsion chambers~\cite{Danysz53}, hypernuclei, which
are nuclei with one or more of the nucleons replaced with hyperons,
have been used as a natural laboratory to study hyperon-nucleon and
hyperon-hyperon interactions, properties of hadrons in nuclear
environment, and in particular the impurity effect of hyperon in
nuclear medium~\cite{Chrien89,Dover89,Bando90}. Due to the absence
of Pauli's principle between the nucleon and the $\Lambda$ particle,
a $\Lambda$ hyperon can probe deeply into the interior of nuclear
medium and have important influences on its properties, including
softening the equation of state~\cite{Glendenning00}, modifying the
shape and size of a finite nucleus~\cite{Tanida01}, changing the
nuclear binding and thus the driplines of neutrons and
protons~\cite{Samanta06} as well as the fission barrier heights in
heavy nuclei~\cite{Minato09}.
In the past decade, many high-resolution $\gamma$-ray spectroscopy
experiments using germanium detector arrays (Hyperball) have been
carried out for $\Lambda$-hypernuclei~\cite{Hashimoto06} to
understand the nature of $\Lambda$-nucleon interaction in nuclear
medium and the impurity effect of a $\Lambda$ on nuclear structure.
In particular, the facilities built at J-PARC will provide an
opportunity to perform hypernuclear $\gamma$-ray spectroscopy study
with high precision by improving the quality of the secondary
mesonic beam~\cite{Tamura09}. These facilities offer useful tools to
study the low-lying states of hypernuclei, especially those of
medium and heavy hypernuclei. To date, there are many experimental
data not only on the single-$\Lambda$ binding energy but also on the
hypernuclear $\gamma$-ray spectroscopy that allow us to study the
$\Lambda$-nucleon interaction, nuclear medium effects of baryons and
impurity effects induced by a $\Lambda$ hyperon in much greater
detail~\cite{Hashimoto06}.
The theoretical studies for the hypernuclear $\gamma$-ray
spectroscopy are mainly performed with the cluster
model~\cite{Motoba83,Bando90}, few-body
model~\cite{Hiyama03,Nemura02}, and shell model~\cite{Dalitz78}. The
energy level scheme, M1 and E2 transition rates in low-lying states
of light $\Lambda$-hypernuclei have been investigated with either a
one-boson exchange potential or a parameterized spin-dependent
$\Lambda$-nucleon interaction. Due to the numerical difficulty, the
application of these models to medium and heavy hypernuclei is
greatly limited. It is noted that, recently, the framework of
few-body model has been extended to the case of five-body and used
to study the energy levels of the double $\Lambda$-hypernucleus,
$^{11}_{\Lambda\Lambda}$Be~\cite{Hiyama10}.
The framework of nuclear energy-density functionals (EDF) is
nowadays one of the most important microscopic approaches for
large-scale nuclear structure calculations in medium and heavy
nuclei \cite{Bender03} and has already been extended to study
hypernuclei~\cite{Rayet76,Rufa90,Mares94,Schaffner94,Sugahara94,Lv03,Shen06,Vretenar98}.
Recently, both the non-relativistic Skyrme-Hartree-Fock (SHF)
theory~\cite{Zhou07,Schulze10,Win11} and the relativistic mean-field
(RMF) theory~\cite{Win08} have been applied to study the impurity
effect of $\Lambda$ hyperon on the deformation of
$\Lambda$-hypernuclei. The predicted energy surface is somewhat
soft, in which case a large shape fluctuation effect of collective
vibration might be expected. Furthermore, the static
single-reference (SR) EDF is characterized by symmetry breaking
(e.g., translational, rotational, particle number), and can provide
only an approximate description of bulk ground-state properties.
Therefore, to calculate excitation spectra and electromagnetic
transition rates in individual hypernuclei, it is necessary to
extend the SR EDF framework to include collective correlations
related to restoration of broken symmetries and to fluctuations of
collective coordinates.
In recent years several accurate and efficient methods and
algorithms have been developed that perform the restoration of
rotational symmetries in 3D Euler space broken by the static nuclear
mean field and take into account fluctuations around the mean-field
minimum~\cite{Bender08,Yao09,Rodriguez10,Niksic11}. The most
effective approach to configuration mixing calculations is the
generator coordinate method (GCM). Within these methods, the energy
spectrum and electromagnetic transition rates of low-lying excited
states in both light and heavy nuclei have been successfully
reproduced. However, these approaches are currently developed only
for even-even nuclei, which cannot be extended straightforwardly to
study the $\gamma$-ray spectra of single-$\Lambda$ hypernuclei by
simply adding hyperon degree of freedom.
At present, the extension of 3D angular momentum projected GCM
(3DAMP+GCM) method to single-$\Lambda$ hypernuclei based on triaxial
symmetry-breaking intrinsic states is still quite complicated and its
applications to medium-heavy and heavy nuclei would be
computationally demanding. As an alternative approach to the 5D
quadrupole dynamics that restores rotational symmetry and allows for
fluctuations around the triaxial mean-field minima, a 5D collective
Bohr Hamiltonian (5DCH) has been formulated with
deformation-dependent parameters determined by microscopic
selfconsistent mean-field
calculations~\cite{Libert99,Prochniak04,Niksic09,Li09}. In this
work, we will construct a 5DCH with the parameters derived from the
Skyrme-Hartree-Fock calculations for the nuclear core in a single
$\Lambda$-hypernucleus and calculate the corresponding low-spin
excitation spectra. The impurity effect of $\Lambda$ hyperon on the
collective motion of an atomic nucleus will be examined by studying
the modifications of collective excitation spectrum. In this way, we
will in this paper concentrate on the modification of the core
nucleus due to the addition of a $\Lambda$ particle, leaving the
evaluation of the spectrum of the whole hypernucleus as a future
work.
The paper is organized as follows. In Section~\ref{sec2} we present
a brief outline of the 5DCH method and the Skyrme-Hartree-Fock
approach for $\Lambda$-hypernucleus. The collective potential energy
surface, parameters in collective Hamiltonian as well as the
resultant collective excitation spectra for $^{24}$Mg and the same
nucleus inside $^{25}_{~\Lambda}$Mg are given in Section~\ref{sec3}.
A brief summary and an outlook for future studies are included in
Section~\ref{sec4}.
\section{The Method}
\label{sec2}
\subsection{Collective Hamiltonian in five dimension}
The collective Hamiltonian that describes the nuclear excitations of
quadrupole vibrations, 3D rotations, and their couplings can be
written in the form:
\begin{equation}
\label{hamiltonian-quant} \hat{H} =
\hat{T}_{\textnormal{vib}}+\hat{T}_{\textnormal{rot}}
+V_{\textnormal{coll}} \; ,
\end{equation}
where $V_{\textnormal{coll}}$ is the collective potential. The
vibrational kinetic energy reads,
\begin{eqnarray}
\hat{T}_{\textnormal{vib}}
&=&-\frac{\hbar^2}{2\sqrt{wr}}
\left\{\frac{1}{\beta^4}
\left[\frac{\partial}{\partial\beta}\sqrt{\frac{r}{w}}\beta^4
B_{\gamma\gamma} \frac{\partial}{\partial\beta}\right.\right.\nonumber\\
&& \left.\left.- \frac{\partial}{\partial\beta}\sqrt{\frac{r}{w}}\beta^3
B_{\beta\gamma}\frac{\partial}{\partial\gamma}
\right]+\frac{1}{\beta\sin{3\gamma}} \left[
-\frac{\partial}{\partial\gamma} \right.\right.\nonumber\\
&& \left.\left.\sqrt{\frac{r}{w}}\sin{3\gamma}
B_{\beta \gamma}\frac{\partial}{\partial\beta}
+\frac{1}{\beta}\frac{\partial}{\partial\gamma} \sqrt{\frac{r}{w}}\sin{3\gamma}
B_{\beta \beta}\frac{\partial}{\partial\gamma}
\right]\right\},
\end{eqnarray}
and the rotational kinetic energy,
\begin{equation}
\hat{T}_{\textnormal{\textnormal{\textnormal{rot}}}} =
\frac{1}{2}\sum_{\kappa=1}^3{\frac{\hat{J}^2_\kappa}{\mathcal{I}_\kappa}},
\end{equation}
with $\hat{J}_\kappa$ denoting the components of the angular
momentum in the body-fixed frame of a nucleus. It is noted that the
mass parameters $B_{\beta\beta}$, $B_{\beta\gamma}$,
$B_{\gamma\gamma}$, as well as the moments of inertia
$\mathcal{I}_\kappa$, depend on the quadrupole deformation variables
$\beta$ and $\gamma$,
\begin{equation}
\label{MOI}
\mathcal{I}_\kappa = 4B_\kappa\beta^2\sin^2(\gamma-2\kappa\pi/3),
~~\kappa=1,2,3 \;.
\end{equation}
Two additional quantities that appear in the expression for the
vibrational energy, that is, $r=B_1B_2B_3$, and
$w=B_{\beta\beta}B_{\gamma\gamma}-B_{\beta\gamma}^2 $, determine the
volume element in the collective space. The corresponding eigenvalue
problem is solved by expansion of eigenfunctions in terms of a
complete set of basis functions that depend on the deformation
variables $\beta$ and $\gamma$, and the Euler angles $\phi$,
$\theta$ and $\psi$~\cite{Pro.99}.
The dynamics of the collective Hamiltonian is governed by seven
collective quantities, that is, the collective potential $V_{\rm
coll}$, three mass parameters $B_{\beta\beta}$, $B_{\beta\gamma}$,
and $B_{\gamma\gamma}$, and three moments of inertia
$\mathcal{I}_\kappa$. These quantities are functions of the
intrinsic deformations $\beta$ and $\gamma$ and will be determined
by Skyrme-Hartree-Fock calculations with constraints on the mass
quadrupole moments.
\subsection{Skyrme-Hartree-Fock approach for $\Lambda$-hypernucleus}
In Ref.~\cite{Win11}, the computer code {\tt ev8}~\cite{Bonche05} of
SHF+BCS approach has already been extended for the study of
$\Lambda$ hypernuclei. Therefore, in the following, we start from
this approach to calculate the seven collective quantities in the
5DCH, as shown in Eq.~(\ref{hamiltonian-quant}).
In the SHF+BCS approach for $\Lambda$ hypernucleus, the total energy
$E$ can be written as the integration of three terms,
\begin{equation}
\label{HFE}
E
=\int d^3r [{\cal E}_N(\br) + {\cal T}_\Lambda(\br) + {\cal E}_{N\Lambda}(\br)],
\end{equation}
where ${\cal E}_N(\br)$ is the standard nuclear part of energy
functional, including both $ph$-channel of the Skyrme force and
$pp$-channel of the $\delta$-force, as well as the kinetic energy
density for the nucleons~\cite{Vautherin72,Bonche05}. ${\cal
T}_\Lambda(\br)=\dfrac{\hbar^2}{2m_\Lambda}\tau_\Lambda$ is the
kinetic energy density of $\Lambda$ hyperon. ${\cal
E}_{N\Lambda}(\br)$ is the interaction energy density between the
$\Lambda$ and nucleons given in terms of the $\Lambda$ and nucleon
densities~\cite{Rayet81},
\begin{eqnarray}
{\cal E}_{N\Lambda}
&=&
t^\Lambda_0(1+\dfrac{1}{2}x^\Lambda_0)\rho_\Lambda\rho_N
+\dfrac{1}{4}(t^\Lambda_1+t^\Lambda_2)(\tau_\Lambda\rho_N+\tau_N\rho_\Lambda)\nonumber\\
&&+\dfrac{1}{8}(3t^\Lambda_1-t^\Lambda_2)(\nabla\rho_N\cdot\nabla\rho_\Lambda)
+\dfrac{1}{4}t^\Lambda_3\rho_\Lambda(\rho^2_N+2\rho_n\rho_p)\nonumber\\
&&+\dfrac{1}{2}W^\Lambda_0(\nabla\rho_N\cdot \bJ_\Lambda+\nabla\rho_\Lambda\cdot \bJ_N).
\end{eqnarray}
Here, $\rho_\Lambda, \tau_\Lambda$ and $\bJ_\Lambda$ are
respectively the particle density, the kinetic energy density, and
the spin density of the $\Lambda$ hyperon. These quantities are
given in terms of the single-particle wave-function of $\Lambda$ and
occupation probabilities~\cite{Vautherin72}. $t^\Lambda_0,
t^\Lambda_1, t^\Lambda_2, t^\Lambda_3$, and $W^\Lambda_0$ are the
Skyrme parameters for the $\Lambda$N interaction.
The pairing correlation between the nucleons is taken into account
in the BCS approximation. The density-dependent $\delta$-force is
adopted in the $pp$ channel,
\begin{equation}
V(\br_1, \br_2)=-g\dfrac{1-\hat P^\sigma}{2}
\left[1-\dfrac{\rho(\br_1)}{\rho_0}\right]
\delta(\br_1-\br_2),
\end{equation}
where $\hat P^\sigma$ is the spin-exchange operator, and
$\rho_0=0.16$ fm$^{-3}$.
The HF equations for the nucleons and $\Lambda$ are obtained by
varying the HF energy (\ref{HFE}) with respect to the corresponding
single-particle wave functions and are solved by discretizing
individual single-particle wave functions on a three-dimensional
Cartesian mesh. More details can be found in Ref.~\cite{Bonche05}.
The method of quadratic constraints on the quadrupole moments of the
nuclear density is used to find nuclear intrinsic wave functions
(including the quasiparticle energies $E_i$, occupation
probabilities $v_i$, and single-nucleon wave functions $\psi_i$)
corresponding to the desired quadrupole
deformations~\cite{Bonche05,RS.80}. With these wave functions, one
can calculate the moments of inertia $\mathcal{I}_\kappa$ in Eq.
(\ref{MOI}) using the Inglis-Belyaev formula~\cite{Ing.56,Bel.61}
\begin{equation}
\label{Inglis-Belyaev}
\mathcal{I}_\kappa =
\sum_{i,j}{\frac{\left(u_iv_j-v_iu_j \right)^2}{E_i+E_j}
\vert\langle i |\hat{J}_\kappa | j \rangle |^2},
\end{equation}
where $\kappa=1, 2, 3$ denotes the axis of rotation, and the
summation of $i, j$ runs over the proton and neutron quasiparticle
states.
The mass parameters $B_{\mu\nu}(\beta,\gamma)$ are also calculated
in the cranking approximation~\cite{GG.79}
\begin{equation}
\label{masspar-B} B_{\mu\nu}(\beta,\gamma)=\frac{\hbar^2}{2}
\left[\mathcal{M}_{(1)}^{-1} \mathcal{M}_{(3)} \mathcal{M}_{(1)}^{-1}\right]_{\mu\nu}\;,
\end{equation}
with
\begin{equation}
\label{MMatrix}
\mathcal{M}_{(n),\mu\nu}(\beta,\gamma)=\sum_{i,j}
{\frac{\left\langle i\right|\hat{Q}_{2\mu}\left| j\right\rangle
\left\langle j\right|\hat{Q}_{2\nu}\left| i\right\rangle}
{(E_i+E_j)^n}\left(u_i v_j+ v_i u_j \right)^2}.
\end{equation}
The mass parameters $B_{\mu\nu}$ in Eq.(\ref{masspar-B}) can be converted into the
forms of $B_{\beta\beta}, B_{\beta\gamma}, B_{\gamma\gamma}$ with
the following relations~\cite{Prochniak09},
\begin{subequations}\begin{eqnarray}
B_{\beta\beta} &=& B_{00}a_{00}\cos^2\gamma + 2B_{02}a_{02}\cos\gamma\sin\gamma +
B_{22}a_{22}\sin^2\gamma,\\
B_{\beta\gamma} &=& (B_{22}a_{22}-B_{00}a_{00})\cos\gamma\sin\gamma
+ B_{02}a_{02}(\cos^2\gamma-\sin^2\gamma),\\
B_{\gamma\gamma} &=& B_{22}a_{22}\cos^2\gamma - 2B_{02}a_{02}\cos\gamma\sin\gamma +
B_{00}a_{00}\sin^2\gamma,
\end{eqnarray}\end{subequations}
where the coefficients $a_{00}, a_{02}, a_{22}$ are as follows,
\begin{equation}
a_{00}=\dfrac{9r^4_0A^{10/3}}{16\pi^2},~~
a_{02}=a_{00}/\sqrt{2},~~
a_{22}=a_{00}/2,
\end{equation}
with $r_0=1.2$.
\begin{figure}[t]
\centering
\includegraphics[width=8cm]{fig1.eps}
\caption{The collective potential $V_{\rm coll}$, the rms radius of protons, the mass parameters
$B_{\beta\beta}$, $B_{\gamma\gamma}$,
and the moment of inertia along the $1$-axis $I_1$ as functions of quadrupole deformation $\beta$ for $^{24}$Mg
and the nuclear core of $^{25}_\Lambda$Mg from the Skyrme-Hartree-Fock+BCS calculations using the SGII
force~\cite{Giai81}. The $\Lambda$N interaction energy ${\cal E}_{N\Lambda}$
has been included ($w$) or excluded ($w/o$) in the collective potential $V_{\rm coll}$ for the
nuclear core of $^{25}_\Lambda$Mg.}
\label{fig1}
\end{figure}
The collective potential $V_{\rm coll}$ in the collective
Hamiltonian is obtained by subtracting the zero-point-energy (ZPE)
from the total mean-field energy~\cite{Niksic09},
\begin{equation}
\label{Vcoll} {V}_{\textnormal{coll}}(\beta,\gamma)
= E_{\textnormal{tot}}(\beta,\gamma)
- \Delta V_{\textnormal{vib}}(\beta,\gamma) - \Delta
V_{\textnormal{rot}}(\beta,\gamma),
\end{equation}
where $E_{\textnormal{tot}}$ is the total energy for the nuclear
core in $\Lambda$ hypernucleus. We will investigate two options,
that is, those with ($w$) or without ($w/o$) the interaction part of
energy ${\cal E}_{N\Lambda}$ between the $\Lambda$ and nucleons,
\begin{eqnarray}
E_{\textnormal{tot}}
=\left\{
\begin{array}{cc}
\int d^3r {\cal E}_N(\br), & w/o \\
\int d^3r [{\cal E}_N(\br)+ {\cal E}_{N\Lambda}(\br)], & w \\
\end{array}
\right.
\end{eqnarray}
In the collective potential $V_{\rm coll}$ of Eq.(\ref{Vcoll}), the vibrational ZPE, $\Delta V_{\textnormal{vib}}$ is given by,
\begin{equation}
\Delta V_{\textnormal{vib}}(\beta,\gamma) =
\dfrac{1}{4}{\rm Tr}[{\cal M}^{-1}_{(3)}{\cal M}_{(2)}],
\end{equation}
where ${\cal M}_{(n),\mu\nu}(\beta,\gamma)$ is determined by
Eq.(\ref{MMatrix}) with the mass quadrupole operators $(\mu,\nu=0,
2)$ defined as,
\begin{equation}
\hat{Q}_{20}=2z^2-x^2-y^2 \quad \textnormal{and}\quad
\hat{Q}_{22}=x^2-y^2 \;.
\end{equation}
The rotational part of ZPE is a summation of three terms,
\begin{equation}
\Delta V_{\textnormal{rot}}(\beta,\gamma)
= \sum_{\mu=-2,-1,1} \Delta V_{\mu\mu}(\beta,\gamma),
\end{equation}
with
\begin{equation}
\Delta V_{\mu\nu}(\beta,\gamma)
= \dfrac{1}{4} \dfrac{{\cal M}_{(2),\mu\nu}(\beta,\gamma)}{{\cal
M}_{(3),\mu\nu}(\beta,\gamma)}.
\end{equation}
where ${\cal M}_{(n),\mu\nu}(\beta,\gamma)$ is determined by
Eq.(\ref{MMatrix}) with the intrinsic components of quadrupole
operator defined as,
\begin{eqnarray}
\hat Q_{2\mu}
=\left\{
\begin{array}{cc}
-2iyz, & \mu=1 \\
-2xz, & \mu=-1 \\
2ixy, & \mu=-2 \\
\end{array}
\right.
\end{eqnarray}
\section{Results and discussion}
\label{sec3}
Following Ref.~\cite{Win11}, in the $ph$-channel, we adopt the SGII
parameterized Skyrme force~\cite{Giai81} for the NN interaction, and
the No.1 set in Ref.~\cite{Yamamoto88} for the $\Lambda$N
interaction. In the $pp$-channel for nucleons, we follow
Ref.~\cite{Terasaki96} to use $g=1000$ MeV fm$^3$ for both protons
and neutrons. A smooth pairing energy cutoff of 5 MeV around the
Fermi level is used. In the mean-field calculations, the mass
quadrupole moments are constrained to the mesh-points in
$\beta$-$\gamma$ plane with $\beta=0, 0.05, 0.10, \dots, 1.20$ and
$\gamma=0^\circ, 6^\circ, 12^\circ, \dots, 60^\circ$. The $\Lambda$
particle occupies the lowest single-particle state throughout the
constraint calculations. In the following, we take $^{24}$Mg as an
example, and study the impurity effect of $\Lambda$ hyperon by
examining the changes of collective parameters and the resultant
collective excitation spectrum and related observables.
Figure~\ref{fig1} displays the collective potential $V_{\rm coll}$, the rms radius of protons, the mass parameters
$B_{\beta\beta}$, $B_{\gamma\gamma}$,
and the moment of inertia along the $1$-axis $I_1$ as functions of quadrupole deformation $\beta$ for $^{24}$Mg
and the nuclear core of $^{25}_\Lambda$Mg. The $\Lambda$N interaction energy ${\cal
E}_{N\Lambda}$ in Eq.(\ref{HFE}) has been included ($w$) or excluded ($w/o$)
in the collective potential $V_{\rm coll}$ for the
nuclear core of $^{25}_\Lambda$Mg [cf. Eq.(\ref{Vcoll})]. It is found that the $\Lambda$
hyperon has negligible influences on the moments of inertia and mass
parameters of the nuclear core. However, it can lower down the
barrier in the neighborhood of spherical shape and make the energy
curve stiffer at large deformed region. In other words, the $\Lambda$
will reduce the collectivity of $^{24}$Mg, where the $\Lambda$N interaction energy
plays a major role.
\begin{figure}[t]
\centering
\includegraphics[width=14cm]{fig2.eps}
\caption{The probability distribution $\rho_{I\alpha}(\beta,\gamma)$ in $\beta$-$\gamma$ plane for the $0^+_1$
state in $^{24}$Mg (left panel), $^{25}_\Lambda$Mg (core, $w/o$) (middle panel) and $^{25}_\Lambda$Mg (core, $w$) (right panel).}
\label{fig2}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[width=10cm]{fig3.eps}
\caption{The difference in the probability distribution $\rho_{I\alpha}(\beta,\gamma)$ of $0^+_1$ states between
$^{25}_\Lambda$Mg (core, $w/o$) and $^{24}$Mg (left panel) as well as
between $^{25}_\Lambda$Mg (core, $w$) and $^{24}$Mg (right panel).}
\label{fig3}
\end{figure}
In Fig.~\ref{fig2}, we plot the probability distribution
$\rho_{I\alpha}$ in $\beta$-$\gamma$ plane for the $0^+_1$
state in $^{24}$Mg and the nuclear core of $^{25}_\Lambda$Mg, where the $\rho_{I\alpha}$ is defined as~\cite{Li10},
\begin{equation}
\rho_{I\alpha} (\beta,\gamma)
= \sum_{K}\vert \Psi^{I}_{\alpha,K}(\beta,\gamma)\vert^2 \beta^3 \vert
\sin3\gamma\vert,
\end{equation}
which follows the normalization condition,
\begin{equation}
\int^\infty_0 \beta d\beta \int^{2\pi}_0 d\gamma \rho_{I\alpha}(\beta,\gamma) = 1.
\end{equation}
Here, $\Psi^{I}_{\alpha,K}(\beta,\gamma)$ is the collective wave function that corresponds to
the solution of 5DCH in Eq.(\ref{hamiltonian-quant}) and $\alpha=1, 2,\cdots$, labels collective
eigenstates for a given angular momentum $I$.
It is shown in Fig.~\ref{fig2} that the $\Lambda$ shifts slightly the probability distribution
of the $0^+_1$ state to the smaller deformation region. This effect can be seen more
clearly from the changes in $\rho_{I\alpha}(\beta,\gamma)$
for the $0^+_1$ state after the introducing of $\Lambda$ hyperon,
as shown in Fig.~\ref{fig3}, where the differences in the probability distribution $\rho_{0,1} (\beta,\gamma)$
for the nuclear core of $^{25}_\Lambda$Mg and $^{24}$Mg are
plotted. Quantitatively, the average values of $\beta(\gamma)$ are $0.54 (20.0^\circ)$ for $^{24}$Mg
and these values become $0.53 (20.7^\circ)$ for $^{25}_\Lambda$Mg (core, $w/o$),
and $0.52 (20.8^\circ)$ for $^{25}_\Lambda$Mg (core, $w$).
\begin{figure}[t]
\centering
\includegraphics[width=9cm]{fig4.eps}
\caption{The rms proton radius for $^{24}$Mg and the nuclear core of $^{25}_\Lambda$Mg.}
\label{fig4}
\end{figure}
Moreover, it is also shown in Fig.~\ref{fig1} that the rms radius of
protons is reduced by the $\Lambda$, in particular in the
neighborhood of spherical shape. However, this shrinkage effect
on the proton radius of $^{24}$Mg is only $\sim0.5\%$, as illustrated in
Fig.~\ref{fig4}, where the rms proton radius in $\beta$-$\gamma$ plane for
both the $^{24}$Mg and the nuclear core of $^{25}_\Lambda$Mg from
the 5DCH calculations are plotted.
\begin{figure*}[t]
\centering
\includegraphics[width=12cm]{fig5.eps}
\caption{The low-spin spectra of the ground state band for the
$^{24}$Mg (b) and the nuclear core of $^{25}_\Lambda$Mg (c, d) obtained by the five-dimensional
collective Hamiltonian (5DCH) with the parameters determined by the
Skyrme-Hartree-Fock+BCS calculations using the SGII
force~\cite{Giai81}. The $B(E2)$ values are in units of e$^2$ fm$^4$.
The spectrum of $^{24}$Mg is compared with the corresponding experimental
data (a), taken from Ref.~\cite{Endt90}.}
\label{fig5}
\end{figure*}
Figure~\ref{fig5} displays the low-spin spectra of ground state band for the
$^{24}$Mg and the nuclear core of $^{25}_\Lambda$Mg. It is noted that
the $\Lambda$ stretches the spectra of ground state band. Comparing
with columns (b) and (d), one finds that the $\Lambda$
increases the excitation energy of $2^+_1$ state by $\sim 7\%$.
Moreover, it reduces the E2 transition strength $B(E2: 2^+_1 \rightarrow 0^+_1)$
by $\sim 9\%$, which is a little smaller than the values,
$19(4)\%$ or $16(6)\%$ in $^{6}$Li~\cite{Tanida01}.
\section{Summary and outlook}
\label{sec4}
The impurity effect of $\Lambda$ hyperon in $^{24}$Mg has been
quantitatively studied in the framework of non-relativistic energy
density functional theory that has been extended to include
correlations related to the restoration of rotational symmetries and
fluctuations of collective variables by solving the eigenvalue
problem of a 5DCH for quadrupole vibrational and rotational degrees
of freedom, with parameters determined by constrained
self-consistent nonrelativistic mean-field calculations for triaxial
shapes using the SGII Skyrme force. The low-spin spectra for
$^{24}$Mg in both free space and with the additional $\Lambda$ have
been calculated. It has been found that the $\Lambda$ hyperon shifts
the collective wave function of ground state to a smaller
deformation region by softening the nuclear collective potential
surface in the neighborhood of spherical shape. As the consequence
of this effect, the spectra of ground state band becomes stretched
and the excitation energy of $2^+_1$ state is increased by $\sim
7\%$. Moreover, the $B(E2: 2^+_1 \rightarrow 0^+_1)$ value is
reduced by $\sim 9\%$. However, the shrinkage effect on the average
proton radius is found to be only $\sim0.5\%$.
As pointed out in Refs.~\cite{Win08,Schulze10,Win11}, the influence
of the addition of $\Lambda$ particle might be stronger in the
relativistic mean-field approach. Therefore, it would be very
interesting to extend this work to the relativistic case. In
addition, to calculate directly the $\gamma$-spectra of
single-$\Lambda$ hypernucleus, one has to extend the current EDF
based 3DAMP+GCM or 5DCH models for the odd-mass or odd-odd nucleus.
Working along this direction is in progress.
\begin{acknowledgments}
We would like to thank P. Z. Ning and T. Koike for helpful
discussions. K.H. acknowledges the Global Center of Excellence
Program "Weaving Science Web beyond Particle-Matter Hierarchy" at
Tohoku University for financial support and thanks the Southwest
University for its hospitality. This work is partly supported by the
Major State 973 Program 2007CB815000 and the NSFC under Grants No.
10947013 and No. 10975008; the Fundamental Research Funds for the
Central Universities (XDJK2010B007); the Southwest University
Initial Research Foundation Grant to Doctor (No. SWU109011 and No.
SWU110039); and the Japanese Ministry of Education, Culture, Sports,
Science and Technology by Grant-in-Aid for Scientific Research under
Program No. 22540262.
\end{acknowledgments}
\begin{appendix}
\section{Calculations of moments of inertia with the EV8 code}
\label{Appendix}
In the {\tt ev8} code~\cite{Bonche05}, the single-particle (s.p.) wave-function of $k$-state $\Phi_{k}(\br)$,
discretized on a three-dimensional Cartesian mesh, is written in the $4$-component
form,
\beqn
\Phi_{k}=\begin{pmatrix}
\Psi^{(1)}_k+i \Psi^{(2)}_k \\
\Psi^{(3)}_k+i \Psi^{(4)}_k
\end{pmatrix}
\eeqn
where $\Psi^{(\alpha)}_k$ ($\alpha=1,2,3,4$) are real functions corresponding to
the real and imaginary, spin-up and spin-down parts of $\Phi_{k}$.
The time-reversed state of $\Phi_{k}$ are determined by
\beqn
\Phi_{\bar k}
\equiv\hat T \Phi_{k}
=
-\begin{pmatrix}
\Psi^{(3)}_k -i \Psi^{(4)}_k \\
-\Psi^{(1)}_k+i \Psi^{(2)}_k \\
\end{pmatrix}.
\eeqn
Therefore, the components in $\Phi_{\bar k}$
are connected with the components in $\Phi_{k}$
by the following relations,
\beq
\Psi^{(1)}_{\bar k} =-\Psi^{(3)}_k,~
\Psi^{(2)}_{\bar k} = \Psi^{(4)}_k,~
\Psi^{(3)}_{\bar k} = \Psi^{(1)}_k,~
\Psi^{(4)}_{\bar k} =-\Psi^{(2)}_k.
\eeq
\begin{table}
\tabcolsep=20pt
\caption{Parities of four components $\Psi^{(\alpha)}_k$ ($\alpha=1,2,3,4$)
in single-particle wave function $\Phi_{k}$ of $k$-state with respect to
the planes $x=0, y=0, z=0$. The parity of $k$-state is denoted as $p_k$.}
\begin{tabular}{c|ccc}
\hline\hline
& $x$ & $y$ & $z$ \\
\hline
$\Psi^{(1)}_k$ & $+$ & $+$ & $p_k$ \\
$\Psi^{(2)}_k$ & $-$ & $-$ & $p_k$\\
$\Psi^{(3)}_k$ & $-$ & $+$ & $-p_k$\\
$\Psi^{(4)}_k$ & $+$ & $-$ & $-p_k$\\
\hline\hline
\end{tabular}
\label{Symmetry}
\end{table}
Together with the parities of the four components $\Psi^{(\alpha)}_k$ in single-particle wave function
$\Phi_{k}$ of $k$-state with respect to the planes $x=0, y=0, z=0$, as shown in Table~\ref{Symmetry},
the moments of inertia $I_{1,2,3}$ in Eq.(\ref{Inglis-Belyaev})
can be simplified as
\begin{subequations}
\beqn
I_{1,2}
&=&2\sum_{i,j>0}\dfrac{(u_iv_j-v_iu_j)^2}{E_i+E_j}
\left\vert\langle \bar i\vert\hat J_{1,2}\vert j\rangle\right\vert^2,\\
I_3
&=&2\sum_{i,j>0}\dfrac{(u_iv_j-v_iu_j)^2}{E_i+E_j}
\left\vert\langle i\vert\hat J_3\vert j\rangle\right\vert^2,
\eeqn\end{subequations}
where the s.p. states $i, j$ have the same parities ($p_i=p_j$) and
the non-zero matrix elements are determined by,
\begin{subequations}\beqn
\langle \bar i\vert\hat J_{1}\vert j\rangle
&=&\int\int\int^{+\infty}_{-\infty} dxdydz\nonumber\\
&& \left[-\Psi^{(3)}_{i}(\dfrac{1}{2}\Psi^{(3)}_j+i\hat L_x\Psi^{(2)}_j)
+\Psi^{(4)}_{i}(\dfrac{1}{2}\Psi^{(4)}_j-i\hat L_x\Psi^{(1)}_j)\right.\nonumber\\
&&+\left. \Psi^{(1)}_{i}(\dfrac{1}{2}\Psi^{(1)}_j+i\hat L_x\Psi^{(4)}_j)
- \Psi^{(2)}_{i}(\dfrac{1}{2}\Psi^{(2)}_j-i\hat
L_x\Psi^{(3)}_j)\right],\\
\langle \bar i\vert\hat J_2\vert j\rangle
&=&\int\int\int^{+\infty}_{-\infty} dxdydz\nonumber\\
&& \left[-\Psi^{(3)}_{i}(-\dfrac{1}{2}\Psi^{(3)}_j-i\hat L_y\Psi^{(1)}_j)
+ \Psi^{(4)}_i(-\dfrac{1}{2}\Psi^{(4)}_j-i\hat L_y\Psi^{(2)}_j)\right.\nonumber\\
&&+\left.\Psi^{(1)}_i(\dfrac{1}{2}\Psi^{(1)}_j-i\hat L_y\Psi^{(3)}_j)
-\Psi^{(2)}_i(\dfrac{1}{2}\Psi^{(2)}_j-i\hat
L_y\Psi^{(4)}_j)\right],\\
\langle i\vert\hat J_3\vert j\rangle
&=&\int\int\int^{+\infty}_{-\infty} dxdydz\nonumber\\
&&\left[+\Psi^{(1)}_i(\dfrac{1}{2}\Psi^{(1)}_j +i\hat L_z\Psi^{(2)}_j)
+\Psi^{(2)}_i (\dfrac{1}{2}\Psi^{(2)}_j -i\hat L_z\Psi^{(1)}_j)\right.\nonumber\\
&&+\left.\Psi^{(3)}_i(-\dfrac{1}{2}\Psi^{(3)}_j +i\hat L_z\Psi^{(4)}_j)
+\Psi^{(4)}_i (-\dfrac{1}{2}\Psi^{(4)}_j -i\hat
L_z\Psi^{(3)}_j)\right].
\eeqn\end{subequations}
In the above equations, $\hat L_\kappa$ ($\kappa=x,y,z$) denote the components of orbital angular momentum
operator.
\end{appendix}
|
1,314,259,995,006 | arxiv | \section{Introduction}
\label{Section_Introduction}
Casimir forces result from, and provide insight into, the behavior of a medium confined to a restricted space, canonically the region between two plane, parallel surfaces. In the case of the electromagnetic Casimir force, the medium is the vacuum, and the underlying mechanism is the set of quantum zero point or temperature fluctuations of the electromagnetic field. The now widely-investigated critical Casimir force (CCF) results from the fluctuations of an order parameter and more generally the thermodynamics of the medium supporting that order parameter in the vicinity of a critical point. In fact, the free energy of a confined medium can mediate a Casimir force at any temperature provided its excitations are long-range correlated ones. This fact, along with the wide range of options for a mediating substance opens up a range of possibilities for the study and exploitation of the Casimir force arising from a confined medium.
One of the principal influences on the Casimir force is the nature of the bounding surface. With respect to the CCF, published investigations have been focused, almost exclusively, on systems belonging to the Ising universality class. On a basic level, based on the behavior of coupling in the vicinity of the surface, there are three universality classes---extraordinary (or normal), ordinary and surface-bulk (or special), ones \cite{D86,K94,BDT2000}. Experimental investigations into the influence of surface universality classes on the Casimir force have been reported in \cite{SZHHB2007,RBM2007,HHGDB2008,GMHNHBD2009,NHC2009,NDHCNVB2011,ZAB2011}. Most of them focus on the behavior of colloids in a critical solvent. They probe the dependence of the force between boundaries on temperature, the concentration of the components of the solvent and the relative preference of the surfaces of the colloids for the components of the solvent. For example, in \cite{GMHNHBD2009} the critical thermal noise in a solvent medium consisting of a binary liquid mixture of water and 2,6-lutidine near its lower consolute point is shown to lead to attractive or repulsive forces, depending on the relative adsorption preferences of the colloid and substrate surfaces with respect to the two components of the binary liquid mixture. On the theoretical side, the influence of the surface fields has been studied on the case of two dimensional Ising model via exact calculation \cite{NN2008,NN2009,AM2010,NN2016}, using the variational formulation due to Mikheev and Fisher \cite{B2015,Z2012}, with the help of density-matrix renormalization-group numerical method \cite{MCD99,DMC2000,DME2000,ZMD2013}, via conformal invariance \cite{VED2013,JRT2015}, Monte Carlo methods \cite{VED2013}, and numerically using bond propagation algorithms \cite{WI2015}. 
The three dimensional Ising model has been studied with Monte Carlo methods in \cite{VGMD2009,H2011,VMD2011,TTD2013,VD2013,V2014,MVDD2015}, mean-field type calculations \cite{PD2004,K97,MMD2010,VDK2012,THD2015,VD2015} and renormalized local functional theory \cite{OO2012}. In general, it has been shown that the Casimir force depends on the strength of the surface fields $h_1$ and $h_2$ and that it can change sign as the magnitudes of the surface field, the thickness of the films, and the temperature of the system are varied.
For the general case of $O(n)$ systems there is no similarly thorough classification \cite{P90}. References \cite{APP91,HK92,ZPZ98,KG99,BLF2000,GAF2001,HSD2004,KNSP2013,HNSP2014} report on studies of the Casimir force in liquid crystals, and \cite{I86,GC99,GC2002,ZRK2004,GSGC2006,MGD2007,UBMCR2003} describe investigations for $^4$He and $^3$He--$^4$He mixtures. In the case of Helium films, however, it is generally accepted that the boundary conditions are determined, in the region where the liquid behaves as a quantum liquid, by its quantum nature and, thus, cannot be easily influenced by modification of bounding surfaces, in that there are no surface fields that couple to the order parameter in such systems. In that respect liquid crystals seem much more readily adjustable, and in particular more amenable to the influence of boundary conditions. For example, in Ref. \cite{APP91} it is shown that director fluctuations in nematics induce long-range interactions between walls, which are attractive with symmetric boundary conditions, but may become repulsive with mixed ones. In smectics such forces are longer ranged than van der Waals ones.
In \cite{ZPZ98} the authors concluded that in the case of finite surface coupling, the fluctuation-induced forces for nematics are weaker than in the strong anchoring limit. In the example of three-dimensional lattice XY model with nearest neighbor interaction, it has been shown \cite{BDR2011} that the Casimir force depends in a continuous way on the parameter $\alpha$ characterizing the so-called twisted boundary conditions when the angle between the vector order parameter at the two boundaries is $\alpha$ where $0<\alpha\le \pi$. The effect is essential; depending on $\alpha$ the force can be attractive or repulsive. By varying $\alpha$ and/or the temperature $T$ one can control both the sign and the magnitude of the Casimir force in a reversible way. Furthermore, when $\alpha = \pi $, an additional phase transition, which occurs only in finite systems, has been discovered, associated with the spontaneous symmetry breaking of the direction of rotation of the vector order parameter through the body of the system.
In the current article we show that the strength and the mutual orientation of surface fields---as well as structuring on the surface via chemical or other alternations that can be described in terms of surface fields---lead to interesting and substantial modification in the behavior of the force between the confining surface. Such modification includes the change of the sign of the force, as well as non-monotonic behavior, appearance of multiple minima, of a longitudinal Casimir force, and also an amplification of the force in regions with strong helicity effects. We will demonstrate the above with the example of few models: the one dimensional XY and Heisenberg models, the three dimensional Gaussian model and the three dimension $O(2)$ XY model.
We start with the one-dimensional XY and Heisenberg models.
\section{1d continuum symmetry models with boundary fields} \label{sec:continuum}
\label{1d_systems}
Here we consider two one-dimensional models with continuous $O(n)$ spin symmetry: XY ($n=2$) and Heisenberg ($n=3$) chains of $N$ spins with ferromagnetic interaction $J$ between nearest-neighbor spins,
the boundary fields ${\mathbf H}_1$ and ${\mathbf H}_2$ of which are at an angle $0\le\psi\le \pi$ with respect to each other. Obviously, such systems do not exhibit spontaneous ordering at non-zero temperatures given their low dimension and the short range nature of the interactions between spins, as has been shown to follow rigorously from the Mermin-Wagner theorem \cite{MW66}. Nevertheless, they posses an essential singular point at $T=0$ and will, in that limit, support spontaneous order. We will demonstrate that when the boundary fields are non-zero the Casimir force, $F_{\rm Cas}$, of these systems displays very rich and interesting behavior. We also show that near $T=0$ the force has a scaling behavior and that, depending on the angle between the boundary fields and the value of the temperature scaling variable $x\sim N k_B T/J$, this force can be {\it attractive} or {\it repulsive}. More precisely, we will establish that:
\begin{enumerate}
\item[i)]For low temperatures, when $x={\cal O}(1)$ and
\begin{equation}
\label{eq:constraint}
N\gg J\left(\frac{1}{H_1}+\frac{1}{H_2}\right)
\end{equation}
the leading behavior of the Casimir force can be written in the form
\begin{equation}
\label{eq:1dCas_gen}
\beta F_{\rm Cas}(T,N,{\bf H}_1,{\bf H}_2)=N^{-1}X_{\rm Cas}(\psi,x),
\end{equation}
with $x$ a scaling variable and $X_{\rm Cas}$ a universal scaling function. Equation (\ref{eq:1dCas_gen}) implies that, under constraint \eq{eq:constraint}, $X_{\rm Cas}$ depends only on the scaling variable $x$ defined in (\ref{eq:scaling_variables}) and the angle $\psi$. The latter parameter effectively describes the boundary conditions on the system. Note that, unlike the Ising model, the boundary conditions depend here {\it continuously} on one parameter---in our notation $\psi$.
\item[ii)] When $x\to 0+$ the scaling function of the Casimir force becomes positive, i.e., the force turns {\it repulsive} provided that $\psi \ne 0$. In that case $X_{\rm Cas}\sim x^{-1}$ and, thus, the overall $N$-dependence of the force is of the order of $N^{-2}$.
\item[iii)] When $x\gtrsim 1$ the scaling function has a sign that depends on the sign of $\cos(\psi)$: for $0<|\psi|<\pi/2$ the force will be {\it attractive}, while for $\pi/2<|\psi|<\pi$ it will be {\it repulsive}. For $x\gg 1$ the force decays exponentially to zero.
\item[iv)] For any $\psi$ such that $0<|\psi|<\pi/2$ the Casimir force {\it changes from attractive to repulsive} when the temperature decreases from a moderate value to zero for fixed system size, $N$.
\item[v)] When $\psi=0$ the force is attractive for {\it any} value of the scaling variable $x$.
\end{enumerate}
These 1d models have been studied analytically in the case of free (frequently termed ``open'' or Dirichlet) and periodic boundary conditions \cite{F64,J67,J67b,S68,PB2011}, but we are not aware of any investigation of them in the presence of boundary fields, which are responsible for the effects of interest in this article.
\subsection{The 1d XY model}
\label{sec:1dXY_model}
We consider a system with the Hamiltonian
\begin{equation}
\label{eq:def_1d_Ham}
{\cal H} = -J \sum _{i=1}^{N-1} {\mathbf S}_i.{\mathbf S}_{i+1}-{\mathbf H}_1.{\mathbf S}_1-{\mathbf H}_N.{\mathbf S}_N
\end{equation}
where ${\mathbf S}_i$, with ${\mathbf S}_i^2=1$ and ${\mathbf S}_i \in \mathbb{R}^2$, $i=1,\cdots,N$, are $N$ spins arranged along a straight line. The Hamiltonian can be written in the form
\begin{eqnarray}
\label{eq:system_angles}
{\cal H} &=& -J \sum _{i=1}^{N-1} \cos \left(\varphi _{i+1}-\varphi _i\right)
\\&&
-H_1 \cos \left(\psi _1-\varphi _1\right)-H_N
\cos \left(\psi _N-\varphi _N\right), \nonumber
\end{eqnarray}
where the angles $\psi_1, \psi_N$ and $\varphi _1,\cdots,\varphi _N$ are measured with respect to the line of the chain which is taken to be, say, the $x$~axis. The free energy $-\beta F_N$ of this system is given by
\begin{equation}
\label{eq:free_energy}
\exp \left(-\beta F_N\right)=\int_{0}^{2\pi}\exp \left(-\beta {\cal H}\right)\ \ \prod_{i=1}^{N} \frac{d\varphi_i}{2\pi}.
\end{equation}
Performing the requisite calculations (see Appendix \ref{app:XY}) one obtains
\begin{eqnarray}
\label{eq:free_energy_calculated}
\lefteqn{\exp \left(-\beta F_N\right)}\\&=&\sum _{k=-\infty }^{\infty } \exp \left(i k \psi\right) I_k\left(h_1\right) I_k(K){}^{N-1} I_k\left(h_N\right)\nonumber
\end{eqnarray}
where
\begin{equation}
\label{eq:def_parameters}
\psi \equiv (\psi_1-\psi_N), K\equiv \beta J, h_1\equiv \beta H_1, h_N\equiv\beta H_N.
\end{equation}
Note that the free energy depends only on the difference in angles, $(\psi_1-\psi_N)$, and not on $\psi_1$ and $\psi_N$ separately. For the Casimir force in the system, i.e., for the finite size part of the total force, see \eq{tot}, one then has the {\it exact} expression
\begin{widetext}
\begin{equation}
\label{FCas}
\beta F_{\text{Cas}}=
\frac{ 2\sum _{k=1}^{\infty }
\cos \left[k (\psi_1-\psi_N)\right] \log \left[\frac{I_k(K)}{I_0(K)}\right]\frac{I_k\left(h_1\right)}{I_0\left(h_1\right)} \left(\frac{I_k(K)}{I_0(K)}\right)^{N-1} \frac{I_k\left(h_N\right)}{I_0\left(h_N\right)}
}{1+2 \sum _{k=1}^{\infty } \cos \left[k (\psi_1-\psi_N)\right] \frac{I_k\left(h_1\right)}{I_0\left(h_1\right)} \left(\frac{I_k(K)}{I_0(K)}\right)^{N-1} \frac{I_k\left(h_N\right)}{I_0\left(h_N\right)}}.
\end{equation}
\end{widetext}
From here on we will be interested in the behavior of the system in the limit $\beta\gg 1$, i.e., when $T\to 0$. Obviously, when $\beta\gg 1$ from \eq{eq:def_parameters} one has $h_1\gg 1$, $h_N\gg 1$ and $K\gg 1$, which means that in \eq{eq:free_energy_calculated} one uses the large argument asymptote of $I_{k}(z)$ for $z\gg 1$. We will use the asymptote in the form reported in \cite{SP85}
\begin{equation}
\label{eq:as_form_SP}
I_\nu(z)= \frac{e^{z-\nu^2/2z}}{\sqrt{2\pi z}}\left[1+\frac{1}{8 z}+{\cal O}\left(\frac{\nu^2}{z^2}\right)\right].
\end{equation}
Retaining only the first term in the above expansion, one obtains
\begin{equation}
\label{eq:F_Cas_scaling_def}
\beta F_{\rm Cas}(x)=\frac{1}{N_{\rm eff}}X_{\rm Cas}(\psi,x,h_{\rm eff})
\end{equation}
where
\begin{equation}
\label{eq:F_Cas_scaling}
X_{\rm Cas}=-x\frac{ \sum _{k=1}^{\infty } k^2
\cos \left(k \psi\right) \exp
\left[-\frac{1}{2}
k^2\left(h_{\rm eff}^{-1}+x\right)\right]}{
1+2 \sum _{k=1}^{\infty } \cos \left(k \psi\right) \exp \left[-\frac{1}{2}
k^2\left(h_{\rm eff}^{-1}+x\right)\right]},
\end{equation}
and
\begin{equation}
\label{eq:scaling_variables}
x\equiv \frac{N_{\rm eff}}{K}, \qquad h_{\rm eff}^{-1}=h_1^{-1}+h_N^{-1}, \qquad N_{\rm eff}=N-1.
\end{equation}
Here, $x$ is the scaled version of the reduced temperature variable, which in systems with a non-zero transition temperature takes the form $x=t L^{1/\nu}$, with $t$ the reduced temperature $\propto T-T_c$, $L$ the characteristic size of the finite system and $\nu$ the correlation length exponent. Recall that with an effective transition temperature of $T=0$ and $K \propto 1/T$, the definition in (\ref{eq:scaling_variables}) is consistent with this definition under the assumption that $\nu=1$.
Obviously, when \eq{eq:constraint} is fulfilled
one has $x\gg h_{\rm eff}^{-1}$, and one can safely ignore $h_{\rm eff}$ in \eq{eq:F_Cas_scaling}. Then the behavior of the force is exactly as stated in \eq{eq:1dCas_gen}.
The representation of $X_{\rm Cas}$ given by \eq{eq:F_Cas_scaling} is convenient for all values of $x$ except in the limit $x \ll 1$. For that limit, using the Poisson identity \eq{eq:Poisson}, one obtains
\begin{eqnarray}
\label{eq:F_Cas_scaling_small_x}
\lefteqn{X_{\rm Cas}(\psi,x,h_{\rm eff})=-\frac{x}{2
\left(x+h_{\rm eff}^{-1}\right)}}\\
&&+\frac{x}{2\left(x+h_{\rm eff}^{-1}\right)^2}\frac{ \sum _{n=-\infty}^{\infty} \left(2 n \pi
+\psi\right)^2
\exp{\left[-\frac{\left(2 n \pi +\psi\right)^2}{2
\left(x+h_{\rm eff}^{-1}\right)}\right]}}{
\sum _{n=-\infty}^{\infty}
\exp{\left[-\frac{\left(2 n \pi +\psi\right)^2}{2
\left(x+h_{\rm eff}^{-1}\right)}\right]}}. \nonumber
\end{eqnarray}
Under the assumption that the constraint \eqref{eq:constraint} is fulfilled and given the asymptotic behavior of $X_{\rm Cas}$ from Eqs. \eqref{eq:F_Cas_scaling} and \eqref{eq:F_Cas_scaling_small_x}, we derive
\begin{equation}
\label{X_cas_ass}
X_{\rm Cas}(\psi,x)=\left\{ \begin{array}{ll}
-\frac{1}{2}+\frac{1}{2x} \psi^2+\cdots, & x\to 0+ \\
-x \cos(\psi) \exp(-x/2), & x \gg 1.
\end{array}
\right.
\end{equation}
\begin{figure}[h]
\includegraphics[width=\columnwidth]{1d_X_Cas.pdf}
\caption{(Color online) The scaling function $X_{\rm Cas}$ of the XY model as a function of the scaling variable $x$, see \eq{eq:scaling_variables}, for different values of the phase change $\psi$.}
\label{Fig:1d_X_Cas}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\columnwidth]{1d_X_Cas_Surface_compressed.pdf}
\caption{(Color online) The surface of the scaling function $X_{\rm Cas}(\psi,x)$ of the XY model as a function of the scaling variables $x$ and $\psi$. The horizontal plane marks the $X_{\rm Cas}=0$ value.}
\label{Fig:1d_X_Cas_Surface}
\end{figure}
From \eq{eq:F_Cas_scaling_small_x} one can also derive an expression for the low $T$ behavior of the system that retains the dependence on $H_1$ and $H_N$. The result is
\begin{eqnarray}
\label{low_T_behavior}
\beta F_{\rm Cas}&=&-\frac{1}{2}\frac{1}{
\left(J/H_1 + J/H_N+N-1\right)}\nonumber \\ && +\frac{1}{2}K\frac{\left(\psi _1-\psi
_N\right)^2 }{
\left(J/H_1+J/H_N+N-1\right)^2}.
\end{eqnarray}
This result can be also directly derived by realizing that the ground state of the system is a spin wave such that the end spins are twisted with respect to each other at angle $\psi=\psi_1-\psi_N$.
Equations \eqref{eq:F_Cas_scaling}, \eqref{eq:F_Cas_scaling_small_x}, \eqref{X_cas_ass} and \eqref{low_T_behavior} confirm the validity of the statements i)-iv) in the first part of this section. For example, \eq{eq:F_Cas_scaling} demonstrates that when $\psi=0$ the force is attractive for {\it any} value of the scaling variable $x$; \eq{X_cas_ass} then confirms this behavior for small and large values of the scaling variable $x$.
The behavior of the scaling function $X_{\rm Cas}(\psi,x)$ for different values of $\psi$ as a function of the scaling variable $x$ is shown in Fig. \ref{Fig:1d_X_Cas}. Fig. \ref{Fig:1d_X_Cas_Surface} shows a $3D$ plot of this function for $x\in[0,10]$ and $\psi\in[-\pi,\pi]$.
\subsection{The 1d Heisenberg model}
\label{sec:1dH_model}
The Hamiltonian of the system is again given by \eq{eq:def_1d_Ham} with the condition that now the $N$ spins ${\mathbf S}_i$, $i=1,\cdots,N$, again arranged along a straight line, are three-dimensional unit vectors ${\mathbf S}_i \in \mathbb{R}^3$.
\begin{figure}[h]
\includegraphics[width=\columnwidth]{1d_X_H_Cas.pdf}
\caption{(Color online) The scaling function $X_{\rm Cas}$ of the Heisenberg model as a function of the scaling variable $x$, see \eq{eq:def_FCas_Heis_scaling}, for different values of the phase change $\psi$.}
\label{Fig:1d_X_H_Cas}
\end{figure}
As shown in Appendix \ref{app:Heisenberg} the free energy of the system is given by the {\it exact} expression
\begin{widetext}
\begin{eqnarray}
\label{eq:free_energy_calcul_final_result}
\exp \left(-\beta F_N\right) &=& \left(\frac{\pi}{2K}\right)^{(N-1)/2}\frac{\pi}{2\sqrt{h_1h_N}} \sum_{n=0}^{\infty} (2n+1)P_n \left(\cos\psi_h\right) I_{n+1/2}(h_1) I_{n+1/2}(h_N) \left[I_{n+1/2}(K)\right]^{N-1}\\
&=& \frac{\sinh h_1}{h_1}\frac{\sinh h_N}{h_N}\left[\frac{\sinh K}{K}\right]^{N-1} \left\{1+ \sum_{n=1}^{\infty} (2n+1)P_n \left(\cos\psi_h\right) \frac{I_{n+1/2}(h_1)}{I_{1/2}(h_1)} \frac{I_{n+1/2}(h_N)}{I_{1/2}(h_N)} \left[\frac{I_{n+1/2}(K)}{I_{1/2}(K)} \right]^{N-1}\right\}, \nonumber
\end{eqnarray}
\end{widetext}
where $\psi_h$ is the angle between the vectors ${\mathbf H}_1$ and ${\mathbf H}_N$ and we have used that $I_{1/2}(x)=\sqrt{2/(\pi x)}\sinh(x)$. Here $I_{n+1/2}(z)$ is the modified Bessel function of the first kind of half-integer index, $P_n(x)$ is the Legendre polynomial of degree $n$ and $K$, $h_1$ and $h_N$ are defined in accord with \eq{eq:def_parameters_Heis}.
\begin{equation}
\label{eq:def_parameters_Heis}
K\equiv \beta J, h_1\equiv \beta H_1, h_N\equiv\beta H_N.
\end{equation}
When $h_1\to 0$ and $h_N\to 0$ the system considered becomes the one with Dirichlet boundary conditions, a case that was studied by M. E. Fisher in \cite{F64}. Taking into account that $I_{n+1/2}(x)=[2^{n+1/2}\Gamma(n+3/2)]^{-1}x^{n+1/2}+{\cal O}(x^{5/2+n})$ and that $P_0(x)=1$, one concludes that only the term with $n=0$ will contribute to the free energy in this case. One obtains
\begin{eqnarray}
\label{eq:free_energy_calcul_final_result_Dirichlet_bc}
\exp \left(-\beta F_N\right) &=& \left(\frac{\pi}{2K}\right)^{(N-1)/2}\left[I_{1/2}(K)\right]^{N-1}\\
&=&\left[\frac{\sinh K}{K}\right]^{N-1}. \nonumber
\end{eqnarray}
The last expression is precisely the result derived in \cite{F64}.
From \eq{eq:free_energy_calcul_final_result} one can easily derive the corresponding {\it exact} expression for the Casimir force for the one dimensional Heisenberg model. One has
\begin{widetext}
\begin{equation}
\label{eq:def_FCas_Heis}
\beta F_{\text{Cas}}=\frac{ \sum_{n=1}^{\infty} (2n+1)P_n \left(\cos\psi_h\right) \ln\left[\frac{I_{n+1/2}(K)}{I_{1/2}(K)}\right] \frac{I_{n+1/2}(h_1)}{I_{1/2}(h_1)} \frac{I_{n+1/2}(h_N)}{I_{1/2}(h_N)} \left[\frac{I_{n+1/2}(K)}{I_{1/2}(K)} \right]^{N-1}}{1+ \sum_{n=1}^{\infty} (2n+1)P_n \left(\cos\psi_h\right) \frac{I_{n+1/2}(h_1)}{I_{1/2}(h_1)} \frac{I_{n+1/2}(h_N)}{I_{1/2}(h_N)} \left[\frac{I_{n+1/2}(K)}{I_{1/2}(K)}\right]^{N-1}}.
\end{equation}
\end{widetext}
In the limit $T\to 0$ when $h_1\gg 1$, $h_N\gg 1$ and $K\gg 1$ from \eq{eq:as_form_SP} one obtains
\begin{equation}
\label{eq:F_Cas_scaling_Heis}
\beta F_{\rm Cas}(x)=\frac{1}{N_{\rm eff}}X_{\rm Cas}(\psi_h,x,h_{\rm eff})
\end{equation}
where the scaling variable $x$, as well as $h_{\rm eff}$, are as defined in \eq{eq:scaling_variables} while the scaling function $X_{\rm Cas}$ is
\begin{widetext}
\begin{equation}
\label{eq:def_FCas_Heis_scaling}
X_{\rm Cas}(\psi_h,x,h_{\rm eff})=-\frac{1}{2}x\frac{ \sum_{n=1}^{\infty} n(n+1)(2n+1)P_n \left(\cos\psi_h\right) \exp\left[-\frac{1}{2} n(n+1)\left(x+h_{\rm eff}^{-1}\right)\right]}{1+ \sum_{n=1}^{\infty}(2n+1)P_n \left(\cos\psi_h\right) \exp\left[-\frac{1}{2} n(n+1)\left(x+h_{\rm eff}^{-1}\right)\right]}.
\end{equation}
\end{widetext}
As in the case of the $XY$ model, when \eq{eq:constraint} is fulfilled one can ignore $h_{\rm eff}$ in the above expression. If not stated otherwise we will always suppose this to be the case. Then the scaling function $X_{\rm Cas}$ depends only on the scaling variable $x$ and the angle $\psi_h$ that parametrizes the boundary conditions on the system, exactly as set forth in \eq{eq:1dCas_gen}. The representation of $X_{\rm Cas}$ given by \eq{eq:def_FCas_Heis_scaling} is applicable for all values of $x$ except in the limit $x \ll 1$. Keeping in mind that $P_1(\cos \psi_h)=\cos\psi_h$, and in light of the fast decay of the terms in the sums in \eq{eq:def_FCas_Heis_ass}, it is clear that for those very small values of $x$ the sign of the force will be determined by the sign of $\cos \psi_h$. For the leading behavior of the Casimir force when $x\ll 1$ one obtains
\begin{widetext}
\begin{equation}
\label{eq:def_FCas_Heis_ass}
X_{\rm Cas}(\psi_h,x,h_{\rm eff})=-1+
\frac{h_{\rm eff}^{-1}}{h_{\rm eff}^{-1}+x}
+
\frac{x (1-\cos\psi_h)}{\left(h_{\rm eff}^{-1}+x\right)^2}
+x\frac{
\coth
\left(\frac{1}{h_{\rm eff}^{-1}+x}\right)
-1}{\left(h_{\rm eff}^{-1}+x\right)^2},
\end{equation}
\end{widetext}
which follows from \eq{smallx_Heis}.
One can also derive the first three terms in that expansion by considering the $N$ dependence of the ground energy of the 1d Heisenberg model, assuming it to be in the form of a spin wave. Explicitly, for the behavior of the Casimir force for $T\to 0$ from \eq{eq:def_FCas_Heis_ass} one obtains
\begin{eqnarray}
\label{low_T_behavior_Heis}
\beta F_{\rm Cas}&=&-\frac{1}{
\left(J/H_1 + J/H_N+N-1\right)}\nonumber \\ && +K \frac{1-\cos\psi_h}{
\left(J/H_1+J/H_N+N-1\right)^2}.
\end{eqnarray}
The behavior of the scaling function $X_{\rm Cas}(\psi_h,x)$ for different values of $\psi_h$ as a function of the scaling variable $x$ is shown in Fig. \ref{Fig:1d_X_H_Cas} while Fig. \ref{Fig:1d_X_H_Cas_Surface} shows a $3D$ plot of this function for $x\in[0,10]$ and $\psi_h\in[-\pi,\pi]$.
Thus, for the overall behavior of the Casimir force as a function of $\psi_h$ one arrives at the same set of conclusions for the Heisenberg model as for the $XY$ model as a function of $\psi$, as summarized in statements i)-v).
\begin{figure}[h]
\includegraphics[width=\columnwidth]{1d_X_H_Cas_Surface_compressed.pdf}
\caption{(Color online) The surface of the scaling function $X_{\rm Cas}(\psi,x)$ of the Heisenberg model as a function of the scaling variables $x$ and $\psi$. The horizontal plane marks the $X_{\rm Cas}=0$ value.}
\label{Fig:1d_X_H_Cas_Surface}
\end{figure}
\section{The 3d Gaussian model} \label{sec:Gaussian}
Here, we focus on a system with scalar spins. This means that, strictly speaking, there is no helicity. However, the surface fields that influence the order parameter will have sinusoidal variation along the film boundaries, conforming to the behavior of the individual components of a field that induces helical order in a multi-component system. We therefore expect that the results to be derived and discussed in this section will be germane to corresponding behavior in such a system. We consider a planar discrete system containing $L$ two-dimensional layers with a Hamiltonian
\begin{widetext}
\begin{eqnarray}
\label{eq:def_Ham_GM}
-\beta \mathcal{H}&=&\sum _{x=1}^M \sum _{y=1}^N \Bigg\{K^\| \sum _{z=1}^L S_{x,y,z} \left(S_{x+1,y,z}+S_{x,y+1,z}\right)+K^\perp\sum _{z=1}^{L-1}
S_{x,y,z} S_{x,y,z+1}+h_1 S_{x,y,1} \cos \left(k_x x+k_y y\right) \nonumber\\
&& +h_L S_{x,y,L} \cos \left(k_x \left(x+\Delta _x\right)+k_y \left(y+\Delta
_y\right)\right)-s \sum_{z=1}^L S_{x,y,z}^2\Bigg\}
\end{eqnarray}
\end{widetext}
which describes a system with short-ranged nearest neighbor interactions possessing chemically modulated bounding surfaces situated at $z=1$ and $z=L$. Here $h_1=\beta H_1$ and $h_L=\beta H_L$ are the external fields acting only on the boundaries of the system. In the specific example considered the modulation depends on the coordinates $x$ and $y$ in a wave-like way specified by the applied surface fields $h_1\cos\left(k_x x+k_y y\right)\equiv h_1 \cos({\bf k}.{\bf r})$ and $h_L \cos [k_x \left(x+\Delta_x\right)+k_y \left(y+\Delta_y\right)]\equiv h_L\cos({\bf k}.({\bf r}+{\bf \Delta}))$, the phases of which are thus shifted with respect to each other by $\Delta_x$ in $x$ direction and by $\Delta_y$ in $y$ direction. Here ${\bf r}=(x,y)$, ${\bf k}=(k_x,k_y)$ and ${\bf \Delta}=(\Delta_x,\Delta_y)$. Periodic boundary conditions are applied along the $x$ and $y$ axes, while missing neighbor (Dirichlet) boundary conditions are imposed in the $z$ direction. These boundary conditions are expressed as follows:
\begin{equation}
\label{bc_def_per}
S_{1,y,z}=S_{M+1,y,z}, \qquad S_{x,1,z}=S_{x,N+1,z}
\end{equation}
and
\begin{equation}
\label{bc_Dirichlet}
S_{x,y,0}=0 \qquad \text{and} \qquad S_{x,y,L+1}=0.
\end{equation}
Given these boundary conditions, the Hamiltonian in Eq. (\ref{eq:def_Ham_GM}) can be rewritten in the form
\begin{eqnarray}
\label{eq:def_Ham_GM_final}
-\beta \mathcal{H}&=&\sum _{x=1}^M \sum _{y=1}^N \sum _{z=1}^L S_{x,y,z} \Bigg\{K^\| \left(S_{x+1,y,z}+S_{x,y+1,z}\right) \nonumber \\ && + K^\perp
S_{x,y,z+1} +\delta_{1,z} h_1 \cos \left[{\bf k}.{\bf r} \right] \nonumber\\
&& + \delta_{L,z} h_L \cos \left[{\bf k}.({\bf r}+{\bf \Delta}) \right]-s\; S_{x,y,z}\Bigg\}.
\end{eqnarray}
Since we will be considering the limit $M,N\to\infty$ we can always take the wave vector components $k_x$ and $k_y$ to coincide with $(2\pi p)/M $ and $(2\pi q)/N$ for some $p=1,\cdots, M$ and $q=1,\cdots,N$, respectively.
In Eqs. \eqref{eq:def_Ham_GM} and \eqref{eq:def_Ham_GM_final} one has
\begin{equation}
\label{eq:inte}
K^{\|}=\beta J^{\|}, \qquad \mbox{and} \qquad K^{\perp}=\beta J^{\perp},
\end{equation}
where $J^{\|}$ and $J^{\perp}$ are the strengths of the coupling constants along and perpendicular to the $L$ layers of the system. The parameter $s>0$ on the right hand side of \eqref{eq:def_Ham_GM_final} is subjected to the constraint that it has a value that ensures the existence of the partition function of the system. It is easy to check that $2K^{\|}+K^{\perp}-s \equiv \beta (2J^{\|}+J^{\perp})-s=0$ determines the critical temperature $\beta_c$ of the bulk model, i.e., one has
\begin{equation}
\beta_c=s/(2J^{\|}+J^{\perp}).
\label{betac}
\end{equation}
For the model defined above the Casimir force acting on the bounding planes at $z=1$ and $z=L$ has both orthogonal, $\beta F^{(\perp)}_{\rm Cas}$, and lateral, $\beta F^{(\|,\alpha)}_{\rm Cas}$, $\alpha=x$ or $\alpha=y$, components, which can be written in the form
\begin{equation}
\label{eq:gen_force}
\beta F^{(\cdots)}_{\rm Cas}=L^{-3}\left(\frac{J^\perp}{J^\|}\right) X^{(\cdots)}_{\rm Cas}(x_t,x_k,x_1,x_L),
\end{equation}
where $(\cdots)$ stands for either $(\perp)$ or $(\|,\alpha)$, with $\alpha=x$ or $\alpha=y$. Here
\begin{equation}
\label{eq:field_scaling_def}
x_{1}=\sqrt{L K^\|}\frac{h_1}{K^\perp}, \qquad x_{L}=\sqrt{L K^\|}\frac{h_L}{K^\perp},
\end{equation}
are the field-dependent scaling variables, $x_t$ is the temperature-dependent one with
\begin{equation}
\label{eq:xt_and_xk}
x_t=L\sqrt{2\left(\frac{\beta_c}{\beta}-1\right)\left[ 2\frac{J^{\|}}{J^\perp}+1\right]}, \qquad x_k=\sqrt{\dfrac{J^{\|}}{J^{\perp}}}\; L k,
\end{equation}
with $k=\sqrt{k_x^2+k_y^2}$, while $x_k$ is the scaling variable related to the surface modulation. When $h_1={\cal O}(1)$ and $h_L={\cal O}(1)$ we will see that $F^{(\cdots)}_{\rm Cas}$ has a {\it field dependent contribution} which, in this regime, will provide the {\it leading} contribution to the force of the order of $L^{-2}$.
The Hamiltonian (\ref{eq:def_Ham_GM_final}) can be easily diagonalized in a standard way---see Appendix \ref{A:GM}. The resulting free energy of the system, $F$, is
\begin{equation}
\label{eq:fe_short}
F=\Delta F_0+\Delta F_h,
\end{equation}
where
\begin{eqnarray}
\label{eq:free_energy_GM}
\lefteqn{-\beta \Delta F_0 = \frac{1}{2} M N L \ln\pi} \\
&& -\frac{1}{2}\sum _{l=1}^L \sum _{m=1}^M
\sum _{n=1}^N \ln\left\{
s-K^{\|}\left[\cos \left(\frac{2 \pi
m}{M}\right)+\cos \left(\frac{2 \pi
n}{N}\right)\right]\nonumber \right. \\
&&\left.-K^{\perp} \cos\left(\frac{\pi
l}{L+1}\right)\right\} \nonumber
\end{eqnarray}
is the field independent part of the free energy and $\Delta F_h$, the field dependent contribution, is
i) when either $p\ne M$ or $q\ne N$:
\begin{eqnarray}
\label{eq:free_energy_h}
\lefteqn{-\beta \Delta F_h = \frac{MN}{8(L+1)} \times} \\
&& \sum _{l=1}^L \frac{\sin^2\left(\frac{\pi l}{L+1}\right)\left[h_1^2+h_L^2-2 h_L h_1
(-1)^l \cos(\mathbf{k.\Delta})\right]}{s-K^{\|}\left[\cos \left(\frac{2 \pi p
}{M} \right)+\cos \left(\frac{2 \pi q
}{N} \right)\right]-K^{\perp} \cos\left(\frac{\pi l}{L+1}\right)}, \nonumber
\end{eqnarray}
where $\mathbf{k}=(k_x=2 \pi p /{M},k_y=2 \pi q /{N})$, and $\mathbf{\Delta}=(\Delta_x,\Delta_y)$, and
ii) when $p=M$ and $q=N$:
\begin{eqnarray}
\label{eq:free_energy_h_cf}
\lefteqn{-\beta \Delta F_h = \frac{MN}{2(L+1)} \times} \\
&& \sum _{l=1}^L \frac{\sin^2\left(\frac{\pi l}{L+1}\right)\left[h_1-h_L
(-1)^l \cos \left(2 \pi (\Delta
_x+\Delta
_y)\right)\right]^2}{s-2K^{\|}-K^{\perp} \cos\left(\frac{\pi l}{L+1}\right)}. \nonumber
\end{eqnarray}
Note that there is a fundamental difference between the sub-cases in Eqs. \eqref{eq:free_energy_h} and \eqref{eq:free_energy_h_cf}: while in the first sub-case $i)$ the field applied on the surfaces vanishes when spatially averaged, in the second sub-case $ii)$ its spatial average is a constant. In the last sub-case one can think of $h_L$ as a constant field acting on the second surface being twisted in direction with respect to the constant field $h_1$ applied to the first one, with a twist governed by $\Delta_x$ and $\Delta_y$.
Obviously
\begin{eqnarray}
\lefteqn{s - K^{\|}\left[\cos \left(\frac{2 \pi
m}{M}\right)+\cos \left(\frac{2 \pi
n}{N}\right)\right] -
K^{\perp} \cos\left(\frac{\pi
l}{L+1}\right)} \nonumber\\
&&=\left(\beta_c/\beta-1\right)\left[ 2K^{\|}+K^\perp\right] +K^{\perp} \left[1- \cos\left(\frac{\pi
l}{L+1}\right)\right]\nonumber \\
&&+K^{\|}\left[2-\cos \left(\frac{2 \pi
m}{M}\right)-\cos \left(\frac{2 \pi
n}{N}\right)\right] >0
\end{eqnarray}
for $\beta<\beta_c$. The above implies that the statistical sum of the infinite system exists for all $\beta<\beta_c$. The statistical sum of the finite system exists, however, under the less demanding constraint that
\begin{equation}
\label{eq:T_finite}
\left(\beta_c/\beta-1\right)\left[ 2J^{\|}+J^\perp\right] +J^{\perp} \left[1- \cos\left(\frac{\pi
}{L+1}\right)\right]>0.
\end{equation}
In the remainder we will assume that the constraint given by \eq{eq:T_finite} is fulfilled for all temperatures considered here.
For the contribution of the field-independent term to the transverse Casimir force
\begin{equation}
\label{eq:_def_no_field}
\beta\Delta F^{(0,\perp)}_{\rm Cas}=-\dfrac{\partial}{\partial L}(\beta\Delta f_0),
\end{equation}
with
\begin{equation}
\label{eq:f_0}
\Delta f_0=\lim_{M, N \to \infty}
\dfrac{\Delta F_0}{MN},
\end{equation}
it is demonstrated in Appendix \ref{A:GM} that
\begin{equation}
\label{eq:Cas_no_field}
\beta\Delta F^{(0,\perp)}_{\rm Cas}=-\frac{1}{2}\int _{-\pi }^{\pi
}\int _{-\pi }^{\pi }\delta
\left[\coth ((1+L) \delta)-1\right] \frac{d\theta
_1d\theta _2}{ (2 \pi )^2},
\end{equation}
where $\delta=\delta\left(\theta_1,\theta_2|\beta_c/\beta,J^\|/J^\perp\right)$ is given by the expression
\begin{eqnarray}
\label{eq:def_delta}
\cosh\delta &=& 1+\left(\frac{\beta_c}{\beta}-1\right)\left(1+ 2\frac{J^{\|}}{J^\perp}\right)\\
&& +\frac{J^\|}{J^\perp}\left(2-\cos \theta_1-\cos \theta_2\right). \nonumber
\end{eqnarray}
The result in \eq{eq:Cas_no_field} is an {\it exact} expression for $\beta\Delta F^{(0,\perp)}_{\rm Cas}$; no approximations have been made. Since $\coth(x)>1$ for $x>0$ one immediately concludes that $\Delta F^{(0,\perp)}_{\rm Cas}<0$, i.e., it is an {\it attractive} force, for {\it all} values of $L$. In order to obtain scaling and, thus, the scaling form of $\Delta F^{(0,\perp)}_{\rm Cas}$ we have to consider the regime $L\gg 1$.
Obviously, the Casimir force will then be exponentially small if $\delta$ is finite. For the scaling behavior of the force---see Appendix \ref{A:GM}---one obtains
\begin{equation}
\label{eq:F_Cas_no_field}
\beta\Delta F^{(0,\perp)}_{\rm Cas}=L^{-3}\left(\frac{J^\perp}{J^\|}\right) X^{(0,\perp)}_{\rm Cas}(x_t)
\end{equation}
where $X^{(0,\perp)}_{\rm Cas}(x_t)$ is the universal scaling function
\begin{eqnarray}
\label{eq:X_Cas_no_field_sf}
X^{(0,\perp)}_{\rm Cas}(x_t)&=&-\frac{1}{8 \pi}\Bigg\{
\text{Li}_3\left(e^{-2
x_t}\right)+2 x_t
\text{Li}_2\left(e^{-2
x_t}\right) \nonumber \\
&& -2 x_t^2 \ln \left(1-e^{-2
x_t}\right)\Bigg\}.
\end{eqnarray}
and the scaling variable $x_t$ is
\begin{equation}
\label{eq:delta_xt}
x_t=L \sqrt{2\left(\frac{\beta_c}{\beta}-1\right)\left(1+ 2\frac{J^{\|}}{J^\perp}\right)},
\end{equation}
in accord with \eq{eq:xt_and_xk}. It is easy to show that $X^{(0,\perp)}_{\rm Cas}(x_t)$ is a {\it monotonically increasing} function of $x_t$. The behavior of $X^{(0,\perp)}_{\rm Cas}(x_t)$ is visualized in Fig.
\ref{Fig:3d_G_X_Cas_Zero_Field}
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3d_G_X_Cas_Zero_Field.pdf}
\caption{(Color online) The scaling function $X^{(0,\perp)}_{\rm Cas}(x_t)$ as a function of the temperature dependent scaling variable $x_t$. The horizontal line marks the Casimir amplitude $X^{(0,\perp)}_{\rm Cas}(0)=-\zeta(3)/(8\pi)$.}
\label{Fig:3d_G_X_Cas_Zero_Field}
\end{figure}
At the critical point one has $x_t=0$ and then one immediately obtains the well known Casimir amplitude for the Gaussian model under Dirichlet boundary condition
\begin{equation}
\label{eq:F_Cas_no_field_ampl}
X^{(0,\perp)}_{\rm Cas}(x_t=0)=-\frac{\zeta(3)}{8 \pi}.
\end{equation}
It is easy to show that
\begin{equation}
\label{eq:asX0_Cas}
X^{(0,\perp)}_{\rm Cas}\simeq
\left\{\begin{array}{lcr}
-\frac{1}{8\pi} \exp(-2 x_t) \left[1+2 x_t
\left(1+x_t\right)\right],& x_t\gg 1&\\
&& \\
-\frac{1}{8 \pi
}\zeta (3)+\frac{1}{48 \pi} x_t^2 \left(6-4
x_t+x_t^2\right),& x_t\to 0.
\end{array} \right.
\end{equation}
For the field component of the transverse Casimir force
\begin{equation}
\label{eq:_def_field}
\beta\Delta F^{(h,\perp)}_{\rm Cas}=-\dfrac{\partial}{\partial L}(\beta\Delta f_h)
\end{equation}
where
\begin{equation}
\label{eq:f_h}
\Delta f_h=\lim_{M, N \to \infty}
\dfrac{\Delta F_h}{MN}
\end{equation}
one derives, see Eqs. \eqref{eq:delta_fh_final_pq} and \eqref{eq:delta_fh_final} in Appendix \ref{A:GM}:
{\it i)} if $p\ne M$ or $q\ne N$:
\begin{eqnarray}
\label{eq:Casimir_tr_pq}
\lefteqn{\beta \Delta F^{(h,\perp)}_{\rm Cas}=\frac{\lambda\sinh (\lambda)}{32 K^{\perp}}}\\
&&\times\left\{ \left[h_1^2+h_L^2-2 h_L h_1
\cos(\mathbf{k.\Delta})\right] \text{csch}^2\left[\frac{1+L}{2} \lambda\right] \right.\nonumber\\
&&\left.-\left[h_1^2+h_L^2+2 h_L h_1
\cos(\mathbf{k.\Delta})\right] \text{sech}^2\left[\frac{1+L}{2}
\lambda\right]\right\}. \nonumber
\end{eqnarray}
and
{\it ii)} if $p=M$ and $q=N$
\begin{eqnarray}
\label{eq:Casimir_tr}
\lefteqn{ \beta \Delta F^{(h,\perp)}_{\rm Cas}=\frac{\lambda\sinh (\lambda)}{32 K^{\perp}} }\\
&&\times\left\{ \left[h_1-h_L
\cos 2 \pi ( \Delta
_x+ \Delta
_y)\right]^2 \text{csch}^2\left[\frac{1+L}{2} \lambda\right] \right.\nonumber\\
&&\left.-\left[h_1+h_L
\cos 2 \pi ( \Delta
_x+ \Delta
_y)\right]^2 \text{sech}^2\left[\frac{1+L}{2}
\lambda\right]\right\}. \nonumber
\end{eqnarray}
Here we have introduced the helpful notation
\begin{equation}
\label{eq:x_def_1}
\cosh \lambda=\Lambda
\end{equation}
for the case when $\Lambda\ge 1$ and
\begin{equation}
\label{eq:x_def_2}
\cos \lambda=\Lambda
\end{equation}
in the opposite case when $\Lambda\le 1$. Note that
\begin{itemize}
\item
when $h_1={\cal O}(1)$, $h_L={\cal O}(1)$ and
\begin{equation}
\label{eq:w_def}
w=L\lambda/2
\end{equation}
is such that $w={\cal O}(1)$, {\it the Casimir force is of the order of} ${\cal O}(L^{-2})$, despite the fact that the system is at a temperature {\it above} the bulk critical one.
\item If $h_1$ and $h_L$ are such that the field-dependent scaling variables $x_1={\cal O}(1)$ and $x_L={\cal O}(1)$, see \eq{eq:field_scaling_def}, then,
in terms of $w$, the Casimir force $\beta\Delta F^{(h,\perp)}_{\rm Cas}$ reads
\begin{equation}
\label{eq:sc_funct_field}
\beta\Delta F^{(h,\perp)}_{\rm Cas}=L^{-3}\left(\frac{J^\perp}{J^\|}\right) X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)
\end{equation}
where the scaling function $X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)$ is
{\it i)} if $p\ne M$ or $q\ne N$:
\begin{eqnarray}
\label{eq:scaling_h_pq}
\lefteqn{X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)=\frac{1}{8} w^2}\\
&& \times \left\{[x_1^2+x_L^2-2x_1x_L \cos\left(\mathbf{k.\Delta}\right)] \text{csch}^2 w \right. \nonumber \\
&&\left. - [x_1^2+x_L^2+2x_1 x_L \cos\left(\mathbf{k.\Delta}\right)] \text{sech}^2 w \right\}, \nonumber
\end{eqnarray}
and
{\it ii)} if $p=M$ and $q=N$
\begin{eqnarray}
\label{eq:scaling_h}
\lefteqn{X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L) = \frac{1}{8} w^2}\\
&& \times \left\{[x_1-x_L \cos 2 \pi ( \Delta
_x+ \Delta
_y)]^2 \text{csch}^2 w \right. \nonumber \\
&&\left. - [x_1+x_L \cos 2 \pi ( \Delta
_x+ \Delta
_y)]^2 \text{sech}^2 w \right\}. \nonumber
\end{eqnarray}
The latter expression implies that in the regime considered here the field-dependent part of the force is of order $L^{-3}$, as is the field-independent part of it.
\end{itemize}
The asymptotic behavior of $\Delta F^{(h,\perp)}_{\rm Cas}$ for $w\gg 1$ can be easily obtained from Eqs. \eqref{eq:large_x_pq} and \eqref{eq:large_x}. The result is
\begin{eqnarray}
\label{eq:scaling_h_as}
\lefteqn{\beta\Delta F^{(h,\perp)}_{\rm Cas}\simeq -\dfrac{2w^2}{K^{\perp}L^2} e^{-2w}h_1 h_L} \nonumber \\
&&\times \left\{
\begin{array}{ll}
\cos\left(\mathbf{k.\Delta}\right), & p\ne M \quad \mbox{or} \quad q\ne N,
\\
\cos 2 \pi (\Delta_x+ \Delta_y), & p=M, q=N.
\end{array} \right.
\end{eqnarray}
which implies that in this limit the transverse component of the force is exponentially small in $L$ and attractive {\it or} repulsive depending on the sign of the product $h_1 h_L \cos(\mathbf{k.\Delta})$ or $h_1 h_L \cos [2 \pi (\Delta_x+ \Delta_y)]$, respectively.
For the field contribution to the longitudinal component of the Casimir force along the $\alpha$ axis, where $\alpha=x,y$, one has
\begin{equation}
\label{eq:_def_field_x}
\beta \Delta F^{(h,\alpha)}_{\rm Cas}(L)=-\dfrac{\partial}{\partial \Delta_\alpha}\Delta f_h.
\end{equation}
Thus, from Eqs. \eqref{eq:delta_fh_final_pq} and
\eqref{eq:delta_fh_final}
one derives
{\it i)} if $p\ne M$ or $q\ne N$:
\begin{equation}
\label{eq:Casimir_longit_pq}
\beta\Delta F^{(h,\alpha)}_{\rm Cas}(L)=-\frac{h_1 h_L }{4 K^{\perp}} k_\alpha \sin (\mathbf{k.\Delta}) \frac{\sinh(\lambda)}{\sinh[\lambda(L+1)]}
\end{equation}
and
{\it ii)} if $p=M$ and $q=N$
\begin{eqnarray}
\label{eq:Casimir_longit}
\lefteqn{\beta\Delta F^{(h,\alpha)}_{\rm Cas}(L) = -\frac{ \pi \sin [2 \pi (\Delta_x+ \Delta_y)]}{2 K^{\perp}} h_L} \\
&& \times \Bigg\{ h_1 \frac{\sinh(\lambda)}{\sinh[(L+1)\lambda]} \nonumber\\
&& + h_L \cos[2 \pi (\Delta_x+ \Delta_y)] \left[\Lambda-\frac{\sinh(\lambda)}{\tanh[(L+1)\lambda]}\right]\Bigg\}. \nonumber
\end{eqnarray}
When $L\lambda\gg 1$ the above simplifies to
{\it i)} if $p\ne M$ or $q\ne N$:
\begin{equation}
\label{eq:scaling_h_as_x_pq}
\beta\Delta F^{(h,\alpha)}_{\rm Cas}(L) \simeq -\dfrac{k_\alpha}{2 K^{\perp}} \sinh[\lambda] e^{-(L+1)\lambda} h_1 h_L \sin\left(\mathbf{k.\Delta}\right)
\end{equation}
and
{\it ii)} if $p=M$ and $q=N$
\begin{eqnarray}
\label{eq:scaling_h_as_x}
\lefteqn{\beta\Delta F^{(h,\alpha)}_{\rm Cas}(L) \simeq } \\
&& -\frac{ \pi h_L^2}{4 K^{\perp}} \sin [4 \pi (\Delta_x+ \Delta_y)] \left\lbrace \Lambda-\sinh[\lambda]\right\rbrace \nonumber \\
&& -\dfrac{\pi}{K^{\perp}} \sinh[\lambda] e^{-(L+1)\lambda} h_1 h_L \sin [2 \pi (\Delta_x+ \Delta_y)]. \nonumber
\end{eqnarray}
Note that in the first sub-case the $L\gg 1$ limit of the lateral force is zero; in the second sub-case, when the average value of the external field on the upper surface is not zero, the lateral force tends to a finite, well defined limit which is proportional to the surface area of the system. Obviously, this force has the meaning of a local purely surface force.
Subtracting from $\Delta F^{(h,\alpha)}_{\rm Cas}$ its $L$-independent part we obtain the lateral force that will act on the upper surface due to the presence of the lower one if we act in lateral direction on the upper one. In the case $p=M$ and $q=N$ one obtains
\begin{eqnarray}
\label{eq:F_long}
\lefteqn{\beta\delta F^{(h,\alpha)}_{\rm Cas}(L) \equiv \beta\left[\Delta F^{(h,\alpha)}_{\rm Cas}(L)-\lim_{L\to\infty}\Delta F^{(h,\alpha)}_{\rm Cas}(L)\right]}\nonumber\\
&& =-\frac{ \pi h_L }{2 K^{\perp}} \sin [2 \pi (\Delta_x+ \Delta_y)] \sinh(\lambda) \Bigg\{ h_1/ \sinh[(L+1)\lambda] \nonumber \\
&& + h_L \cos[2 \pi (\Delta_x+ \Delta_y)] \left[1-\coth[(L+1)\lambda]\right]\Bigg\}.
\end{eqnarray}
In the other sub-case when $p\ne M$ or $q\ne N$ one has that $\beta\delta F^{(h,\alpha)}_{\rm Cas}(L)\equiv \beta\Delta F^{(h,\alpha)}_{\rm Cas}(L)$.
In scaling variables for $\beta\delta F^{(h,\alpha)}_{\rm Cas}(L)$ one has
\begin{equation}
\label{eq:sc_funct_field_long}
\beta\delta F^{(h,\alpha)}_{\rm Cas}(L)=L^{-3}\left(\frac{J^\perp}{J^\|}\right) X^{(h,\alpha)}_{\rm Cas}(w,x_1,x_L),
\end{equation}
where
{\it i)} if $p\ne M$ or $q\ne N$:
\begin{equation}
\label{eq:Casimir_longit_pq_scaling}
X^{(h,\alpha)}_{\rm Cas}=-\pi x_1 x_L \, p_\alpha \sin (\mathbf{k.\Delta}) \frac{w}{\sinh[2w]},
\end{equation}
where $p_\alpha=p$ for $\alpha=x$, and $p_\alpha=q$ for $\alpha=y$.
{\it ii)} if $p=M$ and $q=N$:
\begin{eqnarray}
\label{eq:F_long_scaling}
\lefteqn{X^{(h,\alpha)}_{\rm Cas}=-\pi x_L w \sin [2 \pi (\Delta_x+ \Delta_y)] } \\
&& \times \Bigg\{ x_1/ \sinh[2w] + x_L \cos[2 \pi (\Delta_x+ \Delta_y)] \left[1-\coth(2w)\right]\Bigg\}. \nonumber
\end{eqnarray}
\eq{eq:sc_funct_field_long} implies that in the scaling regime the longitudinal Casimir force is of the same order of magnitude as the orthogonal component of the force.
Let us now clarify the physical meaning of the regimes $\omega={\cal O}(1)$ and $\omega\gg 1$ in terms of the temperature $T$. Taking into account \eq{eq:Lambda_def} one has
\begin{equation}
\label{eq:Lambda_def_delta}
\Lambda = 1+\left(\frac{\beta_c}{\beta}-1\right)\left[ 2\frac{J^{\|}}{J^\perp}+1\right]+2\dfrac{J^{\|}}{J^{\perp}}\left[\sin^2 \frac{k_x}{2} +\sin^2 \frac{k_y}{2}\right],
\end{equation}
where $k_x=2 \pi p/{M}$, $k_y=2 \pi q/{N}$, as well as all the other terms in the sum determining $\Lambda$ are dimensionless. We again have to consider two sub-cases:
{\it i)} if $p\ne M$ or $q\ne N$.
In this case, in order to have $\lambda$ small, one needs to have $\beta/\beta_c\to 1$ and $k_\alpha\to 0$, $\alpha=x,y$. Under these conditions one has
\begin{equation}
\label{eq:lambda_eq}
\lambda \simeq \sqrt{2\left(\frac{\beta_c}{\beta}-1\right)\left[ 2\frac{J^{\|}}{J^\perp}+1\right]+\dfrac{J^{\|}}{J^{\perp}}\left[k_x^2 +k_y^2\right]}.
\end{equation}
Then
\begin{equation}
\label{eq:omega_eq}
\omega=\frac{1}{2}\sqrt{x_t^2+x_k^2},
\end{equation}
where $x_t$ and $x_k$ are defined in \eq{eq:xt_and_xk}. From \eq{eq:omega_eq} it is clear that in order to have $\omega = {\cal O}(1)$ one needs to have simultaneously $x_t={\cal O}(1)$ and $x_k={\cal O}(1)$. Taking into account that $\nu=1/2$ for the Gaussian model, one has that $x_t^2$ is in its expected form $a_t t L^{1/\nu}$, with $t=(T-T_c)/T_c$. The condition $x_k={\cal O}(1)$ implies that in order to encounter the regime $\omega = {\cal O}(1)$ one needs to have a modulation with a wave vector $k\lesssim L^{-1}$ which includes, e.g., the $k=0$ case. If $x_k\gg 1$ one will have, even at the critical point $\beta=\beta_c$, that $\omega\gg 1$ and, according to \eq{eq:scaling_h_as}, that the field contributions to the Casimir force will then be exponentially small.
{\it ii)} if $p=M$ and $q=N$.
As it is clear from \eq{eq:Lambda_def_delta}, this sub-case reduces to the previously considered one with $k_x=k_y=0$. The last implies that, then, $\omega=x_t/2$.
When $\omega = {\cal O}(1)$, from Eqs. \eqref{eq:Casimir_tr_pq} and \eqref{eq:Casimir_tr} with $h_1 = {\cal O}(1)$ and $h_L = {\cal O}(1)$ one has that $\Delta F^{(h,\perp)}_{\rm Cas} = {\cal O}(L^{-2})$, i.e., the field part of the transverse force in this case is an order of magnitude {\it larger} in $L$ than the usual transverse Casimir force, which is of the order of ${\cal O}(L^{-3})$.
The behavior of the function $X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)$ is visualized in Fig. \ref{Fig:3D_G_h1_eq_hL_Legend} if {\it i)} $p\ne M$ or $q\ne N$ and in Fig. \ref{Fig:3D_G_h1_eq_hL_Legend_MN}
if {\it ii)} $p=M$ and $q=N$.
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_eq_hL_Legend_compressed.pdf}
\caption{(Color online) The scaling function $X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)$, see \eq{eq:scaling_h_pq}, as a function of $w\in (0,10]$ and $\left(\mathbf{k.\Delta}\right)\in [0,2\pi]$ for $x_1=x_L=1$. As we see, $X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)$ can be both positive and negative, depending on the values of its arguments.}
\label{Fig:3D_G_h1_eq_hL_Legend}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_eq_hL_Legend_MN_compressed.pdf}
\caption{(Color online) The scaling function $X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)$, see \eq{eq:scaling_h}, as a function of $w\in(0,10]$ and $\Delta_x+ \Delta_y\in [0,1]$ for $x_1=x_L=1$. As we see, also in this case $X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)$ can be both positive and negative, depending on the values of its arguments. Let us recall that in this sub-case $\omega=x_t/2$. }
\label{Fig:3D_G_h1_eq_hL_Legend_MN}
\end{figure}
We observe, inspecting the legends, that the maximal values of the function $X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)$ are in this case smaller than in previous case shown in Fig. \ref{Fig:3D_G_h1_eq_hL_Legend}.
Let us turn now to the behavior of the total orthogonal Casimir force $F^{(\perp)}_{\rm Cas}$. From Eqs. \eqref{eq:fe_short}, \eqref{eq:_def_no_field}, \eqref{eq:f_0}, \eqref{eq:F_Cas_no_field}, \eqref{eq:_def_field} and \eqref{eq:sc_funct_field} one has
\begin{equation}
\label{eq:ort_force}
F^{(\perp)}_{\rm Cas}\equiv \Delta F^{(0,\perp)}_{\rm Cas}+\Delta F^{(h,\perp)}_{\rm Cas}
\end{equation}
and
\begin{equation}
\label{eq:ort_force_sf}
\beta F^{(\perp)}_{\rm Cas}=L^{-3}\left(\frac{J^\perp}{J^\|}\right) X^{(\perp)}_{\rm Cas}(x_t,x_k,x_1,x_L).
\end{equation}
The behavior of the scaling function of the total orthogonal Casimir force $X^{(\perp)}_{\rm Cas}(x_t,x_k,x_1,x_L)$ is depicted in Figs. \ref{Fig:3D_G_h1_eq_hL_Legend_total_force} - \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_2} for the case when {\it i)} $p\ne M$ or $q\ne N$ and in the Figs. \ref{Fig:3D_G_h1_eq_hL_Legend_total_force_MN} for the case {\it ii)} $p=M$ and $q=N$ with $x_k=0$. Let us note that in the case {\it i)} the function $X^{(\perp)}_{\rm Cas}$ is symmetric about $x_1$ and $x_L$, while in the case {\it ii)} that is not so. The last implies that when $x_1\ne x_L$ in the case {\it ii)} we have to consider separately the sub-case $x_1\gg x_L$ and $x_1 \ll x_L$.
\begin{figure}[h]
\centering
\includegraphics[width=\columnwidth]{3D_G_h1_eq_hL_Legend_total_force_compressed}
\caption{(Color online) The scaling function
$X^{(\perp)}_{\rm Cas}(x_t,x_k,x_1,x_L)$
as a function of $x_t\in(0,10]$ and $\mathbf{k.\Delta}\in [0,2\pi]$ for $x_k=0.1$, $x_1=x_L=1$. As we see, $X^{(\perp)}_{\rm Cas}$ can be both positive and negative, depending on the values of its arguments.}
\label{Fig:3D_G_h1_eq_hL_Legend_total_force}
\end{figure}
Figs. \ref{Fig:3D_G_h1_eq_hL_Legend_total_force} and \ref{Fig:3D_G_h1_eq_hL_Legend_total_force_MN} show the behavior of the force for equal values of the field scaling variables $x_1=x_L$. When they are not equal this behavior is visualized in Figs. \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_1} and \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_2} for the case {\it i)} and in Figs. \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_1_MN}, \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_11_MN} and \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_2_MN} for the case {\it ii)}. Figs. \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_1} and \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_1_MN} represent the situation when $x_1\gg x_L$, namely $x_1=10 x_L$, while Figs. \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_2} and \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_2_MN} represent the results for the case when $x_1=-x_L=1$.
The comparison of these figures with Figs. (\ref{Fig:3D_G_h1_eq_hL_Legend}) and (\ref{Fig:3D_G_h1_eq_hL_Legend_MN}) shows, as it might be expected from the data presented in Fig. (\ref{Fig:3d_G_X_Cas_Zero_Field}), that the contribution of $X^{(0,\perp)}_{\rm Cas}(x_t)$ to the overall behavior of the force is quite small, at least in the depicted cases.
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_nor_eq_hL_Legend_total_force_1_compressed.pdf}
\caption{(Color online) The scaling function
$X^{(\perp)}_{\rm Cas}(x_t,x_k,x_1,x_L)$
as a function of $x_t\in(0,10]$ and $\mathbf{k.\Delta}\in [0,2\pi]$ for $x_k=0.1$, $x_1=10 x_L=1$. As we see, the scaling function in that case is predominantly positive.}
\label{Fig:3D_G_h1_not_eq_hL_Legend_total_force_1}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_nor_eq_hL_Legend_total_force_2_compressed.pdf}
\caption{(Color online) The scaling function
$X^{(\perp)}_{\rm Cas}(x_t,x_k,x_1,x_L)$
as a function of $x_t\in(0,10]$ and $\mathbf{k.\Delta}\in [0,2\pi]$ for $x_k=0.1$, $ x_1=-x_L=1$. As we see, the scaling function in that case can be both positive and negative, depending on the values of its arguments.}
\label{Fig:3D_G_h1_not_eq_hL_Legend_total_force_2}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[width=\columnwidth]{3D_G_h1_eq_hL_Legend_total_force_MN_compressed}
\caption{(Color online) The scaling function
$X^{(\perp)}_{\rm Cas}(x_t,x_k=0,x_1,x_L)$
as a function of $x_t\in (0,10]$ and $\Delta_x+\Delta_y\in [0,1]$ for $x_1=x_L=1$. As we see, $X^{(\perp)}_{\rm Cas}$ can be both positive and negative, depending on the values of its arguments.}
\label{Fig:3D_G_h1_eq_hL_Legend_total_force_MN}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_nor_eq_hL_Legend_total_force_1_MN_compressed.pdf}
\caption{(Color online) The scaling function
$X^{(\perp)}_{\rm Cas}(x_t,x_k=0,x_1,x_L)$
as a function of $x_t\in (0,10]$ and $\Delta_x+\Delta_y\in [0,1]$ for $x_1=10 x_L=1$. As we see, the scaling function in that case is predominantly positive.}
\label{Fig:3D_G_h1_not_eq_hL_Legend_total_force_1_MN}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_nor_eq_hL_Legend_total_force_11_MN_compressed.pdf}
\caption{(Color online) The scaling function
$X^{(\perp)}_{\rm Cas}(x_t,x_k=0,x_1,x_L)$
as a function of $x_t\in (0,10]$ and $\Delta_x+\Delta_y\in [0,1]$ for $10 x_1= x_L=1$. As we see, the scaling function in that case can be both positive and negative.}
\label{Fig:3D_G_h1_not_eq_hL_Legend_total_force_11_MN}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_nor_eq_hL_Legend_total_force_2_MN_compressed.pdf}
\caption{(Color online) The scaling function
$X^{(\perp)}_{\rm Cas}(x_t,x_k=0,x_1,x_L)$
as a function of $x_t\in (0,10]$ and $\Delta_x+\Delta_y\in [0,1]$ for $x_1= -x_L=1$ or $x_1=-x_L=-1$. As we see, the scaling function in that case can be both positive and negative.}
\label{Fig:3D_G_h1_not_eq_hL_Legend_total_force_2_MN}
\end{figure}
Let us now consider the behavior of the longitudinal Casimir force.
We first note that it does not have a contribution that is field-independent. Thus, the scaling function, which characterizes this force, is given by \eq{eq:Casimir_longit_pq_scaling} and
\eq{eq:F_long_scaling}.
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_eq_hL_Legend_lat_compressed.pdf}
\caption{(Color online) The scaling function $X^{(h,\alpha)}_{\rm Cas}(w,x_1,x_L)$, see \eq{eq:Casimir_longit_pq_scaling}, as a function of $w\in (0,3]$ and $\left(\mathbf{k.\Delta}\right)\in [0,2\pi]$ for $x_1=x_L=1$. }
\label{Fig:3D_G_h1_eq_hL_Legend_lat}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_eq_hL_Legend_MN_lat_compressed.pdf}
\caption{(Color online) The scaling function $X^{(h,\alpha)}_{\rm Cas}(w,x_1,x_L)$, see \eq{eq:F_long_scaling}, as a function of $w\in(0,3]$ and $\Delta_x+ \Delta_y\in [0,1]$ for $x_1=x_L=1$. Let us recall that in this sub-case $\omega=x_t/2$. }
\label{Fig:3D_G_h1_eq_hL_Legend_MN_lat}
\end{figure}
Because of the term $\sin (\mathbf{k.\Delta})$, multiplying the expression for the force in the first case, and of the term $\sin [2 \pi (\Delta_x+ \Delta_y)]$ in the second case, the scaling function $X^{(h,\alpha)}_{\rm Cas}$ can be both positive and negative, independently of the values of $x_1$ and/or $x_L$.
\section{The 3d mean-field XY model} \label{sec:3dmf}
\subsection{With infinite surface fields} \label{subsec:infiniteh}
In Ref. \cite{BDR2011} the $XY$ model characterized by the functional
\begin{multline}
{\cal F}\left[ {\bf m};t,L\right]=\int_{-L/2}^{L/2} dz\,\left[\frac{b}{2}\left|\frac{d{\bf m}}{dz}\right|^2+\frac{1}{2}at\left|\textbf{m}\right|^2\right.\\
\left.+\frac{1}{4}g\left|{\bf m}\right|^4\right],
\label{LGenergyfunctional}
\end{multline}
has been studied in the presence of what have been termed twisted boundary conditions.
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3d_Casimir_Force_XY_5_compressed.pdf}
\caption{(Color online) The scaling function $X^{(\alpha)}_{\rm Cas}(x_t)$ of the $XY$ model under twisted boundary conditions as a function of $x_t$ and $\alpha$ for $h=0$. The plane surface marks the $X^{(\alpha)}_{\rm Cas}(x_t)=0$ value of the force: the force is repulsive above it and attractive below it.}
\label{Fig:3d_Casimir_Force_XY}
\end{figure}
Switching to polar coordinates,
\begin{equation}
{\bf m}(z)=\left(\Phi(z)\cos\varphi(z),\Phi(z)\sin\varphi(z)\right),
\end{equation}
these boundary conditions can conveniently be defined by requiring that \begin{eqnarray}
&\varphi(\pm L/2)=\pm \alpha/2,\nonumber\\
&\Phi(\pm L/2) = \infty,
\label{boundaryconditions}
\end{eqnarray}
i.e., the moments at the boundaries are twisted by an angle $\alpha$ relative to one another. It has been shown that the Casimir force has the form
\begin{equation}\label{cas}
\beta F_{\rm Cas}(t,L)=\frac{b}{\hat{g}}L^{-4}X_{\rm Cas}^{(\alpha)}(x_t),
\end{equation}
where $\hat{a}=a/b$, $\hat{g}=g/b$, $x_t=\hat{a}t L^{2}$ and
\begin{equation}\label{casscalingfunctionpandtau}
X_{\rm Cas}^{(\alpha)}(x_t)=\left\{ \begin{array}{cc}
X_0^4[p^2- \left(1+\tau \right)], & x_t \ge 0 \\
X_0^4[p^2- \left(1+\tau/2 \right)^2], & x_t \le 0
\end{array} \right. .
\end{equation}
Here
\begin{equation}\label{newvarscaling}
\tau=x_t/X_0^2,
\end{equation}
where
\begin{equation}\label{xoonpt}
X_0=\int_1^\infty \frac{dx}{\sqrt{(x-1)[x^2+x(1+\tau)+p^2]}},
\end{equation}
and $p$ is to be determined for any fixed value of $x_t$ so that the twisted spins at the boundary make the prescribed angle $\alpha$. Let
\begin{equation}\label{roots}
x_\pm=\frac{1}{2} \left[-(\tau +1)\pm\sqrt{(\tau +1)^2-4 p^2}\right]
\end{equation}
be the roots of the quadratic term in the square brackets in the denominator of (\ref{xoonpt}). There are two subcases: {\it A)}
the roots are real, and {\it B)} the roots are complex conjugates of each
other.
{\it A)} The roots $x_\pm$ are real. Then
\begin{equation}\label{x0det}
X_0=\frac{2}{\sqrt{1-x_-}}K\left[\sqrt{\frac{x_+-x_-}{1-x_-}} \right]
\end{equation}
and
\begin{multline}\label{alphaasafunctionoftherootsfinal}
\alpha=\frac{\sqrt{|x_- x_+|} X_0}{x_-} \bigg\{1\\
-\frac{2}{X_0 \sqrt{1-x_-}} \Pi \left[\frac{x_-}{x_- -1},\sqrt{\frac{x_+-x_-}{1-x_-}} \right]\bigg\}.
\end{multline}
We note that
\begin{equation}\label{relxtp}
\tau=-1-x_--x_+,\qquad p=\sqrt{|x_- x_+|}.
\end{equation}
{\it B)} The roots $x_\pm$ are complex.
One has
\begin{equation}\label{cx0det}
X_0=\frac{2}{\sqrt{r}} K\left(w\right),
\end{equation}
and
\begin{multline}\label{calphaasafunctionoftherootsfinal}
\alpha = \frac{p X_0}{1-r}+\frac{4p}{r^2-1} \sqrt{\frac{r}{1-w^2}}\\
\times\Pi\left[\left(\frac{r-1}{r+1}\right)^2,\frac{w}{\sqrt{w^2-1}}\right],
\end{multline}
where
\begin{eqnarray}\label{notations1}
r\equiv r(x_-,x_+)&=&\sqrt{(1-x_-)(1-x_+)} \nonumber\\
&=& \sqrt{2+\tau+p^2},
\end{eqnarray}
and
\begin{multline}\label{notations2}
w^2\equiv w^2(x_-,x_+) = \frac{1}{2}+\frac{\frac{x_- + x_+}{2}-1}{2
\sqrt{(1-x_-) (1-x_+)}}\\
= \frac{1}{2}\left(1-\frac{3+\tau}{2\sqrt{2+\tau+p^2}} \right).
\end{multline}
The scaling function $X^{(\alpha)}_{\rm Cas}(x_t)$ of the $XY$ model under twisted boundary conditions as a function of $x_t$ and $\alpha$ is shown in Fig. \ref{Fig:3d_Casimir_Force_XY}.
We recall that, as shown in Ref. \cite{BDR2011}, the asymptotic expression for $X_{\rm Cas}^{(\alpha)}(x_t)$ is
\begin{equation}\label{casscalingfunctionasympXY}
X_{\rm Cas}^{(\alpha)}(x_t)\simeq \frac{1}{2} \alpha^2 \left[|x_t|+4
\sqrt{2|x_t|}+\frac{1}{2} \left(48-3 \alpha ^2\right)\right],
\end{equation}
when $x_t\to-\infty$. According to \eq{cas} the last implies that in this regime
\begin{equation}
\label{casMFas}
\beta F_{\rm Cas}(t,L)\simeq \frac{1}{2} \alpha^2 \frac{b}{\hat{g}} |x_t|L^{-4} = \frac{1}{2}\frac{a b}{g} \alpha^2 |t| L^{-2},
\end{equation}
i.e., its leading behavior is of the order of $L^{-2}$ there due to the existence of helicity within the system.
\subsection{With finite surface fields}
The model described immediately above constrains the spins at the surface of the film to point in particular directions. The physical realization of such a system is much more likely to be one in which the spins at the surfaces are under the influence of finite surface fields. Here, we consider a model for such a system. In order to do so, we employ the approach utilized in Section II of \cite{BDR2011}, in which the spin system occupies sites on a lattice that is infinite in extent in two directions and that consists of a finite number of layers (here labeled 1 to $L$) in the third dimension. We impose surface fields that couple in the standard way to the spins on the leftmost layer, labeled 1, and the rightmost layer, labeled $L$. The magnitude of each of those fields is $h_s$, and the angle between them is $\alpha$. In our mean field approach, the free energy is minimized by adjusting the expectation value of the amplitude and direction of the spins in each layer. The Casimir force follows from the difference between the free energies with $L$ and $L+1$ layers; because of the numerical nature of the free energy results, we are unable to take the derivative with respect to film thickness, as in Section \ref{sec:continuum}.
We find that the Casimir force is consistent with the following scaling form
\begin{equation}
F_{\rm Cas} = L^{-4}f(tL^2, h_sL) \label{eq:3dmf1}
\end{equation}
where $t$ is the bulk reduced temperature. Furthermore, for small enough $h_s$ and $t$ higher than the value at which the film orders spontaneously, the function $f$ on the right hand side of (\ref{eq:3dmf1}) has the form
\begin{equation}
f(tL^2, h_sL) = f_0(tL^2) + f_1(tL^2) \left(h_sL \right)^2 + O\left( \left(h_sL \right)^4\right). \label{eq:3dmf2}
\end{equation}
Because of this, for small $h_s$ it is possible to envision the behavior of the Casimir force that one encounters in the Gaussian model.
Figure \ref{fig:3dmfplot1} is a plot of the scaled Casimir force versus the scaled reduced temperature and scaled surface fields for two values of the film thickness, $L$. The perspective highlights the departure from the behavior in (\ref{eq:3dmf2}) that occurs when the temperature is sufficiently far below the bulk critical temperature that the moments in the film order spontaneously. The films in question consist of $L=50$ and $L=100$ layers, and the angle between the two surface fields is $\alpha = \pi/3$. As is clear from the figure, the difference between the two plots is quite small.
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=3in]{3dmfplot1a.pdf}
\caption{(Color online) Scaled Casimir force, $L^4F_{\rm Cas}$, as a function of the scaled reduced temperature, $tL^2$ and scaled surface field amplitude, $h_sL$. The number of layers in the two films are $L=50$ and $L=100$, and $\alpha$, the angle between the surface fields, is $\pi/3$. The difference between the two plots is barely discernible, indicating that the difference between the scaling function for $L=50$ and the infinite $L$ limit is quite small. }
\label{fig:3dmfplot1}
\end{center}
\end{figure}
As indicated in Fig. \ref{fig:3dmfplot1}, $L=50$ is sufficiently large that the difference between the function and the scaling limit is quite small. Figure \ref{fig:3dmfplot2} illustrates the dependence of the scaled Casimir force on the scaled surface field amplitude for various values of the scaled reduced temperature.
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=3in]{3dmfplot2.pdf}
\caption{(Color online) Scaled Casimir force, $L^4 F_{\rm Cas}$, as a function of the scaled surface field, $h_sL$ for various scaled reduced temperatures, $tL^2$. Here, $L=50$ and $\alpha= \pi/3$. When $tL^2>-\pi^2$ the small $h_s$ dependence of the Casimir force is quadratic, consistent with (\ref{eq:3dmf2}). Below that value of the scaled reduced temperature, the small $h_s$ dependence is linear in the absolute value of that quantity, as exemplified by the curve for $tL^2=-15$. }
\label{fig:3dmfplot2}
\end{center}
\end{figure}
For all reduced temperatures greater than $-\pi^2$, the initial dependence on scaled surface fields is quadratic, consistent with (\ref{eq:3dmf2}). In fact for temperatures at and above the bulk critical temperature ($t \ge 0$) the second term in the right hand side of (\ref{eq:3dmf2}) is the leading non-zero contribution to that expansion. This is consistent with the amplification of the Casimir force that one finds in the Gaussian model---see Section \ref{sec:Gaussian}. However, such amplification only occurs when there is spontaneous ordering in the film. Figure \ref{fig:3dmfplot3} shows the scaled Casimir force as a function of the scaled surface field for $tL^2=5$ and $tL^2=-5$, above and below the bulk transition but above the threshold for film ordering. This plot illustrates the saturation of the Casimir force when the reduced temperature is above the threshold for film ordering, $tL^2=-\pi^2$.
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=3in]{3dmfplot3.pdf}
\caption{(Color online) Dependence of the scaled Casimir force on the scaled surface field for two values of scaled reduced temperature above the point, $tL^2=-\pi^2$, at which spontaneous ordering occurs in the film. Here, $L=50$ and $\alpha = \pi/3$. The plots illustrate the saturation of the influence of the surface fields, at odds with the amplification effect seen in Section \ref{sec:Gaussian}. The figure also illustrates the fact that the Casimir force can change sign as the temperature is varied. This is due to the fact that there is a range of temperatures below the bulk critical temperature in which the bulk system orders while the film remains disordered. For $T>T_c$ both the bulk and the finite system are disordered. For $|h_s|\gg 1$ the Casimir force approaches its value for fixed boundary conditions, the case considered in Subsection \ref{subsec:infiniteh}.}
\label{fig:3dmfplot3}
\end{center}
\end{figure}
The Casimir force changes sign as $L$ increases for fixed $\alpha$, $T$ and $h_s$. This is displayed in Fig. \ref{fig:above_tc}.
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=3in]{change_sign_above_tc.pdf}
\caption{(Color online) Scaled Casimir force, $L^2 F_{\rm Cas}$, as a function of $L$, for fixed values of temperature $t=0.001$, helicity $\alpha= \pi/3$ and value of the surface field amplitude $h_s=0.1$. }
\label{fig:above_tc}
\end{center}
\end{figure}
We also note that the force changes sign for moderate values of $L$. It can readily be established that the overall behavior of the Casimir force is
in accord with Eq. (\ref{eq:3dmf2}); see, for instance, Fig. \ref{fig:3dmfplot3}.
If spontaneous ordering is possible, then amplification of the Casimir force does occur. Figure \ref{fig:3dmfplot4} plots the newly scaled Casimir force $L^2F_{\rm Cas}$ against system size $L$, illustrating the enhanced force amplitude as a function of system size, $L$, expressed in terms of the scaled variable $tL^2$. Here, the reduced temperature is fixed at $t=-0.05$, while the surface field amplitudes are set to $0.05$, $\alpha = \pi/3$ and the system size varies from $L=2$ to $L=3,000$. The behavior displayed is a direct result of the energy stored in the helical spin configuration, a response to the surface fields that are tilted with respect to each other.
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=3in]{3dmfplot4.pdf}
\caption{(Color online) Illustrating the $L$ dependence of the Casimir force for a negative value of reduced temperature, $t=-0.05$ with surface field amplitude $h_s=0.05$ and $\alpha=\pi/3$. The plot is generated by varying the film thickness $L$ for fixed values of $t$, $h_s$ and $\alpha$. The large graph shows how $L^2F_{\rm Cas}$ varies over an extended range of film thicknesses $L$, and the inset shows the $L$ dependence over a much smaller range.}
\label{fig:3dmfplot4}
\end{center}
\end{figure}
Of additional interest in this plot is the variation of the Casimir force for smaller values of $L$, shown in the inset. Note the change in the sign of the Casimir force. A Casimir force going as $L^{-2}$ is consistent with the energy associated with a helicity modulus, which is natural given that the $XY$ system supports such a modulus in the regime in which it spontaneously orders. In this case the surface fields play the essential role of enforcing a helical structure on the order parameter when spontaneous ordering occurs.
The enhanced Casimir force is consistent with the scaling form of (\ref{eq:3dmf1}). Figure \ref{fig:3dmfplot5} displays the dependence of the scaled Casimir force $L^4 F_{\rm Cas}$ on the scaled variable $tL^2$.
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=3in]{3dmfplot5.pdf}
\caption{(Color online) The scaled Casimir force, $L^4 F_{\rm Cas}$, as a function of the scaled variable $tL^2$. The thickness of the film is $L=50$, the surface field amplitudes have been set to 0.01 and the angle between them, $\alpha$, is $\pi/3$. }
\label{fig:3dmfplot5}
\end{center}
\end{figure}
An important feature of this plot is its linear dependence on the scaled reduced temperature when it is sizable and negative. This leads to an overall $L$ dependence going as $L^{-2}$. Another significant property of the critical Casimir force plotted in Fig. \ref{fig:3dmfplot5} is its change in sign in the vicinity of the bulk critical point. In this sense, the Casimir force is tunable---and can be changed from attractive to repulsive---through a variation in temperature.
Finally, Fig. \ref{fig:3dmfplot6} displays the dependence of the scaled Casimir force, $L^4F_{\rm Cas}$, on scaled reduced temperature, $tL^2$ and scaled surface field amplitude, $h_sL$ for a variety of values of the angular difference, $\alpha$, between the two surface fields. As shown in the plots, when $\alpha$ increases from $0$ to $\pi$ the minimum of the force becomes shallower and the region of parameters $tL^2$ and $h_sL$ in which the force is repulsive expands. We also note that the amplitude of the force for any fixed combination of the parameters $tL^2$ and $h_sL$ is a monotonically increasing function of $\alpha$. The force is attractive in the whole region of $h_sL$ and $tL^2$ values only for $\alpha=0$.
\begin{widetext}
\begin{figure}[h!]
\begin{center}
\includegraphics[width=7in]{3dmfplot6_new_compressed.pdf}
\caption{(Color online) Scaled Casimir force, $L^4F_{\rm Cas}$, as a function of the scaled reduced temperature, $tL^2$ and scaled surface field amplitude, $h_sL$. The number of layers in the film is $L=50$. The values of $\alpha$, the angle between the surface fields, reading left to right and then top to bottom, are {\bf a}: 0, {\bf b}: $\pi/2$, {\bf c}: $2 \pi/3$ and {\bf d}: $\pi$. }
\label{fig:3dmfplot6}
\end{center}
\end{figure}
\end{widetext}
\pagebreak
\section{Discussion and concluding remarks} \label{sec:conclusions}
The Casimir force has provided an unexpectedly rich and varied set of phenomena for study and potential exploitation. In this paper, we have attempted to demonstrate that interactions between the bounding system and the media that supports the Casimir force allow for the possibility of utilizing those interactions, here parameterized as surface fields, to control---and in certain cases greatly amplify---that force. Our focus has been the critical Casimir force, but a number of our results extend far beyond the critical regime. We find that the angle between surface fields can significantly affect the magnitude and the sign of the Casimir force, that variations in temperature can also have such an effect, and that the strength of the critical Casimir force can undergo substantial amplification as a consequence of the application of surface fields. Such fields represent a useful and likely accurate quantification of the action of modifications of the structure or composition of bounding surfaces in the medium giving rise to the Casimir force. Thus, the results presented here could well be utilized or expanded upon to motivate experimental investigations of the effects of surface patterning on the Casimir force.
The key findings reported here are twofold. First, the combination of helicity and surface fields allows for the manipulation of both the sign and the amplitude of the Casimir force. In certain circumstances---particularly when the system supports helicity in the bulk---the force can be greatly amplified in magnitude. The second finding is that the expressions describing the Casimir force are consistent with the expectations of finite size scaling, as embodied in Eqs. (\ref{eq:F_Cas_scaling}), (\ref{eq:F_Cas_scaling_Heis}), (\ref{eq:F_Cas_no_field}), (\ref{eq:sc_funct_field}), (\ref{eq:sc_funct_field_long}), (\ref{cas}) and (\ref{eq:3dmf1}).
One possible setting for an experimental study might be a nematic liquid crystal film. Here, the order parameter is quadrupolar, rather than dipolar as in the case of the $XY$ or Heisenberg models, but the continuous symmetry with respect to rotation of the order parameter is nevertheless in the same general class as in the systems considered here. In fact, a class of Liquid Crystal Display (LCD) devices operates on the basis of inducing a helical structure in liquid crystalline films \cite{LCDGray}. It is also possible that the results reported here are applicable to the case of a liquid Helium film in the superfluid state in which a temperature gradient exists between the substrate on which the film has condensed and the gas phase bordering its free surface. Such a temperature gradient induces flow in the superfluid component, which entails a rotation of the superfluid wave function in the complex plane \cite{[{R. P. Feynman in }] Gorter,Ginz_PIt}.
The models investigated here are unlikely to be directly realized in nature, either because of their low dimensionality, or because they neglect important phenomena such as saturation of the order parameter as in the Gaussian model or are based on approximations, such as the mean field theory. Nevertheless, we are confident in the overall import of our results: that surface fields and helicity in the medium that generates the Casimir force are likely to prove quite significant as experimentally accessible modifiers of that force. How those surface fields are to be generated will vary from system to system, but there is every reason to anticipate that ways will be found and that the result will be a greater insight into the Casimir force and, one hopes, new and useful applications of this interaction.
\acknowledgements{D.D. gratefully acknowledges the financial support via contract DN02/8 of Bulgarian NSF. J. R. is pleased to acknowledge support from the NSF through DMR Grant No. 1006128.}
\section{Introduction}
\label{Section_Introduction}
Casimir forces result from, and provide insight into, the behavior of a medium confined to a restricted space, canonically the region between two plane, parallel surfaces. In the case of the electromagnetic Casimir force, the medium is the vacuum, and the underlying mechanism is the set of quantum zero point or temperature fluctuations of the electromagnetic field. The now widely-investigated critical Casimir force (CCF) results from the fluctuations of an order parameter and more generally the thermodynamics of the medium supporting that order parameter in the vicinity of a critical point. In fact, the free energy of a confined medium can mediate a Casimir force at any temperature provided its excitations are long-range correlated ones. This fact, along with the wide range of options for a mediating substance opens up a range of possibilities for the study and exploitation of the Casimir force arising from a confined medium.
One of the principal influences on the Casimir force is the nature of the bounding surface. With respect to the CCF, published investigations have been focused, almost exclusively, on systems belonging to the Ising universality class. On a basic level, based on the behavior of coupling in the vicinity of the surface, there are three universality classes---extraordinary (or normal), ordinary and surface-bulk (or special), ones \cite{D86,K94,BDT2000}. Experimental investigations into the influence of surface universality classes on the Casimir force have been reported in \cite{SZHHB2007,RBM2007,HHGDB2008,GMHNHBD2009,NHC2009,NDHCNVB2011,ZAB2011}. Most of them focus on the behavior of colloids in a critical solvent. They probe the dependence of the force between boundaries on temperature, the concentration of the components of the solvent and the relative preference of the surfaces of the colloids for the components of the solvent. For example, in \cite{GMHNHBD2009} the critical thermal noise in a solvent medium consisting of a binary liquid mixture of water and 2,6-lutidine near its lower consolute point is shown to lead to attractive or repulsive forces, depending on the relative adsorption preferences of the colloid and substrate surfaces with respect to the two components of the binary liquid mixture. On the theoretical side, the influence of the surface fields has been studied on the case of two dimensional Ising model via exact calculation \cite{NN2008,NN2009,AM2010,NN2016}, using the variational formulation due to Mikheev and Fisher \cite{B2015,Z2012}, with the help of density-matrix renormalization-group numerical method \cite{MCD99,DMC2000,DME2000,ZMD2013}, via conformal invariance \cite{VED2013,JRT2015}, Monte Carlo methods \cite{VED2013}, and numerically using bond propagation algorithms \cite{WI2015}. 
The three dimensional Ising model has been studied with Monte Carlo methods in \cite{VGMD2009,H2011,VMD2011,TTD2013,VD2013,V2014,MVDD2015}, mean-field type calculations \cite{PD2004,K97,MMD2010,VDK2012,THD2015,VD2015} and renormalized local functional theory \cite{OO2012}. In general, it has been shown that the Casimir force depends on the strength of the surface fields $h_1$ and $h_2$ and that it can change sign as the magnitudes of the surface field, the thickness of the films, and the temperature of the system are varied.
For the general case of $O(n)$ systems there is no similarly thorough classification \cite{P90}. References \cite{APP91,HK92,ZPZ98,KG99,BLF2000,GAF2001,HSD2004,KNSP2013,HNSP2014} report on studies of the Casimir force in liquid crystals, and \cite{I86,GC99,GC2002,ZRK2004,GSGC2006,MGD2007,UBMCR2003} describe investigations for $^4$He and $^3$He--$^4$He mixtures. In the case of Helium films, however, it is generally accepted that the boundary conditions are determined, in the region where the liquid behaves as a quantum liquid, by its quantum nature and, thus, cannot be easily influenced by modification of bounding surfaces, in that there are no surface fields that couple to the order parameter in such systems. In that respect liquid crystals seem much more readily adjustable, and in particular more amenable to the influence of boundary conditions. For example, in Ref. \cite{APP91} it is shown that director fluctuations in nematics induce long-range interactions between walls, which are attractive with symmetric boundary conditions, but may become repulsive with mixed ones. In smectics such forces are longer ranged than van der Waals ones.
In \cite{ZPZ98} the authors concluded that in the case of finite surface coupling, the fluctuation-induced forces for nematics are weaker than in the strong anchoring limit. In the example of three-dimensional lattice XY model with nearest neighbor interaction, it has been shown \cite{BDR2011} that the Casimir force depends in a continuous way on the parameter $\alpha$ characterizing the so-called twisted boundary conditions when the angle between the vector order parameter at the two boundaries is $\alpha$ where $0<\alpha\le \pi$. The effect is essential; depending on $\alpha$ the force can be attractive or repulsive. By varying $\alpha$ and/or the temperature $T$ one can control both the sign and the magnitude of the Casimir force in a reversible way. Furthermore, when $\alpha = \pi $, an additional phase transition, which occurs only in finite systems, has been discovered, associated with the spontaneous symmetry breaking of the direction of rotation of the vector order parameter through the body of the system.
In the current article we show that the strength and the mutual orientation of surface fields---as well as structuring on the surface via chemical or other alterations that can be described in terms of surface fields---lead to interesting and substantial modifications in the behavior of the force between the confining surfaces. Such modifications include the change of the sign of the force, non-monotonic behavior, the appearance of multiple minima and of a longitudinal Casimir force, and also an amplification of the force in regions with strong helicity effects. We will demonstrate the above with the example of a few models: the one-dimensional XY and Heisenberg models, the three-dimensional Gaussian model and the three-dimensional $O(2)$ XY model.
We start with the one-dimensional XY and Heisenberg models.
\section{1d continuum symmetry models with boundary fields} \label{sec:continuum}
\label{1d_systems}
Here we consider two one-dimensional models with continuous $O(n)$ spin symmetry: XY ($n=2$) and Heisenberg ($n=3$) chains of $N$ spins with ferromagnetic interaction $J$ between nearest-neighbor spins,
the boundary fields ${\mathbf H}_1$ and ${\mathbf H}_2$ of which are at an angle $0\le\psi\le \pi$ with respect to each other. Obviously, such systems do not exhibit spontaneous ordering at non-zero temperatures given their low dimension and the short-range nature of the interactions between spins, as has been shown to follow rigorously from the Mermin-Wagner theorem \cite{MW66}. Nevertheless, they possess an essential singular point at $T=0$ and will, in that limit, support spontaneous order. We will demonstrate that when the boundary fields are non-zero the Casimir force, $F_{\rm Cas}$, of these systems displays very rich and interesting behavior. We also show that near $T=0$ the force has a scaling behavior and that, depending on the angle between the boundary fields and the value of the temperature scaling variable $x\sim N k_B T/J$, this force can be {\it attractive} or {\it repulsive}. More precisely, we will establish that:
\begin{enumerate}
\item[i)]For low temperatures, when $x={\cal O}(1)$ and
\begin{equation}
\label{eq:constraint}
N\gg J\left(\frac{1}{H_1}+\frac{1}{H_2}\right)
\end{equation}
the leading behavior of the Casimir force can be written in the form
\begin{equation}
\label{eq:1dCas_gen}
\beta F_{\rm Cas}(T,N,{\bf H}_1,{\bf H}_2)=N^{-1}X_{\rm Cas}(\psi,x),
\end{equation}
with $x$ a scaling variable and $X_{\rm Cas}$ a universal scaling function. Equation (\ref{eq:1dCas_gen}) implies that, under constraint \eq{eq:constraint}, $X_{\rm Cas}$ depends only on the scaling variable $x$ defined in (\ref{eq:scaling_variables}) and the angle $\psi$. The latter parameter effectively describes the boundary conditions on the system. Note that, unlike the Ising model, the boundary conditions depend here {\it continuously} on one parameter---in our notation $\psi$.
\item[ii)] When $x\to 0+$ the scaling function of the Casimir force becomes positive, i.e., the force turns {\it repulsive} provided that $\psi \ne 0$. In that case $X_{\rm Cas}\sim x^{-1}$ and, thus, the overall $N$-dependence of the force is of the order of $N^{-2}$.
\item[iii)] When $x\gtrsim 1$ the scaling function has a sign that depends on the sign of $\cos(\psi)$: for $0<|\psi|<\pi/2$ the force will be {\it attractive}, while for $\pi/2<|\psi|<\pi$ it will be {\it repulsive}. For $x\gg 1$ the force decays exponentially to zero.
\item[iv)] For any $\psi$ such that $0<|\psi|<\pi/2$ the Casimir force {\it changes from attractive to repulsive} when the temperature decreases from a moderate value to zero for fixed system size, $N$.
\item[v)] When $\psi=0$ the force is attractive for {\it any} value of the scaling variable $x$.
\end{enumerate}
These 1d models have been studied analytically in the case of free (frequently termed ``open'' or Dirichlet) and periodic boundary conditions \cite{F64,J67,J67b,S68,PB2011}, but we are not aware of any investigation of them in the presence of boundary fields, which are responsible for the effects of interest in this article.
\subsection{The 1d XY model}
\label{sec:1dXY_model}
We consider a system with the Hamiltonian
\begin{equation}
\label{eq:def_1d_Ham}
{\cal H} = -J \sum _{i=1}^{N-1} {\mathbf S}_i.{\mathbf S}_{i+1}-{\mathbf H}_1.{\mathbf S}_1-{\mathbf H}_N.{\mathbf S}_N
\end{equation}
where ${\mathbf S}_i$, with ${\mathbf S}_i^2=1$ and ${\mathbf S}_i \in \mathbb{R}^2$, $i=1,\cdots,N$, are $N$ spins arranged along a straight line. The Hamiltonian can be written in the form
\begin{eqnarray}
\label{eq:system_angles}
{\cal H} &=& -J \sum _{i=1}^{N-1} \cos \left(\varphi _{i+1}-\varphi _i\right)
\\&&
-H_1 \cos \left(\psi _1-\varphi _1\right)-H_N
\cos \left(\psi _N-\varphi _N\right), \nonumber
\end{eqnarray}
where the angles $\psi_1, \psi_N$ and $\varphi _1,\cdots,\varphi _N$ are measured with respect to the line of the chain which is taken to be, say, the $x$ axis. The free energy $F_N$ of this system is given by
\begin{equation}
\label{eq:free_energy}
\exp \left(-\beta F_N\right)=\int_{0}^{2\pi}\exp \left(-\beta {\cal H}\right)\ \ \prod_{i=1}^{N} \frac{d\varphi_i}{2\pi}.
\end{equation}
Performing the requisite calculations (see Appendix \ref{app:XY}) one obtains
\begin{eqnarray}
\label{eq:free_energy_calculated}
\lefteqn{\exp \left(-\beta F_N\right)}\\&=&\sum _{k=-\infty }^{\infty } \exp \left(i k \psi\right) I_k\left(h_1\right) I_k(K){}^{N-1} I_k\left(h_N\right)\nonumber
\end{eqnarray}
where
\begin{equation}
\label{eq:def_parameters}
\psi \equiv (\psi_1-\psi_N), K\equiv \beta J, h_1\equiv \beta H_1, h_N\equiv\beta H_N.
\end{equation}
Note that the free energy depends only on the difference in angles, $(\psi_1-\psi_N)$, and not on $\psi_1$ and $\psi_N$ separately. For the Casimir force in the system, i.e., for the finite size part of the total force, see \eq{tot}, one then has the {\it exact} expression
\begin{widetext}
\begin{equation}
\label{FCas}
\beta F_{\text{Cas}}=
\frac{ 2\sum _{k=1}^{\infty }
\cos \left[k (\psi_1-\psi_N)\right] \log \left[\frac{I_k(K)}{I_0(K)}\right]\frac{I_k\left(h_1\right)}{I_0\left(h_1\right)} \left(\frac{I_k(K)}{I_0(K)}\right)^{N-1} \frac{I_k\left(h_N\right)}{I_0\left(h_N\right)}
}{1+2 \sum _{k=1}^{\infty } \cos \left[k (\psi_1-\psi_N)\right] \frac{I_k\left(h_1\right)}{I_0\left(h_1\right)} \left(\frac{I_k(K)}{I_0(K)}\right)^{N-1} \frac{I_k\left(h_N\right)}{I_0\left(h_N\right)}}
\end{equation}
\end{widetext}
From here on we will be interested in the behavior of the system in the limit $\beta\gg 1$, i.e., when $T\to 0$. Obviously, when $\beta\gg 1$ from \eq{eq:def_parameters} one has $h_1\gg 1$, $h_N\gg 1$ and $K\gg 1$, which means that in \eq{eq:free_energy_calculated} one uses the large argument asymptote of $I_{k}(z)$ for $z\gg 1$. We will use the asymptote in the form reported in \cite{SP85}
\begin{equation}
\label{eq:as_form_SP}
I_\nu(z)= \frac{e^{z-\nu^2/2z}}{\sqrt{2\pi z}}\left[1+\frac{1}{8 z}+{\cal O}\left(\frac{\nu^2}{z^2}\right)\right].
\end{equation}
Retaining only the first term in the above expansion, one obtains
\begin{equation}
\label{eq:F_Cas_scaling_form}
\beta F_{\rm Cas}(x)=\frac{1}{N_{\rm eff}}X_{\rm Cas}(\psi,x,h_{\rm eff})
\end{equation}
where
\begin{equation}
\label{eq:F_Cas_scaling}
X_{\rm Cas}=-x\frac{ \sum _{k=1}^{\infty } k^2
\cos \left(k \psi\right) \exp
\left[-\frac{1}{2}
k^2\left(h_{\rm eff}^{-1}+x\right)\right]}{
1+2 \sum _{k=1}^{\infty } \cos \left(k \psi\right) \exp \left[-\frac{1}{2}
k^2\left(h_{\rm eff}^{-1}+x\right)\right]},
\end{equation}
and
\begin{equation}
\label{eq:scaling_variables}
x\equiv \frac{N_{\rm eff}}{K}, \qquad h_{\rm eff}^{-1}=h_1^{-1}+h_N^{-1}, \qquad N_{\rm eff}=N-1.
\end{equation}
Here, $x$ is the scaled version of the reduced temperature variable, which in systems with a non-zero transition temperature takes the form $x=t^{\nu}L$, with $t$ the reduced temperature $\propto T-T_c$, $L$ the characteristic size of the finite system and $\nu$ the correlation length exponent. Recall that with an effective transition temperature of $T=0$ and $K \propto 1/T$, the definition in (\ref{eq:scaling_variables}) is consistent with this definition under the assumption that $\nu=1$.
Obviously, when \eq{eq:constraint} is fulfilled
one has $x\gg h_{\rm eff}^{-1}$, and one can safely ignore $h_{\rm eff}$ in \eq{eq:F_Cas_scaling}. Then the behavior of the force is exactly as stated in \eq{eq:1dCas_gen}.
The representation of $X_{\rm Cas}$ given by \eq{eq:F_Cas_scaling} is convenient for all values of $x$ except in the limit $x \ll 1$. For that limit, using the Poisson identity \eq{eq:Poisson}, one obtains
\begin{eqnarray}
\label{eq:F_Cas_scaling_small_x}
\lefteqn{X_{\rm Cas}(\psi,x,h_{\rm eff})=-\frac{x}{2
\left(x+h_{\rm eff}^{-1}\right)}}\\
&&+\frac{x}{2\left(x+h_{\rm eff}^{-1}\right)^2}\frac{ \sum _{n=-\infty}^{\infty} \left(2 n \pi
+\psi\right)^2
\exp{\left[-\frac{\left(2 n \pi +\psi\right)^2}{2
\left(x+h_{\rm eff}^{-1}\right)}\right]}}{
\sum _{n=-\infty}^{\infty}
\exp{\left[-\frac{\left(2 n \pi +\psi\right)^2}{2
\left(x+h_{\rm eff}^{-1}\right)}\right]}}. \nonumber
\end{eqnarray}
Under the assumption that the constraint \eqref{eq:constraint} is fulfilled and given the asymptotic behavior of $X_{\rm Cas}$ from Eqs. \eqref{eq:F_Cas_scaling} and \eqref{eq:F_Cas_scaling_small_x}, we derive
\begin{equation}
\label{X_cas_ass}
X_{\rm Cas}(\psi,x)=\left\{ \begin{array}{ll}
-\frac{1}{2}+\frac{1}{2x} \psi^2+\cdots, & x\to 0+ \\
-x \cos(\psi) \exp(-x/2), & x \gg 1.
\end{array}
\right.
\end{equation}
\begin{figure}[h]
\includegraphics[width=\columnwidth]{1d_X_Cas.pdf}
\caption{(Color online) The scaling function $X_{\rm Cas}$ of the XY model as a function of the scaling variable $x$, see \eq{eq:scaling_variables}, for different values of the phase change $\psi$.}
\label{Fig:1d_X_Cas}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\columnwidth]{1d_X_Cas_Surface_compressed.pdf}
\caption{(Color online) The surface of the scaling function $X_{\rm Cas}(\psi,x)$ of the XY model as a function of the scaling variables $x$ and $\psi$. The horizontal plane marks the $X_{\rm Cas}=0$ value.}
\label{Fig:1d_X_Cas_Surface}
\end{figure}
From \eq{eq:F_Cas_scaling_small_x} one can also derive an expression for the low $T$ behavior of the system that retains the dependence on $H_1$ and $H_2$. The result is
\begin{eqnarray}
\label{low_T_behavior}
\beta F_{\rm Cas}&=&-\frac{1}{2}\frac{1}{
\left(J/H_1 + J/H_N+N-1\right)}\nonumber \\ && +\frac{1}{2}K\frac{\left(\psi _1-\psi
_N\right)^2 }{
\left(J/H_1+J/H_N+N-1\right)^2}.
\end{eqnarray}
This result can also be derived directly by realizing that the ground state of the system is a spin wave such that the end spins are twisted with respect to each other at an angle $\psi=\psi_1-\psi_N$.
Equations \eqref{eq:F_Cas_scaling}, \eqref{eq:F_Cas_scaling_small_x}, \eqref{X_cas_ass} and \eqref{low_T_behavior} confirm the validity of the statements i)-v) in the first part of this section. For example, \eq{eq:F_Cas_scaling} demonstrates that when $\psi=0$ the force is attractive for {\it any} value of the scaling variable $x$; \eq{X_cas_ass} then confirms this behavior for small and large values of the scaling variable $x$.
The behavior of the scaling function $X_{\rm Cas}(\psi,x)$ for different values of $\psi$ as a function of the scaling variable $x$ is shown in Fig. \ref{Fig:1d_X_Cas}. Fig. \ref{Fig:1d_X_Cas_Surface} shows a $3D$ plot of this function for $x\in[0,10]$ and $\psi\in[-\pi,\pi]$.
\subsection{The 1d Heisenberg model}
\label{sec:1dH_model}
The Hamiltonian of the system is again given by \eq{eq:def_1d_Ham} with the condition that now the $N$ spins ${\mathbf S}_i$, $i=1,\cdots,N$, again arranged along a straight line, are three-dimensional unit vectors ${\mathbf S}_i \in \mathbb{R}^3$.
\begin{figure}[h]
\includegraphics[width=\columnwidth]{1d_X_H_Cas.pdf}
\caption{(Color online) The scaling function $X_{\rm Cas}$ of the Heisenberg model as a function of the scaling variable $x$, see \eq{eq:def_FCas_Heis_scaling}, for different values of the phase change $\psi$.}
\label{Fig:1d_X_H_Cas}
\end{figure}
As shown in Appendix \ref{app:Heisenberg} the free energy of the system is given by the {\it exact} expression
\begin{widetext}
\begin{eqnarray}
\label{eq:free_energy_calcul_final_result}
\exp \left(-\beta F_N\right) &=& \left(\frac{\pi}{2K}\right)^{(N-1)/2}\frac{\pi}{2\sqrt{h_1h_N}} \sum_{n=0}^{\infty} (2n+1)P_n \left(\cos\psi_h\right) I_{n+1/2}(h_1) I_{n+1/2}(h_N) \left[I_{n+1/2}(K)\right]^{N-1}\\
&=& \frac{\sinh h_1}{h_1}\frac{\sinh h_N}{h_N}\left[\frac{\sinh K}{K}\right]^{N-1} \left\{1+ \sum_{n=1}^{\infty} (2n+1)P_n \left(\cos\psi_h\right) \frac{I_{n+1/2}(h_1)}{I_{1/2}(h_1)} \frac{I_{n+1/2}(h_N)}{I_{1/2}(h_N)} \left[\frac{I_{n+1/2}(K)}{I_{1/2}(K)} \right]^{N-1}\right\}, \nonumber
\end{eqnarray}
\end{widetext}
where $\psi_h$ is the angle between the vectors ${\mathbf H}_1$ and ${\mathbf H}_N$ and we have used that $I_{1/2}(x)=\sqrt{2/(\pi x)}\sinh(x)$. Here $I_{n+1/2}(z)$ is the modified Bessel function of the first kind of half-integer index, $P_n(x)$ is the Legendre polynomial of degree $n$ and $K$, $h_1$ and $h_N$ are defined in accord with \eq{eq:def_parameters_Heis}.
\begin{equation}
\label{eq:def_parameters_Heis}
K\equiv \beta J, h_1\equiv \beta H_1, h_N\equiv\beta H_N.
\end{equation}
When $h_1\to 0$ and $h_N\to 0$ the system considered becomes the one with Dirichlet boundary conditions, a case that was studied by M. E. Fisher in \cite{F64}. Taking into account that $I_{n+1/2}(x)=[2^{n+1/2}\Gamma(n+3/2)]^{-1}x^{n+1/2}+{\cal O}(x^{5/2+n})$ and that $P_0(x)=1$, one concludes that only the term with $n=0$ will contribute to the free energy in this case. One obtains
\begin{eqnarray}
\label{eq:free_energy_calcul_final_result_Dirichlet_bc}
\exp \left(-\beta F_N\right) &=& \left(\frac{\pi}{2K}\right)^{(N-1)/2}\left[I_{1/2}(K)\right]^{N-1}\\
&=&\left[\frac{\sinh K}{K}\right]^{N-1}. \nonumber
\end{eqnarray}
The last expression is precisely the result derived in \cite{F64}.
From \eq{eq:free_energy_calcul_final_result} one can easily derive the corresponding {\it exact} expression for the Casimir force for the one dimensional Heisenberg model. One has
\begin{widetext}
\begin{equation}
\label{eq:def_FCas_Heis}
\beta F_{\text{Cas}}=\frac{ \sum_{n=1}^{\infty} (2n+1)P_n \left(\cos\psi_h\right) \ln\left[\frac{I_{n+1/2}(K)}{I_{1/2}(K)}\right] \frac{I_{n+1/2}(h_1)}{I_{1/2}(h_1)} \frac{I_{n+1/2}(h_N)}{I_{1/2}(h_N)} \left[\frac{I_{n+1/2}(K)}{I_{1/2}(K)} \right]^{N-1}}{1+ \sum_{n=1}^{\infty} (2n+1)P_n \left(\cos\psi_h\right) \frac{I_{n+1/2}(h_1)}{I_{1/2}(h_1)} \frac{I_{n+1/2}(h_N)}{I_{1/2}(h_N)} \left[\frac{I_{n+1/2}(K)}{I_{1/2}(K)}\right]^{N-1}}.
\end{equation}
\end{widetext}
In the limit $T\to 0$ when $h_1\gg 1$, $h_N\gg 1$ and $K\gg 1$ from \eq{eq:as_form_SP} one obtains
\begin{equation}
\label{eq:F_Cas_scaling_Heis}
\beta F_{\rm Cas}(x)=\frac{1}{N_{\rm eff}}X_{\rm Cas}(\psi_h,x,h_{\rm eff})
\end{equation}
where the scaling variable $x$, as well as $h_{\rm eff}$, are as defined in \eq{eq:scaling_variables} while the scaling function $X_{\rm Cas}$ is
\begin{widetext}
\begin{equation}
\label{eq:def_FCas_Heis_scaling}
X_{\rm Cas}(\psi_h,x,h_{\rm eff})=-\frac{1}{2}x\frac{ \sum_{n=1}^{\infty} n(n+1)(2n+1)P_n \left(\cos\psi_h\right) \exp\left[-\frac{1}{2} n(n+1)\left(x+h_{\rm eff}^{-1}\right)\right]}{1+ \sum_{n=1}^{\infty}(2n+1)P_n \left(\cos\psi_h\right) \exp\left[-\frac{1}{2} n(n+1)\left(x+h_{\rm eff}^{-1}\right)\right]}.
\end{equation}
\end{widetext}
As in the case of the $XY$ model, when \eq{eq:constraint} is fulfilled one can ignore $h_{\rm eff}$ in the above expression. If not stated otherwise we will always suppose this to be the case. Then the scaling function $X_{\rm Cas}$ depends only on the scaling variable $x$ and the angle $\psi_h$ that parametrizes the boundary conditions on the system, exactly as set forth in \eq{eq:1dCas_gen}. The representation of $X_{\rm Cas}$ given by \eq{eq:def_FCas_Heis_scaling} is applicable for all values of $x$ except in the limit $x \ll 1$. Keeping in mind that $P_1(\cos \psi_h)=\cos\psi_h$, and in light of the fast decay of the terms in the sums in \eq{eq:def_FCas_Heis_scaling}, it is clear that for those very small values of $x$ the sign of the force will be determined by the sign of $\cos \psi_h$. For the leading behavior of the Casimir force when $x\ll 1$ one obtains
\begin{widetext}
\begin{equation}
\label{eq:def_FCas_Heis_ass}
X_{\rm Cas}(\psi_h,x,h_{\rm eff})=-1+
\frac{h_{\rm eff}^{-1}}{h_{\rm eff}^{-1}+x}
+
\frac{x (1-\cos\psi_h)}{\left(h_{\rm eff}^{-1}+x\right)^2}
+x\frac{
\coth
\left(\frac{1}{h_{\rm eff}^{-1}+x}\right)
-1}{\left(h_{\rm eff}^{-1}+x\right)^2},
\end{equation}
\end{widetext}
which follows from \eq{smallx_Heis}.
One can also derive the first three terms in that expansion by considering the $N$ dependence of the ground-state energy of the 1d Heisenberg model, assuming it to be in the form of a spin wave. Explicitly, for the behavior of the Casimir force for $T\to 0$ from \eq{eq:def_FCas_Heis_ass} one obtains
\begin{eqnarray}
\label{low_T_behavior_Heis}
\beta F_{\rm Cas}&=&-\frac{1}{
\left(J/H_1 + J/H_N+N-1\right)}\nonumber \\ && +K \frac{1-\cos\psi_h}{
\left(J/H_1+J/H_N+N-1\right)^2}.
\end{eqnarray}
The behavior of the scaling function $X_{\rm Cas}(\psi_h,x)$ for different values of $\psi_h$ as a function of the scaling variable $x$ is shown in Fig. \ref{Fig:1d_X_H_Cas} while Fig. \ref{Fig:1d_X_H_Cas_Surface} shows a $3D$ plot of this function for $x\in[0,10]$ and $\psi_h\in[-\pi,\pi]$.
Thus, for the overall behavior of the Casimir force as a function of $\psi_h$ one arrives at the same set of conclusions for the Heisenberg model as for the $XY$ model as a function of $\psi$, as summarized in statements i)-v).
\begin{figure}[h]
\includegraphics[width=\columnwidth]{1d_X_H_Cas_Surface_compressed.pdf}
\caption{(Color online) The surface of the scaling function $X_{\rm Cas}(\psi,x)$ of the Heisenberg model as a function of the scaling variables $x$ and $\psi$. The horizontal plane marks the $X_{\rm Cas}=0$ value.}
\label{Fig:1d_X_H_Cas_Surface}
\end{figure}
\section{The 3d Gaussian model} \label{sec:Gaussian}
Here, we focus on a system with scalar spins. This means that, strictly speaking, there is no helicity. However, the surface fields that influence the order parameter will have sinusoidal variation along the film boundaries, conforming to the behavior of the individual components of a field that induces helical order in a multi-component system. We therefore expect that the results to be derived and discussed in this section will be germane to corresponding behavior in such a system. We consider a planar discrete system containing $L$ two-dimensional layers with a Hamiltonian
\begin{widetext}
\begin{eqnarray}
\label{eq:def_Ham_GM}
-\beta \mathcal{H}&=&\sum _{x=1}^M \sum _{y=1}^N \Bigg\{K^\| \sum _{z=1}^L S_{x,y,z} \left(S_{x+1,y,z}+S_{x,y+1,z}\right)+K^\perp\sum _{z=1}^{L-1}
S_{x,y,z} S_{x,y,z+1}+h_1 S_{x,y,1} \cos \left(k_x x+k_y y\right) \nonumber\\
&& +h_L S_{x,y,L} \cos \left(k_x \left(x+\Delta _x\right)+k_y \left(y+\Delta
_y\right)\right)-s \sum_{z=1}^L S_{x,y,z}^2\Bigg\}
\end{eqnarray}
\end{widetext}
which describes a system with short-ranged nearest neighbor interactions possessing chemically modulated bounding surfaces situated at $z=1$ and $z=L$. Here $h_1=\beta H_1$ and $h_L=\beta H_L$ are the external fields acting only on the boundaries of the system. In the specific example considered the modulation depends on the coordinates $x$ and $y$ in a wave-like way specified by the applied surface fields $h_1\cos\left(k_x x+k_y y\right)\equiv h_1 \cos({\bf k}.{\bf r})$ and $h_L \cos [k_x \left(x+\Delta_x\right)+k_y \left(y+\Delta_y\right)]\equiv h_L\cos({\bf k}.({\bf r}+{\bf \Delta}))$, the phases of which are thus shifted with respect to each other by $\Delta_x$ in $x$ direction and by $\Delta_y$ in $y$ direction. Here ${\bf r}=(x,y)$, ${\bf k}=(k_x,k_y)$ and ${\bf \Delta}=(\Delta_x,\Delta_y)$. Periodic boundary conditions are applied along the $x$ and $y$ axes, while missing neighbor (Dirichlet) boundary conditions are imposed in the $z$ direction. These boundary conditions are expressed as follows:
\begin{equation}
\label{bc_def_per}
S_{1,y,z}=S_{M+1,y,z}, \qquad S_{x,1,z}=S_{x,N+1,z}
\end{equation}
and
\begin{equation}
\label{bc_Dirichlet}
S_{x,y,0}=0 \qquad \text{and} \qquad S_{x,y,L+1}=0.
\end{equation}
Given those boundary conditions, the Hamiltonian in Eq. (\ref{eq:def_Ham_GM}) can be rewritten in the form
\begin{eqnarray}
\label{eq:def_Ham_GM_final}
-\beta \mathcal{H}&=&\sum _{x=1}^M \sum _{y=1}^N \sum _{z=1}^L S_{x,y,z} \Bigg\{K^\| \left(S_{x+1,y,z}+S_{x,y+1,z}\right) \nonumber \\ && + K^\perp
S_{x,y,z+1} +\delta_{1,z} h_1 \cos \left[{\bf k}.{\bf r} \right] \nonumber\\
&& + \delta_{L,z} h_L \cos \left[{\bf k}.({\bf r}+{\bf \Delta}) \right]-s\; S_{x,y,z}\Bigg\}.
\end{eqnarray}
Since we will be considering the limit $M,N\to\infty$ we can always take the wave vector components $k_x$ and $k_y$ to coincide with $(2\pi p)/M $ and $(2\pi q)/N$ for some $p=1,\cdots, M$ and $q=1,\cdots,N$, respectively.
In Eqs. \eqref{eq:def_Ham_GM} and \eqref{eq:def_Ham_GM_final} one has
\begin{equation}
\label{eq:inte}
K^{\|}=\beta J^{\|}, \qquad \mbox{and} \qquad K^{\perp}=\beta J^{\perp},
\end{equation}
where $J^{\|}$ and $J^{\perp}$ are the strengths of the coupling constants along and perpendicular to the $L$ layers of the system. The parameter $s>0$ on the right hand side of \eqref{eq:def_Ham_GM_final} is subject to the constraint that it has a value that ensures the existence of the partition function of the system. It is easy to check that $2K^{\|}+K^{\perp}-s \equiv \beta (2J^{\|}+J^{\perp})-s=0$ determines the critical inverse temperature $\beta_c$ of the bulk model, i.e., one has
\begin{equation}
\beta_c=s/(2J^{\|}+J^{\perp}).
\label{betac}
\end{equation}
For the model defined above the Casimir force acting on the bounding planes at $z=1$ and $z=L$ has both orthogonal, $\beta F^{(\perp)}_{\rm Cas}$, and lateral, $\beta F^{(\|,\alpha)}_{\rm Cas}$, $\alpha=x$ or $\alpha=y$, components, which can be written in the form
\begin{equation}
\label{eq:gen_force}
\beta F^{(\cdots)}_{\rm Cas}=L^{-3}\left(\frac{J^\perp}{J^\|}\right) X^{(\cdots)}_{\rm Cas}(x_t,x_k,x_1,x_L),
\end{equation}
where $(\cdots)$ stands for either $(\perp)$ or $(\|,\alpha)$, with $\alpha=x$ or $\alpha=y$. Here
\begin{equation}
\label{eq:field_scaling_def}
x_{1}=\sqrt{L K^\|}\frac{h_1}{K^\perp}, \qquad x_{L}=\sqrt{L K^\|}\frac{h_L}{K^\perp},
\end{equation}
are the field-dependent scaling variables, $x_t$ is the temperature-dependent one with
\begin{equation}
\label{eq:xt_and_xk}
x_t=L\sqrt{2\left(\frac{\beta_c}{\beta}-1\right)\left[ 2\frac{J^{\|}}{J^\perp}+1\right]}, \qquad x_k=\sqrt{\dfrac{J^{\|}}{J^{\perp}}}\; L k,
\end{equation}
where $k=\sqrt{k_x^2+k_y^2}$. Here $x_k$ is the scaling variable related to the surface modulation. When $h_1={\cal O}(1)$ and $h_L={\cal O}(1)$ we will see that $F^{(\cdots)}_{\rm Cas}$ has a {\it field dependent contribution} which, in this regime, will provide the {\it leading} contribution to the force of the order of $L^{-2}$.
The Hamiltonian (\ref{eq:def_Ham_GM_final}) can be easily diagonalized in a standard way---see Appendix \ref{A:GM}. The resulting free energy of the system, $F$, is
\begin{equation}
\label{eq:fe_short}
F=\Delta F_0+\Delta F_h,
\end{equation}
where
\begin{eqnarray}
\label{eq:free_energy_GM}
\lefteqn{-\beta \Delta F_0 = \frac{1}{2} M N L \ln\pi} \\
&& -\frac{1}{2}\sum _{l=1}^L \sum _{m=1}^M
\sum _{n=1}^N \ln\left\{
s-K^{\|}\left[\cos \left(\frac{2 \pi
m}{M}\right)+\cos \left(\frac{2 \pi
n}{N}\right)\right]\nonumber \right. \\
&&\left.-K^{\perp} \cos\left(\frac{\pi
l}{L+1}\right)\right\} \nonumber
\end{eqnarray}
is the field independent part of the free energy and $\Delta F_h$, the field dependent contribution, is
i) when either $p\ne M$ or $q\ne N$:
\begin{eqnarray}
\label{eq:free_energy_h}
\lefteqn{-\beta \Delta F_h = \frac{MN}{8(L+1)} \times} \\
&& \sum _{l=1}^L \frac{\sin^2\left(\frac{\pi l}{L+1}\right)\left[h_1^2+h_L^2-2 h_L h_1
(-1)^l \cos(\mathbf{k.\Delta})\right]}{s-K^{\|}\left[\cos \left(\frac{2 \pi p
}{M} \right)+\cos \left(\frac{2 \pi q
}{N} \right)\right]-K^{\perp} \cos\left(\frac{\pi l}{L+1}\right)}, \nonumber
\end{eqnarray}
where $\mathbf{k}=(k_x=2 \pi p /{M},k_y=2 \pi q /{N})$, and $\mathbf{\Delta}=(\Delta_x,\Delta_y)$, and
ii) when $p=M$ and $q=N$:
\begin{eqnarray}
\label{eq:free_energy_h_cf}
\lefteqn{-\beta \Delta F_h = \frac{MN}{2(L+1)} \times} \\
&& \sum _{l=1}^L \frac{\sin^2\left(\frac{\pi l}{L+1}\right)\left[h_1-h_L
(-1)^l \cos \left(2 \pi (\Delta
_x+\Delta
_y)\right)\right]^2}{s-2K^{\|}-K^{\perp} \cos\left(\frac{\pi l}{L+1}\right)}. \nonumber
\end{eqnarray}
Note that there is a fundamental difference between the sub-cases in Eqs. \eqref{eq:free_energy_h} and \eqref{eq:free_energy_h_cf}; while in the first sub-case $i)$ the field applied on the surfaces is zero when spatially averaged, in the second sub-case $ii)$ it is a constant. In the last sub-case one can think of $h_L$ as a constant field acting on the second surface, twisted in direction with respect to the constant field $h_1$ applied to the first one, with a twist governed by $\Delta_x$ and $\Delta_y$.
Obviously
\begin{eqnarray}
\lefteqn{s - K^{\|}\left[\cos \left(\frac{2 \pi
m}{M}\right)+\cos \left(\frac{2 \pi
n}{N}\right)\right] -
K^{\perp} \cos\left(\frac{\pi
l}{L+1}\right)} \nonumber\\
&&=\left(\beta_c/\beta-1\right)\left[ 2K^{\|}+K^\perp\right] +K^{\perp} \left[1- \cos\left(\frac{\pi
l}{L+1}\right)\right]\nonumber \\
&&+K^{\|}\left[2-\cos \left(\frac{2 \pi
m}{M}\right)-\cos \left(\frac{2 \pi
n}{N}\right)\right] >0
\end{eqnarray}
for $\beta<\beta_c$. The above implies that the statistical sum of the infinite system exists for all $\beta<\beta_c$. The statistical sum of the finite system exists, however, under the less demanding constraint that
\begin{equation}
\label{eq:T_finite}
\left(\beta_c/\beta-1\right)\left[ 2J^{\|}+J^\perp\right] +J^{\perp} \left[1- \cos\left(\frac{\pi
}{L+1}\right)\right]>0.
\end{equation}
In the remainder we will assume that the constraint given by \eq{eq:T_finite} is fulfilled for all temperatures considered here.
For the contribution of the field-independent term to the transverse Casimir force
\begin{equation}
\label{eq:_def_no_field}
\beta\Delta F^{(0,\perp)}_{\rm Cas}=-\dfrac{\partial}{\partial L}(\beta\Delta f_0),
\end{equation}
with
\begin{equation}
\label{eq:f_0}
\Delta f_0=\lim_{M, N \to \infty}
\dfrac{\Delta F_0}{MN},
\end{equation}
it is demonstrated in Appendix \ref{A:GM} that
\begin{equation}
\label{eq:Cas_no_field}
\beta\Delta F^{(0,\perp)}_{\rm Cas}=-\frac{1}{2}\int _{-\pi }^{\pi
}\int _{-\pi }^{\pi }\delta
\left[\coth ((1+L) \delta)-1\right] \frac{d\theta
_1d\theta _2}{ (2 \pi )^2},
\end{equation}
where $\delta=\delta\left(\theta_1,\theta_2|\beta_c/\beta,J^\|/J^\perp\right)$ is given by the expression
\begin{eqnarray}
\label{eq:def_delta}
\cosh\delta &=& 1+\left(\frac{\beta_c}{\beta}-1\right)\left(1+ 2\frac{J^{\|}}{J^\perp}\right)\\
&& +\frac{J^\|}{J^\perp}\left(2-\cos \theta_1-\cos \theta_2\right). \nonumber
\end{eqnarray}
The result in \eq{eq:Cas_no_field} is an {\it exact} expression for $\beta\Delta F^{(0,\perp)}_{\rm Cas}$; no approximations have been made. Since $\coth(x)>1$ for $x>0$ one immediately concludes that $\Delta F^{(0,\perp)}_{\rm Cas}<0$, i.e., it is an {\it attractive} force, for {\it all} values of $L$. In order to obtain scaling and, thus, the scaling form of $\Delta F^{(0,\perp)}_{\rm Cas}$ we have to consider the regime $L\gg 1$.
Obviously, the Casimir force will then be exponentially small if $\delta$ is finite. For the scaling behavior of the force---see Appendix \ref{A:GM}---one obtains
\begin{equation}
\label{eq:F_Cas_no_field}
\beta\Delta F^{(0,\perp)}_{\rm Cas}=L^{-3}\left(\frac{J^\perp}{J^\|}\right) X^{(0,\perp)}_{\rm Cas}(x_t)
\end{equation}
where $X^{(0,\perp)}_{\rm Cas}(x_t)$ is the universal scaling function
\begin{eqnarray}
\label{eq:X_Cas_no_field_sf}
X^{(0,\perp)}_{\rm Cas}(x_t)&=&-\frac{1}{8 \pi}\Bigg\{
\text{Li}_3\left(e^{-2
x_t}\right)+2 x_t
\text{Li}_2\left(e^{-2
x_t}\right) \nonumber \\
&& -2 x_t^2 \ln \left(1-e^{-2
x_t}\right)\Bigg\}.
\end{eqnarray}
and the scaling variable $x_t$ is
\begin{equation}
\label{eq:delta_xt}
x_t=L \sqrt{2\left(\frac{\beta_c}{\beta}-1\right)\left(1+ 2\frac{J^{\|}}{J^\perp}\right)},
\end{equation}
in accord with \eq{delta_xt}. It is easy to show that $X^{(0,\perp)}_{\rm Cas}(x_t)$ is a {\it monotonically increasing} function of $x_t$. The behavior of $X^{(0,\perp)}_{\rm Cas}(x_t)$ is visualized in Fig.
\ref{Fig:3d_G_X_Cas_Zero_Field}.
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3d_G_X_Cas_Zero_Field.pdf}
\caption{(Color online) The scaling function $X^{(0,\perp)}_{\rm Cas}(x_t)$ as a function of the temperature dependent scaling variable $x_t$. The horizontal line marks the Casimir amplitude $X^{(0,\perp)}_{\rm Cas}(0)=-\zeta(3)/(8\pi)$.}
\label{Fig:3d_G_X_Cas_Zero_Field}
\end{figure}
At the critical point one has $x_t=0$ and then one immediately obtains the well known Casimir amplitude for the Gaussian model under Dirichlet boundary condition
\begin{equation}
\label{eq:F_Cas_no_field_ampl}
X^{(0,\perp)}_{\rm Cas}(x_t=0)=-\frac{\zeta(3)}{8 \pi}.
\end{equation}
It is easy to show that
\begin{equation}
\label{eq:asX0_Cas}
X^{(0,\perp)}_{\rm Cas}\simeq
\left\{\begin{array}{lcr}
-\frac{1}{8\pi} \exp(-2 x_t) \left[1+2 x_t
\left(1+x_t\right)\right],& x_t\gg 1&\\
&& \\
-\frac{1}{8 \pi
}\zeta (3)+\frac{1}{48 \pi} x_t^2 \left(6-4
x_t+x_t^2\right),& x_t\to 0.
\end{array} \right.
\end{equation}
For the field component of the transverse Casimir force
\begin{equation}
\label{eq:_def_field}
\beta\Delta F^{(h,\perp)}_{\rm Cas}=-\dfrac{\partial}{\partial L}(\beta\Delta f_h)
\end{equation}
where
\begin{equation}
\label{eq:f_h}
\Delta f_h=\lim_{M, N \to \infty}
\dfrac{\Delta F_h}{MN}
\end{equation}
one derives, see Eqs. \eqref{eq:delta_fh_final_pq} and \eqref{eq:delta_fh_final} in Appendix \ref{A:GM}:
{\it i)} if $p\ne M$ or $q\ne N$:
\begin{eqnarray}
\label{eq:Casimir_tr_pq}
\lefteqn{\beta \Delta F^{(h,\perp)}_{\rm Cas}=\frac{\lambda\sinh (\lambda)}{32 K^{\perp}}}\\
&&\times\left\{ \left[h_1^2+h_L^2-2 h_L h_1
\cos(\mathbf{k.\Delta})\right]^2 \text{csch}^2\left[\frac{1+L}{2} \lambda\right] \right.\nonumber\\
&&\left.-\left[h_1^2+h_L^2+2 h_L h_1
\cos(\mathbf{k.\Delta})\right]^2 \text{sech}^2\left[\frac{1+L}{2}
\lambda\right]\right\}. \nonumber
\end{eqnarray}
and
{\it ii)} if $p=M$ and $q=N$
\begin{eqnarray}
\label{eq:Casimir_tr}
\lefteqn{ \beta \Delta F^{(h,\perp)}_{\rm Cas}=\frac{\lambda\sinh (\lambda)}{32 K^{\perp}} }\\
&&\times\left\{ \left[h_1-h_L
\cos 2 \pi ( \Delta
_x+ \Delta
_y)\right]^2 \text{csch}^2\left[\frac{1+L}{2} \lambda\right] \right.\nonumber\\
&&\left.-\left[h_1+h_L
\cos 2 \pi ( \Delta
_x+ \Delta
_y)\right]^2 \text{sech}^2\left[\frac{1+L}{2}
\lambda\right]\right\}. \nonumber
\end{eqnarray}
Here we have introduced the helpful notation
\begin{equation}
\label{eq:x_def_1}
\cosh \lambda=\Lambda
\end{equation}
for the case when $\Lambda\ge 1$ and
\begin{equation}
\label{eq:x_def_2}
\cos \lambda=\Lambda
\end{equation}
in the opposite case when $\Lambda\le 1$. Note that
\begin{itemize}
\item
when $h_1={\cal O}(1)$, $h_L={\cal O}(1)$ and
\begin{equation}
\label{eq:w_def}
w=L\lambda/2
\end{equation}
is such that $w={\cal O}(1)$, {\it the Casimir force is of the order of }${\cal O}(L^{-2})$ despite the fact that the system is at a temperature {\it above} the bulk critical one.
\item If $h_1$ and $h_L$ are such that the field-dependent scaling variables $x_1={\cal O}(1)$ and $x_L={\cal O}(1)$, see \eq{eq:field_scaling_def}, then,
in terms of $w$, the Casimir force $\beta\Delta F^{(h,\perp)}_{\rm Cas}$ reads
\begin{equation}
\label{eq:sc_funct_field}
\beta\Delta F^{(h,\perp)}_{\rm Cas}=L^{-3}\left(\frac{J^\perp}{J^\|}\right) X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)
\end{equation}
where the scaling function $X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)$ is
{\it i)} if $p\ne M$ or $q\ne N$:
\begin{eqnarray}
\label{eq:scaling_h_pq}
\lefteqn{X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)=\frac{1}{8} w^2}\\
&& \times \left\{[x_1^2+x_L^2-2x_1x_L \cos\left(\mathbf{k.\Delta}\right)] \text{csch}^2 w \right. \nonumber \\
&&\left. - [x_1^2+x_L^2+2x_1 x_L \cos\left(\mathbf{k.\Delta}\right)] \text{sech}^2 w \right\}, \nonumber
\end{eqnarray}
and
{\it ii)} if $p=M$ and $q=N$
\begin{eqnarray}
\label{eq:scaling_h}
\lefteqn{X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L) = \frac{1}{8} w^2}\\
&& \times \left\{[x_1-x_L \cos 2 \pi ( \Delta
_x+ \Delta
_y)]^2 \text{csch}^2 w \right. \nonumber \\
&&\left. - [x_1+x_L \cos 2 \pi ( \Delta
_x+ \Delta
_y)]^2 \text{sech}^2 w \right\}. \nonumber
\end{eqnarray}
The latter expression implies that in the regime considered here the field-dependent part of the force is of the order of $L^{-3}$, as is the field-independent part of it.
\end{itemize}
The asymptotic behavior of $\Delta F^{(h,\perp)}_{\rm Cas}$ for $w\gg 1$ can be easily obtained from Eqs. \eqref{eq:large_x_pq} and \eqref{eq:large_x}. The result is
\begin{eqnarray}
\label{eq:scaling_h_as}
\lefteqn{\beta\Delta F^{(h,\perp)}_{\rm Cas}\simeq -\dfrac{2w^2}{K^{\perp}L^2} e^{-2w}h_1 h_L} \nonumber \\
&&\times \left\{
\begin{array}{ll}
\cos\left(\mathbf{k.\Delta}\right), & p\ne M \quad \mbox{or} \quad q\ne N,
\\
\cos 2 \pi (\Delta_x+ \Delta_y), & p=M, q=N.
\end{array} \right.
\end{eqnarray}
which implies that in this limit the transverse component of the force is exponentially small in $L$ and attractive {\it or} repulsive depending on the sign of the product $h_1 h_L \cos[\mathbf{k.\Delta}]$ or $h_1 h_L \cos 2 \pi (\Delta_x+ \Delta_y)$, respectively.
For the field contribution to the longitudinal component of the Casimir force along the $\alpha$ axis, where $\alpha=x,y$, one has
\begin{equation}
\label{eq:_def_field_x}
\beta \Delta F^{(h,\alpha)}_{\rm Cas}(L)=-\dfrac{\partial}{\partial \Delta_\alpha}\Delta f_h.
\end{equation}
Thus, from Eqs. \eqref{eq:delta_fh_final_pq} and
\eqref{eq:delta_fh_final}
one derives
{\it i)} if $p\ne M$ or $q\ne N$:
\begin{equation}
\label{eq:Casimir_longit_pq}
\beta\Delta F^{(h,\alpha)}_{\rm Cas}(L)=-\frac{h_1 h_L }{4 K^{\perp}} k_\alpha \sin (\mathbf{k.\Delta}) \frac{\sinh(\lambda)}{\sinh[\lambda(L+1)]}
\end{equation}
and
{\it ii)} if $p=M$ and $q=N$
\begin{eqnarray}
\label{eq:Casimir_longit}
\lefteqn{\beta\Delta F^{(h,\alpha)}_{\rm Cas}(L) = -\frac{ \pi \sin [2 \pi (\Delta_x+ \Delta_y)]}{2 K^{\perp}} h_L} \\
&& \times \Bigg\{ h_1 \frac{\sinh(\lambda)}{\sinh[(L+1)\lambda]} \nonumber\\
&& + h_L \cos[2 \pi (\Delta_x+ \Delta_y)] \left[\Lambda-\frac{\sinh(\lambda)}{\tanh [(L+1)\lambda]}\right]\Bigg\}. \nonumber
\end{eqnarray}
When $L\lambda\gg 1$ the above simplifies to
{\it i)} if $p\ne M$ or $q\ne N$:
\begin{equation}
\label{eq:scaling_h_as_x_pq}
\beta\Delta F^{(h,\alpha)}_{\rm Cas}(L) \simeq -\dfrac{k_\alpha}{2 K^{\perp}} \sinh[\lambda] e^{-(L+1)\lambda} h_1 h_L \sin\left(\mathbf{k.\Delta}\right)
\end{equation}
and
{\it ii)} if $p=M$ and $q=N$
\begin{eqnarray}
\label{eq:scaling_h_as_x}
\lefteqn{\beta\Delta F^{(h,\alpha)}_{\rm Cas}(L) \simeq } \\
&& -\frac{ \pi h_L^2}{4 K^{\perp}} \sin [4 \pi (\Delta_x+ \Delta_y)] \left\lbrace \Lambda-\sinh[\lambda]\right\rbrace \nonumber \\
&& -\dfrac{\pi}{K^{\perp}} \sinh[\lambda] e^{-(L+1)\lambda} h_1 h_L \sin [2 \pi (\Delta_x+ \Delta_y)]. \nonumber
\end{eqnarray}
Note that in the first sub-case the $L\gg 1$ limit of the lateral force is zero, while in the second sub-case, when the average value of the external field on the upper surface is not zero, the lateral force tends to a finite, well-defined limit which is proportional to the surface area of the system. Obviously, this force has the meaning of a local purely surface force.
Subtracting from $\Delta F^{(h,\alpha)}_{\rm Cas}$ its $L$-independent part we obtain the lateral force that will act on the upper surface due to the presence of the lower one if we act in lateral direction on the upper one. In the case $p=M$ and $q=N$ one obtains
\begin{eqnarray}
\label{eq:F_long}
\lefteqn{\beta\delta F^{(h,\alpha)}_{\rm Cas}(L) \equiv \beta\left[\Delta F^{(h,\alpha)}_{\rm Cas}(L)-\lim_{L\to\infty}\Delta F^{(h,\alpha)}_{\rm Cas}(L)\right]}\nonumber\\
&& =-\frac{ \pi h_L }{2 K^{\perp}} \sin [2 \pi (\Delta_x+ \Delta_y)] \sinh(\lambda) \Bigg\{ h_1/ \sinh[(L+1)\lambda] \nonumber \\
&& + h_L \cos[2 \pi (\Delta_x+ \Delta_y)] \left[1-\coth [(L+1)\lambda]\right]\Bigg\}.
\end{eqnarray}
In the other sub-case when $p\ne M$ or $q\ne N$ one has that $\beta\delta F^{(h,\alpha)}_{\rm Cas}(L)\equiv \beta\Delta F^{(h,\alpha)}_{\rm Cas}(L)$.
In scaling variables for $\beta\delta F^{(h,\alpha)}_{\rm Cas}(L)$ one has
\begin{equation}
\label{eq:sc_funct_field_long}
\beta\delta F^{(h,\alpha)}_{\rm Cas}(L)=L^{-3}\left(\frac{J^\perp}{J^\|}\right) X^{(h,\alpha)}_{\rm Cas}(w,x_1,x_L),
\end{equation}
where
{\it i)} if $p\ne M$ or $q\ne N$:
\begin{equation}
\label{eq:Casimir_longit_pq_scaling}
X^{(h,\alpha)}_{\rm Cas}=-\pi x_1 x_L \, p_\alpha \sin (\mathbf{k.\Delta}) \frac{\omega}{\sinh[2\omega]},
\end{equation}
where $p_\alpha=p$ for $\alpha=x$, and $p_\alpha=q$ for $\alpha=y$.
{\it ii)} if $p=M$ and $q=N$:
\begin{eqnarray}
\label{eq:F_long_scaling}
\lefteqn{X^{(h,\alpha)}_{\rm Cas}=-\pi x_L \omega \sin [2 \pi (\Delta_x+ \Delta_y)] } \\
&& \times \Bigg\{ x_1/ \sinh[2\omega] + x_L \cos[2 \pi (\Delta_x+ \Delta_y)] [1-\coth 2\omega]\Bigg\}. \nonumber
\end{eqnarray}
\eq{eq:sc_funct_field_long} implies that in the scaling regime the longitudinal Casimir force is of the same order of magnitude as the orthogonal component of the force.
Let us now clarify the physical meaning of the regimes $\omega={\cal O}(1)$ and $\omega\gg 1$ in terms of the temperature $T$. Taking into account \eq{eq:Lambda_def} one has
\begin{equation}
\label{eq:Lambda_def_delta}
\Lambda = 1+\left(\frac{\beta_c}{\beta}-1\right)\left[ 2\frac{J^{\|}}{J^\perp}+1\right]+2\dfrac{J^{\|}}{J^{\perp}}\left[\sin^2 \frac{k_x}{2} +\sin^2 \frac{k_y}{2}\right],
\end{equation}
where $k_x=2 \pi p/{M}$, $k_y=2 \pi q/{N}$, as well as all the other terms in the sum determining $\Lambda$ are dimensionless. We again have to consider two sub-cases:
{\it i)} if $p\ne M$ or $q\ne N$.
In this case, in order to have $\lambda$ small, one needs to have $\beta/\beta_c\to 1$, and $k_\alpha\to 0$, $\alpha=x,y$. Under these conditions one has
\begin{equation}
\label{eq:lambda_eq}
\lambda \simeq \sqrt{2\left(\frac{\beta_c}{\beta}-1\right)\left[ 2\frac{J^{\|}}{J^\perp}+1\right]+\dfrac{J^{\|}}{J^{\perp}}\left[k_x^2 +k_y^2\right]}.
\end{equation}
Then
\begin{equation}
\label{eq:omega_eq}
\omega=\frac{1}{2}\sqrt{x_t^2+x_k^2},
\end{equation}
where $x_t$ and $x_k$ are defined in \eq{eq:xt_and_xk}. From \eq{eq:omega_eq} it is clear that in order to have $\omega = {\cal O}(1)$ one needs to have simultaneously $x_t={\cal O}(1)$ and $x_k={\cal O}(1)$. Taking into account that $\nu=1/2$ for the Gaussian model, one has that $x_t^2$ is in its expected form $a_t t L^{1/\nu}$, with $t=(T-T_c)/T_c$. The condition $x_k={\cal O}(1)$ implies that in order to encounter the regime $\omega = {\cal O}(1)$ one needs to have a modulation with a wave vector $k\lesssim L^{-1}$ which includes, e.g., the $k=0$ case. If $x_k\gg 1$ one will have, even at the critical point $\beta=\beta_c$ that $\omega\gg 1$ and, according to \eq{eq:scaling_h_as}, that the field contributions into the Casimir force will be exponentially small then.
{\it ii)} if $p=M$ and $q=N$.
As it is clear from \eq{eq:Lambda_def_delta}, this sub-case reduces to the previously considered one with $k_x=k_y=0$. The latter implies that, in this case, $\omega=x_t/2$.
When $\omega = {\cal O}(1)$, from Eqs. \eqref{eq:Casimir_tr_pq} and \eqref{eq:Casimir_tr} with $h_1 = {\cal O}(1)$ and $h_L = {\cal O}(1)$ one has that $\Delta F^{(h,\perp)}_{\rm Cas} = {\cal O}(L^{-2})$, i.e., the field-dependent transverse force in this case is an order of magnitude {\it larger} in $L$ than the usual transverse Casimir force, which is of the order of ${\cal O}(L^{-3})$.
The behavior of the function $X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)$ is visualized in Fig. \ref{Fig:3D_G_h1_eq_hL_Legend} if {\it i)} $p\ne M$ or $q\ne N$ and in Fig. \ref{Fig:3D_G_h1_eq_hL_Legend_MN}
if {\it ii)} $p=M$ and $q=N$ .
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_eq_hL_Legend_compressed.pdf}
\caption{(Color online) The scaling function $X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)$, see \eq{eq:scaling_h_pq}, as a function of $w\in (0,10]$ and $\left(\mathbf{k.\Delta}\right)\in [0,2\pi]$ for $x_1=x_L=1$. As we see, $X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)$ can be both positive and negative, depending on the values of its arguments.}
\label{Fig:3D_G_h1_eq_hL_Legend}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_eq_hL_Legend_MN_compressed.pdf}
\caption{(Color online) The scaling function $X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)$, see \eq{eq:scaling_h}, as a function of $w\in(0,10]$ and $\Delta_x+ \Delta_y\in [0,1]$ for $x_1=x_L=1$. As we see, also in this case $X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)$ can be both positive and negative, depending on the values of its arguments. Let us remind that in this sub-case $\omega=x_t/2$. }
\label{Fig:3D_G_h1_eq_hL_Legend_MN}
\end{figure}
We observe, inspecting the legends, that the maximal values of the function $X^{(h,\perp)}_{\rm Cas}(w,x_1,x_L)$ are in this case smaller than in the previous case shown in Fig. \ref{Fig:3D_G_h1_eq_hL_Legend}.
Let us turn now to the behavior of the total orthogonal Casimir force $F^{(\perp)}_{\rm Cas}$. From Eqs. \eqref{eq:fe_short}, \eqref{eq:_def_no_field}, \eqref{eq:f_0}, \eqref{eq:F_Cas_no_field}, \eqref{eq:_def_field} and \eqref{eq:sc_funct_field} one has
\begin{equation}
\label{eq:ort_force}
F^{(\perp)}_{\rm Cas}\equiv \Delta F^{(0,\perp)}_{\rm Cas}+\Delta F^{(h,\perp)}_{\rm Cas}
\end{equation}
and
\begin{equation}
\label{eq:ort_force_sf}
\beta F^{(\perp)}_{\rm Cas}=L^{-3}\left(\frac{J^\perp}{J^\|}\right) X^{(\perp)}_{\rm Cas}(x_t,x_k,x_1,x_L).
\end{equation}
The behavior of the scaling function of the total orthogonal Casimir force $X^{(\perp)}_{\rm Cas}(x_t,x_k,x_1,x_L)$ is depicted in Figs. \ref{Fig:3D_G_h1_eq_hL_Legend_total_force} - \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_2} for the case when {\it i)} $p\ne M$ or $q\ne N$ and in Fig. \ref{Fig:3D_G_h1_eq_hL_Legend_total_force_MN} for the case {\it ii)} $p=M$ and $q=N$ with $x_k=0$. Let us note that in the case {\it i)} the function $X^{(\perp)}_{\rm Cas}$ is symmetric with respect to $x_1$ and $x_L$, while in the case {\it ii)} that is not so. The latter implies that when $x_1\ne x_L$ in the case {\it ii)} we have to consider separately the sub-cases $x_1\gg x_L$ and $x_1 \ll x_L$.
\begin{figure}[h]
\centering
\includegraphics[width=\columnwidth]{3D_G_h1_eq_hL_Legend_total_force_compressed}
\caption{(Color online) The scaling function
$X^{(\perp)}_{\rm Cas}(x_t,x_k,x_1,x_L)$
as a function of $x_t\in(0,10]$ and $\mathbf{k.\Delta}\in [0,2\pi]$ for $x_k=0.1$, $x_1=x_L=1$. As we see, $X^{(\perp)}_{\rm Cas}$ can be both positive and negative, depending on the values of its arguments.}
\label{Fig:3D_G_h1_eq_hL_Legend_total_force}
\end{figure}
Figs. \ref{Fig:3D_G_h1_eq_hL_Legend_total_force} and \ref{Fig:3D_G_h1_eq_hL_Legend_total_force_MN} show the behavior of the force for equal values of the field scaling variables $x_1=x_L$. When they are not equal this behavior is visualized in Figs. \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_1} and \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_2} for the case {\it i)} and in Figs. \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_1_MN}, \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_11_MN} and \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_2_MN} for the case {\it ii)}. Figs. \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_1} and \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_1_MN} represent the situation when $x_1\gg x_L$, namely $x_1=10 x_L$, while Figs. \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_2} and \ref{Fig:3D_G_h1_not_eq_hL_Legend_total_force_2_MN} represent the results for the case when $x_1=-x_L=1$.
The comparison of these figures with Figs. (\ref{Fig:3D_G_h1_eq_hL_Legend}) and (\ref{Fig:3D_G_h1_eq_hL_Legend_MN}) shows, as it might be expected from the data presented in Fig. (\ref{Fig:3d_G_X_Cas_Zero_Field}), that the contribution of $X^{(0,\perp)}_{\rm Cas}(x_t)$ to the overall behavior of the force is quite small, at least in the depicted cases.
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_nor_eq_hL_Legend_total_force_1_compressed.pdf}
\caption{(Color online) The scaling function
$X^{(\perp)}_{\rm Cas}(x_t,x_k,x_1,x_L)$
as a function of $x_t\in(0,10]$ and $\mathbf{k.\Delta}\in [0,2\pi]$ for $x_k=0.1$, $x_1=10 x_L=1$. As we see, the scaling function in that case is predominantly positive.}
\label{Fig:3D_G_h1_not_eq_hL_Legend_total_force_1}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_nor_eq_hL_Legend_total_force_2_compressed.pdf}
\caption{(Color online) The scaling function
$X^{(\perp)}_{\rm Cas}(x_t,x_k,x_1,x_L)$
as a function of $x_t\in(0,10]$ and $\mathbf{k.\Delta}\in [0,2\pi]$ for $x_k=0.1$, $ x_1=-x_L=1$. As we see, the scaling function in that case can be both positive and negative, depending on the values of its arguments.}
\label{Fig:3D_G_h1_not_eq_hL_Legend_total_force_2}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[width=\columnwidth]{3D_G_h1_eq_hL_Legend_total_force_MN_compressed}
\caption{(Color online) The scaling function
$X^{(\perp)}_{\rm Cas}(x_t,x_k=0,x_1,x_L)$
as a function of $x_t\in (0,10]$ and $\Delta_x+\Delta_y\in [0,1]$ for $x_1=x_L=1$. As we see, $X^{(\perp)}_{\rm Cas}$ can be both positive and negative, depending on the values of its arguments.}
\label{Fig:3D_G_h1_eq_hL_Legend_total_force_MN}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_nor_eq_hL_Legend_total_force_1_MN_compressed.pdf}
\caption{(Color online) The scaling function
$X^{(\perp)}_{\rm Cas}(x_t,x_k=0,x_1,x_L)$
as a function of $x_t\in (0,10]$ and $\Delta_x+\Delta_y\in [0,1]$ for $x_1=10 x_L=1$. As we see, the scaling function in that case is predominantly positive.}
\label{Fig:3D_G_h1_not_eq_hL_Legend_total_force_1_MN}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_nor_eq_hL_Legend_total_force_11_MN_compressed.pdf}
\caption{(Color online) The scaling function
$X^{(\perp)}_{\rm Cas}(x_t,x_k=0,x_1,x_L)$
as a function of $x_t\in (0,10]$ and $\Delta_x+\Delta_y\in [0,1]$ for $10 x_1= x_L=1$. As we see, the scaling function in that case can be both positive and negative.}
\label{Fig:3D_G_h1_not_eq_hL_Legend_total_force_11_MN}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_nor_eq_hL_Legend_total_force_2_MN_compressed.pdf}
\caption{(Color online) The scaling function
$X^{(\perp)}_{\rm Cas}(x_t,x_k=0,x_1,x_L)$
as a function of $x_t\in (0,10]$ and $\Delta_x+\Delta_y\in [0,1]$ for $x_1= -x_L=1$ or $x_1=-x_L=-1$. As we see, the scaling function in that case can be both positive and negative.}
\label{Fig:3D_G_h1_not_eq_hL_Legend_total_force_2_MN}
\end{figure}
Let us now consider the behavior of the longitudinal Casimir force.
We first note that it does not have a contribution that is field-independent. Thus, the scaling function, which characterizes this force, is given by \eq{eq:Casimir_longit_pq_scaling} and
\eq{eq:F_long_scaling}.
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_eq_hL_Legend_lat_compressed.pdf}
\caption{(Color online) The scaling function $X^{(h,\alpha)}_{\rm Cas}(w,x_1,x_L)$, see \eq{eq:Casimir_longit_pq_scaling}, as a function of $w\in (0,3]$ and $\left(\mathbf{k.\Delta}\right)\in [0,2\pi]$ for $x_1=x_L=1$. }
\label{Fig:3D_G_h1_eq_hL_Legend_lat}
\end{figure}
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3D_G_h1_eq_hL_Legend_MN_lat_compressed.pdf}
\caption{(Color online) The scaling function $X^{(h,\alpha)}_{\rm Cas}(x_t,x_1,x_L)$, see \eq{eq:F_long_scaling}, as a function of $w\in(0,3]$ and $\Delta_x+ \Delta_y\in [0,1]$ for $x_1=x_L=1$. Let us remind that in this sub-case $\omega=x_t/2$. }
\label{Fig:3D_G_h1_eq_hL_Legend_MN_lat}
\end{figure}
Because of the term $\sin (\mathbf{k.\Delta})$, multiplying the expression for the force in the first case, and of the term $\sin [2 \pi (\Delta_x+ \Delta_y)]$, in the second case, the scaling function $X^{(h,\alpha)}_{\rm Cas}$ can be both positive and negative, independently of the values of $x_1$ and/or $x_L$.
\section{The 3d mean-field XY model} \label{sec:3dmf}
\subsection{With infinite surface fields} \label{subsec:infiniteh}
In Ref. \cite{BDR2011} the $XY$ model characterized by the functional
\begin{multline}
{\cal F}\left[ {\bf m};t,L\right]=\int_{-L/2}^{L/2} dz\,\left[\frac{b}{2}\left|\frac{d{\bf m}}{dz}\right|^2+\frac{1}{2}at\left|\textbf{m}\right|^2\right.\\
\left.+\frac{1}{4}g\left|{\bf m}\right|^4\right],
\label{LGenergyfunctional}
\end{multline}
has been studied in the presence of what have been termed twisted boundary conditions.
\begin{figure}[h]
\includegraphics[width=\columnwidth]{3d_Casimir_Force_XY_5_compressed.pdf}
\caption{(Color online) The scaling function $X^{(\alpha)}_{\rm Cas}(x_t)$ of the $XY$ model under twisted boundary conditions as a function of $x_t$ and $\alpha$ for $h=0$. The plane surface marks the $X^{(\alpha)}_{\rm Cas}(x_t)=0$ value of the force: the force is repulsive above it and attractive below it.}
\label{Fig:3d_Casimir_Force_XY}
\end{figure}
Switching to polar coordinates,
\begin{equation}
{\bf m}(z)=\left(\Phi(z)\cos\varphi(z),\Phi(z)\sin\varphi(z)\right),
\end{equation}
these boundary conditions can conveniently be defined by requiring that \begin{eqnarray}
&\varphi(\pm L/2)=\pm \alpha/2,\nonumber\\
&\Phi(\pm L/2) = \infty,
\label{boundaryconditions}
\end{eqnarray}
i.e., the moments at the boundaries are twisted by an angle $\alpha$ relative to one another. It has been shown that the Casimir force has the form
\begin{equation}\label{cas}
\beta F_{\rm Cas}(t,L)=\frac{b}{\hat{g}}L^{-4}X_{\rm Cas}^{(\alpha)}(x_t),
\end{equation}
where $\hat{a}=a/b$, $\hat{g}=g/b$, $x_t=\hat{a}t L^{2}$ and
\begin{equation}\label{casscalingfunctionpandtau}
X_{\rm Cas}^{(\alpha)}(x_t)=\left\{ \begin{array}{cc}
X_0^4[p^2- \left(1+\tau \right)], & x_t \ge 0 \\
X_0^4[p^2- \left(1+\tau/2 \right)^2], & x_t \le 0
\end{array} \right. .
\end{equation}
Here
\begin{equation}\label{newvarscaling}
\tau=x_t/X_0^2,
\end{equation}
and
\begin{equation}\label{xoonpt}
X_0=\int_1^\infty \frac{dx}{\sqrt{(x-1)[x^2+x(1+\tau)+p^2]}},
\end{equation}
and $p$ is to be determined for any fixed value of $x_t$ so that the twisted spins at the boundary make the prescribed angle $\alpha$. Let
\begin{equation}\label{roots}
x_\pm=\frac{1}{2} \left[-(\tau +1)\pm\sqrt{(\tau +1)^2-4 p^2}\right]
\end{equation}
be the roots of the quadratic term in the square brackets in the denominator of (\ref{xoonpt}). There are two subcases: {\it A)}
the roots are real, and {\it B)} the roots are complex conjugates of each
other.
{\it A)} The roots $x_\pm$ are real. Then
\begin{equation}\label{x0det}
X_0=\frac{2}{\sqrt{1-x_-}}K\left[\sqrt{\frac{x_+-x_-}{1-x_-}} \right]
\end{equation}
and
\begin{multline}\label{alphaasafunctionoftherootsfinal}
\alpha=\frac{\sqrt{|x_- x_+|} X_0}{x_-} \bigg\{1\\
-\frac{2}{X_0 \sqrt{1-x_-}} \Pi \left[\frac{x_-}{x_- -1},\sqrt{\frac{x_+-x_-}{1-x_-}} \right]\bigg\}.
\end{multline}
We note that
\begin{equation}\label{relxtp}
\tau=-1-x_--x_+,\qquad p=\sqrt{|x_- x_+|}.
\end{equation}
{\it B)} The roots $x_\pm$ are complex.
One has
\begin{equation}\label{cx0det}
X_0=\frac{2}{\sqrt{r}} K\left(w\right),
\end{equation}
and
\begin{multline}\label{calphaasafunctionoftherootsfinal}
\alpha = \frac{p X_0}{1-r}+\frac{4p}{r^2-1} \sqrt{\frac{r}{1-w^2}}\\
\times\Pi\left[\left(\frac{r-1}{r+1}\right)^2,\frac{w}{\sqrt{w^2-1}}\right].
\end{multline}
where
\begin{eqnarray}\label{notations1}
r\equiv r(x_-,x_+)&=&\sqrt{(1-x_-)(1-x_+)} \nonumber\\
&=& \sqrt{2+\tau+p^2},
\end{eqnarray}
and
\begin{multline}\label{notations2}
w^2\equiv w^2(x_-,x_+) = \frac{1}{2}+\frac{\frac{x_- + x_+}{2}-1}{2
\sqrt{(1-x_-) (1-x_+)}}\\
= \frac{1}{2}\left(1-\frac{3+\tau}{2\sqrt{2+\tau+p^2}} \right).
\end{multline}
The scaling function $X^{(\alpha)}_{\rm Cas}(x_t)$ of the $XY$ model under twisted boundary conditions as a function of $x_t$ and $\alpha$ is shown in Fig. \ref{Fig:3d_Casimir_Force_XY}.
We recall that, as shown in Ref. \cite{BDR2011} the asymptotic expression for $X_{\rm Cas}^{(\alpha)}(x_t)$
\begin{equation}\label{casscalingfunctionasympXY}
X_{\rm Cas}^{(\alpha)}(x_t)\simeq \frac{1}{2} \alpha^2 \left[|x_t|+4
\sqrt{2|x_t|}+\frac{1}{2} \left(48-3 \alpha ^2\right)\right],
\end{equation}
when $x_t\to-\infty$. According to \eq{cas} the latter implies that in this regime
\begin{equation}
\label{casMFas}
\beta F_{\rm Cas}(t,L)\simeq \frac{1}{2} \alpha^2 \frac{b}{\hat{g}} |x_t|L^{-4} = \frac{1}{2}\frac{a b}{g} \alpha^2 |t| L^{-2},
\end{equation}
i.e., its leading behavior is of the order of $L^{-2}$ there due to the existence of helicity within the system.
\subsection{With finite surface fields}
The model described immediately above constrains the spins at the surface of the film to point in particular directions. The physical realization of such a system is much more likely to be one in which the spins at the surfaces are under the influence of finite surface fields. Here, we consider a model for such a system. In order to do so, we employ the approach utilized in Section II of \cite{BDR2011}, in which the spin system occupies sites on a lattice that is infinite in extent in two directions and that consists of a finite number of layers (here labeled 1 to $L$) in the third dimension. We impose surface fields that couple in the standard way to the spins on the leftmost layer, labeled 1, and the rightmost layer, labeled $L$. The magnitude of each of those fields is $h_s$, and the angle between them is $\alpha$. In our mean field approach, the free energy is minimized by adjusting the expectation value of the amplitude and direction of the spins in each layer. The Casimir force follows from the difference between the free energies with $L$ and $L+1$ layers; because of the numerical nature of the free energy results, we are unable to take the derivative with respect to film thickness, as in Section \ref{sec:continuum}.
We find that the Casimir force is consistent with the following scaling form
\begin{equation}
F_{\rm Cas} = L^{-4}f(tL^2, h_sL) \label{eq:3dmf1}
\end{equation}
where $t$ is the bulk reduced temperature. Furthermore, for small enough $h_s$ and $t$ higher than the value at which the film orders spontaneously, the function $f$ on the right hand side of (\ref{eq:3dmf1}) has the form
\begin{equation}
f(tL^2, h_sL) = f_0(tL^2) + f_1(tL^2) \left(h_sL \right)^2 + O\left( \left(h_sL \right)^4\right).\label{eq:3dmf2}
\end{equation}
Because of this, it is possible to envision for small $h_s$ the behavior of the Casimir force that one encounters in the Gaussian model.
Figure \ref{fig:3dmfplot1} is a plot of the scaled Casimir force versus the scaled reduced temperature and scaled surface fields for two values of the film thickness, $L$. The perspective highlights the departure from the behavior in (\ref{eq:3dmf2}) that occurs when the temperature is sufficiently far below the bulk critical temperature that the moments in the film order spontaneously. The films in question consist of $L=50$ and $L=100$ layers, and the angle between the two surface fields is $\alpha = \pi/3$. As is clear from the figure, the difference between the two plots is quite small.
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=3in]{3dmfplot1a.pdf}
\caption{(Color online) Scaled Casimir force, $L^4F_{\rm Cas}$, as a function of the scaled reduced temperature, $tL^2$ and scaled surface field amplitude, $h_sL$. The number of layers in the two films are $L=50$ and $L=100$, and $\alpha$, the angle between the surface fields, is $\pi/3$. The difference between the two plots is barely discernible, indicating that the difference between the scaling function for $L=50$ and the infinite $L$ limit is quite small. }
\label{fig:3dmfplot1}
\end{center}
\end{figure}
As indicated in Fig. \ref{fig:3dmfplot1}, $L=50$ is sufficiently large that the difference between the function and the scaling limit is quite small. Figure \ref{fig:3dmfplot2} illustrates the dependence of the scaled Casimir force on the scaled surface field amplitude for various values of the scaled reduced temperature.
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=3in]{3dmfplot2.pdf}
\caption{(Color online) Scaled Casimir force, $L^4 F_{\rm Cas}$, as a function of the scaled surface field, $h_sL$ for various scaled reduced temperatures, $tL^2$. Here, $L=50$ and $\alpha= \pi/3$. When $tL^2>-\pi^2$ the small $h_s$ dependence of the Casimir force is quadratic, consistent with (\ref{eq:3dmf2}). Below that value of the scaled reduced temperature, the small $h_s$ dependence is linear in the absolute value of that quantity, as exemplified by the curve for $tL^2=-15$. }
\label{fig:3dmfplot2}
\end{center}
\end{figure}
For all reduced temperatures greater than $-\pi^2$, the initial dependence on scaled surface fields is quadratic, consistent with (\ref{eq:3dmf2}). In fact for temperatures at and above the bulk critical temperature ($t \ge 0$) the second term in the right hand side of (\ref{eq:3dmf2}) is the leading non-zero contribution to that expansion. This is consistent with the amplification of the Casimir force that one finds in the Gaussian model---see Section \ref{sec:Gaussian}. However, such amplification only occurs when there is spontaneous ordering in the film. Figure \ref{fig:3dmfplot3} shows the scaled Casimir force as a function of the scaled surface field for $tL^2=5$ and $tL^2=-5$, above and below the bulk transition but above the threshold for film ordering. This plot illustrates the saturation of the Casimir force when the reduced temperature is above the threshold for film ordering, $tL^2=-\pi^2$.
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=3in]{3dmfplot3.pdf}
\caption{(Color online) Dependence of the scaled Casimir force on the scaled surface field for two values of scaled reduced temperature above the point, $tL^2=-\pi^2$, at which spontaneous ordering occurs in the film. Here, $L=50$ and $\alpha = \pi/3$. The plots illustrate the saturation of the influence of the surface fields, at odds with the amplification effect seen in Section \ref{sec:Gaussian}. The figure also illustrates the fact that the Casimir force can change sign as the temperature is varied. This is due to the fact that there is a range of temperatures below the bulk critical temperature in which the bulk system orders while the film remains disordered. For $T>T_c$ both the bulk and the finite system are disordered. For $|h_s|\gg 1$ the Casimir force approaches its value for fixed boundary conditions, the case considered in Subsection \ref{subsec:infiniteh}.}
\label{fig:3dmfplot3}
\end{center}
\end{figure}
The Casimir force changes sign as $L$ increases for fixed $\alpha$, $T$ and $h_s$. This is displayed in Fig. \ref{fig:above_tc}.
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=3in]{change_sign_above_tc.pdf}
\caption{(Color online) Scaled Casimir force, $L^2 F_{\rm Cas}$, as a function of $L$, for fixed values of temperature $t=0.001$, helicity $\alpha= \pi/3$ and value of the surface field amplitude $h_s=0.1$. }
\label{fig:above_tc}
\end{center}
\end{figure}
We also note that the force changes sign for moderate values of $L$. It can readily be established that the overall behavior of the Casimir force is
in accord with Eq. (\ref{eq:3dmf2}); see, for instance, Fig. \ref{fig:3dmfplot3}.
If spontaneous ordering is possible, then amplification of the Casimir force does occur. Figure \ref{fig:3dmfplot4} plots the newly scaled Casimir force $L^2F_{\rm Cas}$ against system size $L$, illustrating the enhanced force amplitude as a function of system size, $L$, expressed in terms of the scaled variable $tL^2$. Here, the reduced temperature is fixed at $t=-0.05$, while the surface field amplitudes are set to $0.05$, $\alpha = \pi/3$ and the system size varies from $L=2$ to $L=3,000$. The behavior displayed is a direct result of the energy stored in the helical spin configuration, a response to the surface fields that are tilted with respect to each other.
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=3in]{3dmfplot4.pdf}
\caption{(Color online) Illustrating the $L$ dependence of the Casimir force for a negative value of reduced temperature, $t=-0.05$ with surface field amplitude $h_s=0.05$ and $\alpha=\pi/3$. The plot is generated by varying the film thickness $L$ for fixed values of $t$, $h_s$ and $\alpha$. The large graph shows how $L^2F_{\rm Cas}$ varies over an extended range of film thicknesses $L$, and the inset shows the $L$ dependence over a much smaller range.}
\label{fig:3dmfplot4}
\end{center}
\end{figure}
Of additional interest in this plot is the variation of the Casimir force for smaller values of $L$, shown in the inset. Note the change in the sign of the Casimir force. A Casimir force going as $L^{-2}$ is consistent with the energy associated with a helicity modulus, which is natural given that the $XY$ system supports such a modulus in the regime in which it spontaneously orders. In this case the surface fields play the essential role of enforcing a helical structure on the order parameter when spontaneous ordering occurs.
The enhanced Casimir force is consistent with the scaling form of (\ref{eq:3dmf1}). Figure \ref{fig:3dmfplot5} displays the dependence of the scaled Casimir force $L^4 F_{\rm Cas}$ on the scaled variable $tL^2$.
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=3in]{3dmfplot5.pdf}
\caption{(Color online) The scaled Casimir force, $L^4 F_{\rm Cas}$, as a function of the scaled variable $tL^2$. The thickness of the film is $L=50$, the surface field amplitudes have been set to 0.01 and the angle between them, $\alpha$, is $\pi/3$. }
\label{fig:3dmfplot5}
\end{center}
\end{figure}
An important feature of this plot is its linear dependence on the scaled reduced temperature when it is sizable and negative. This leads to an overall $L$ dependence going as $L^{-2}$. Another significant property of the critical Casimir force plotted in Fig. \ref{fig:3dmfplot5} is its change in sign in the vicinity of the bulk critical point. In this sense, the Casimir force is tunable---and can be changed from attractive to repulsive---through a variation in temperature.
Finally, Fig. \ref{fig:3dmfplot6} displays the dependence of the scaled Casimir force, $L^4F_{\rm Cas}$, on scaled reduced temperature, $tL^2$ and scaled surface field amplitude, $h_sL$ for a variety of values of the angular difference, $\alpha$, between the two surface fields. As shown in the plots, when $\alpha$ increases from $0$ to $\pi$ the minimum of the force becomes shallower and the region of parameters $tL^2$ and $h_sL$ in which the force is repulsive expands. We also note that the amplitude of the force for any fixed combination of the parameters $tL^2$ and $h_sL$ is a monotonically increasing function of $\alpha$. The force is attractive in the whole region of $h_sL$ and $tL^2$ values only for $\alpha=0$.
\begin{widetext}
\begin{figure}[h!]
\begin{center}
\includegraphics[width=7in]{3dmfplot6_new_compressed.pdf}
\caption{(Color online) Scaled Casimir force, $L^4F_{\rm Cas}$, as a function of the scaled reduced temperature, $tL^2$ and scaled surface field amplitude, $h_sL$. The number of layers in the film is $L=50$. The values of $\alpha$, the angle between the surface fields, reading left to right and then top to bottom, are \textbf{a}: 0, \textbf{b}: $\pi/2$, \textbf{c}: $2 \pi/3$ and \textbf{d}: $\pi$. }
\label{fig:3dmfplot6}
\end{center}
\end{figure}
\end{widetext}
\pagebreak
\section{Discussion and concluding remarks} \label{sec:conclusions}
The Casimir force has provided an unexpectedly rich and varied set of phenomena for study and potential exploitation. In this paper, we have attempted to demonstrate that interactions between the bounding system and the media that supports the Casimir force allow for the possibility of utilizing those interactions, here parameterized as surface fields, to control---and in certain cases greatly amplify---that force. Our focus has been the critical Casimir force, but a number of our results extend far beyond the critical regime. We find that the angle between surface fields can significantly affect the magnitude and the sign of the Casimir force, that variations in temperature can also have such an effect, and that the strength of the critical Casimir force can undergo substantial amplification as a consequence of the application of surface fields. Such fields represent a useful and likely accurate quantification of the action of modifications of the structure or composition of bounding surfaces in the medium giving rise to the Casimir force. Thus, the results presented here could well be utilized or expanded upon to motivate experimental investigations of the effects of surface patterning on the Casimir force.
The key findings reported here are twofold. First, the combination of helicity and surface fields allows for the manipulation of both the sign and the amplitude of the Casimir force. In certain circumstances---particularly when the system supports helicity in the bulk---the force can be greatly amplified in magnitude. The second finding is that the expressions describing the Casimir force are consistent with the expectations of finite size scaling, as embodied in Eqs. (\ref{eq:F_Cas_scaling}), (\ref{eq:F_Cas_scaling_Heis}), (\ref{eq:F_Cas_no_field}), (\ref{eq:sc_funct_field}), (\ref{eq:sc_funct_field_long}), (\ref{cas}) and (\ref{eq:3dmf1}).
One possible setting for an experimental study might be a nematic liquid crystal film. Here, the order parameter is quadrupolar, rather than dipolar as in the case of the $XY$ or Heisenberg models, but the continuous symmetry with respect to rotation of the order parameter is nevertheless in the same general class as in the systems considered here. In fact, a class of Liquid Crystal Display (LCD) devices operates on the basis of inducing a helical structure in liquid crystalline films \cite{LCDGray}. It is also possible that the results reported here are applicable to the case of a liquid Helium film in the superfluid state in which a temperature gradient exists between the substrate on which the film has condensed and the gas phase bordering its free surface. Such a temperature gradient induces flow in the superfluid component, which entails a rotation of the superfluid wave function in the complex plane \cite{[{R. P. Feynman in }] Gorter,Ginz_PIt}.
The models investigated here are unlikely to be directly realized in nature, either because of their low dimensionality, or because they neglect important phenomena such as saturation of the order parameter as in the Gaussian model or are based on approximations, such as the mean field theory. Nevertheless, we are confident in the overall import of our results: that surface fields and helicity in the medium that generates the Casimir force are likely to prove quite significant as experimentally accessible modifiers of that force. How those surface fields are to be generated will vary from system to system, but there is every reason to anticipate that ways will be found and that the result will be a greater insight into the Casimir force and, one hopes, new and useful applications of this interaction.
\acknowledgements{D.D. gratefully acknowledges the financial support via contract DN02/8 of the Bulgarian NSF. J. R. is pleased to acknowledge support from the NSF through DMR Grant No.~1006128.}
\section{Introduction}
Deep learning methods have achieved tremendous success in computer vision applications, e.g., image classification~\cite{szegedy2015going,simonyan2014very,he2016deep,dosovitskiy2020image}, object detection~\cite{girshick2015fast,ren2015faster}, semantic segmentation~\cite{long2015fully,ronneberger2015u}. However, it is restricted to supervised learning where a well-labelled dataset is available. How to generalise well in a dataset with a different distribution remains a challenging problem. Recently, researchers have extensively studied the unsupervised domain adaptation (UDA)~\cite{venkateswara2017deep,tan2020class,wu2019domain}, where the labels are in source data but unavailable in the target data.
\begin{figure}
\centering
\includegraphics[width=0.9\linewidth]{pics/illustration.png}
\caption{{An illustration of the idea of \textbf{\textit{AdaTriplet-RA}}: The different shape indicates different category. Initially, the source and target samples are separated via the domain boundary (1). When generating pseudo-labels on the target domain, there would be uncertain examples near the decision boundary and bad examples with extreme uncertainty (2). We then perform pseudo-label selection, ignoring the wrong samples (3). Through gradual training (4), the samples that are near the decision boundary are gradually becoming clearer, and the wrong samples are calibrated in the next step (5) until convergence (6).}}
\label{fig:illustration}
\end{figure}
The challenges in unsupervised domain adaptation are twofold: it needs to ensure the feature representations are agnostic to domains; it also needs to keep the discriminating capability at the same time. Most of the previous research tries to {lower the empirical risk~\cite{vapnik1991principles} at the source domain} and perform the distribution alignment of the source and target domains, assuming this lowers the actual risk in the target domain~\cite{ganin2015unsupervised,long2017deep}. For instance, adversarial learning for domain distribution alignment \cite{ganin2015unsupervised, tzeng2017adversarial, zhang2019domain} represents a classical line in this direction. Maximum Mean Discrepancy (MMD)-based methods \cite{yan2017mind, chen2019graph} are another commonly-applied approach for domain alignment. Unfortunately, on the one hand, the models are more easily over-fitted to the source task and do not generalise well to the target task; on the other hand, the alignment of sample space between domains is ignored to alleviate the domain shift. As a result, the source classifier can wrongly recognise target samples close to the decision boundary. Moreover, even with successful domain distribution alignment, the model neglects the sample-level similarity and has a higher possibility of negative transfer.
Hence we look directly at sample matching between domains, and specific questions can be raised: {can the inter-domain sample matching help the unsupervised domain adaptation task, and if so, how to realise it?} The answer to the first question is {Yes.} Previous image/text and image matching research \cite{li2019visual, lee2018stacked, zhang2018deep, chen2017multi, zhai2019defense} tries to exploit the sample matching for full-labelled data, in which the classification and retrieval performance are mutually promoted, as proved via many empirical results. This effectiveness of combining classification and matching in retrieval tasks demonstrates that sample-level matching: can both {promote recognition performance} and {perform modality/domain alignment} at the sample level. Both of them are what we want to achieve in unsupervised domain adaptation. Classical approaches \cite{li2019visual, lee2018stacked, zhang2018deep, chen2017multi, yuan2020defense} in retrieval tasks often utilise the metric learning loss as the principal matching loss. Metric learning tries to measure and manipulate the similarities between samples regardless of the domain/modality differences.
Hence, the question remains: {How to realise domain matching?}
As in such an unsupervised learning setting, no labels are available for the target domain samples. Previous research either utilises constrained method \cite{deng2020rethinking} to solve this issue or class-wise matching with probability-based methods like MMD class-wise discriminant \cite{zhang2018unsupervised}. Alternatively, we directly apply Triplet loss for domain matching with the following contributions.
We propose an uncertainty-aware adaptive Triplet loss for the unsupervised domain adaptation. It successfully achieves the matching of semantically similar samples from different domains. We use the classifier trained on the source domain data to predict the pseudo-labels for target domain samples and apply the distance metric learning loss to optimise the model. One of the biggest challenges is that these pseudo-labels contain much noise, i.e., incorrect classification results, which introduce serious bias to the metric learning algorithm.
We introduce a robust {pseudo-label selection} method, where we define a new way of {uncertainty measurement} of the classification results in the target domain. We first define {prototypes} in the source domain, one prototype per class. Each prototype is computed via the average of all the features within one epoch whose performance is better than the average over the whole training stage. We select the reliable pseudo-labels and only use their corresponding samples in Triplet loss. Usually, we need to manually set a threshold value to avoid too many uncertain labels, which could cause a catastrophic effect on training. The selection targets only high-certainty pseudo-labels. The extremely uncertain labels cannot be relied on, as they are usually wrong. Hyper-parameter tuning for the threshold is laborious and yields poor performance. In this paper, we propose a novel trainable Topk scheme in which the threshold is determined via a Gumbel Softmax~\cite{jang2016categorical} discrete relaxation technique. Gumbel Softmax is a continuous distribution that can be smoothly annealed into a categorical distribution whose parameter gradients can be easily computed via the reparameterisation trick~\cite{jang2016categorical}. By doing this, the trainable Topk can automatically select the desired reliable pseudo-labels. We apply the selected pseudo-labels in the Triplet loss and their corresponding uncertainty value as the margin. The higher the uncertainty, the higher the margin should be. The training process is illustrated in Figure~\ref{fig:illustration}: {Initially, the data is only separated via domain boundaries. As the training is performed, the classifier tries to decide on the target data, but with uncertain samples and extremely uncertain samples with wrong pseudo-labels. Our model can gradually calibrate the wrong samples through the pseudo-label selection scheme and match the uncertain samples with the adaptive Triplet loss. 
Upon convergence, the source and the target samples are successfully classified via the category classifier.}
{Meanwhile, Triplet loss~\cite{schroff2015facenet} is a kind of batch-based optimisation goal for a certain model, which does not, or only incompletely, consider the global ranking quality of the whole batch. In many image matching or retrieval tasks, the performance evaluation is based on {Average Precision (AP)}, which comprehensively evaluates the global ranking performance. Nevertheless, AP is non-differentiable because of the discreteness, and the non-convexity \cite{chen2019towards}. In other words, AP cannot easily be approximated by discrete relaxing techniques such as straight-through estimator~\cite{cheng2019straight}. We want to optimise for a higher AP value during the training, whose solution lies in reinforcement learning (RL). Meanwhile, previous attention mechanisms~\cite{xu2015show,anderson2018bottom} often treat attention weights as neurons in the network, which lacks strong supervision. Our approach tries to blend attention supervision with reinforcement learning. The attention weights generation is modelled as a Markov Decision Process (MDP)~\cite{sutton2018reinforcement}, and optimised via a simple policy gradient (PG) algorithm~\cite{sutton2018reinforcement, sutton1999policy}. We treat the AP as the reward in the PG algorithm, naturally solving AP's non-differentiability and non-convexity problems in usual supervised learning. Similar to the proposed adaptive distance metric learning, we adjust the reward with the certainty value to compensate for the noise in the pseudo-labels. The higher the certainty value, the higher reward should be given to the model. Note that the certainty is Topk-selected, and the AP is instance-level, with each sample having one AP result. Our model is termed as \textbf{\textit{AdaTriplet-RA}}, meaning Adaptive Triplet loss and Reinforced Attention.}
To summarise, the contributions of our paper are threefold:
{
\begin{itemize}
\item To facilitate the domain matching for the unsupervised domain adaptation task, we propose an uncertainty-aware Triplet loss to refine the pseudo-labels progressively. The scheme has a novel uncertainty measurement method realised via a trainable adaptive Topk selection to make a clearer decision for hard samples in the target domain.
\item We propose a novel reinforced attention mechanism algorithm to enhance the feature representation and domain matching. The reinforced attention uses the Average Precision (AP) as the reward, which is also adaptively adjusted with uncertainty values. Reinforced attention plays a critical role in domain matching and improves performance.
\item The proposed method ``AdaTriplet-RA" significantly improves the baseline methods and validates that successful domain matching can indeed boost the unsupervised domain adaptation task.
\end{itemize}
}
\section{Related Work}
\subsection{Unsupervised Domain Adaptation}
\paragraph{General Methods}
Unsupervised domain adaptation (UDA) transfers knowledge from a labelled source domain to an unlabeled target domain. Existing unsupervised domain adaptation methods focus on image classification. The mainstream approaches tend to address unsupervised domain adaptation by learning domain-invariant representation, to which our method belongs. There are mainly two kinds of approaches to learning domain-invariant features. \cite{tzeng2014deep,long2017deep,long2015learning,yan2017mind,weighted2020tmm,su2021tmm,ding2022tmm} measure the domain similarity via Maximum Mean Discrepancy (MMD)~\cite{borgwardt2006integrating}. Another line of research learns domain-invariant features using neural model-based learning, e.g., adversarial training. A representative work is the DANN~\cite{ganin2015unsupervised}. This approach applies an implicit adversarial training scheme to learn domain-invariant representation via a gradient reversal layer and a discriminator. Subsequently, research follows this direction and yields good performance in the unsupervised domain adaptation task~\cite{cui2020gradually, chen2020adversarial,long2017conditional,tzeng2017adversarial, sankaranarayanan2018generate}. Notably, SymNets~\cite{zhang2019domain} proposes a symmetric object classifier that plays the role of domain discriminator. Alternatively, Zhang et al. \cite{zhang2018unsupervised} directly target class-wise matching by minimising an MMD-based class-wise Fisher discriminant across domains. Zhao et al. \cite{zhao2022source} address the noisy pseudo-labels for source-free DA tasks where only the pre-trained model and the target data are available during training. MMAN~\cite{MMAN} introduces semantic multi-modality representations learning into adversarial domain adaptation and uses multi-channel constraints to capture fine-grained knowledge categories. Deng et al.~\cite{deng2022tmm} use disentanglement for adversarial domain adaptation to extract more transferable high-level semantic features.
Our research builds on the baseline of SymNets \cite{zhang2019domain}. However, it focuses on the improving impact of the domain matching, also addressing the noisy pseudo-label problem \cite{zhao2022source}, with a significant improvement on unsupervised domain adaptation.
\paragraph{Domain Matching in Domain Adaptation}
There is research analysing the feasibility, problem and performance of the domain matching in domain adaptation tasks~\cite{deng2020rethinking, xie2018learning, laradji2020m, wang2022cross, sharma2021instance}. In particular, Deng et al.~\cite{deng2020rethinking} propose a similarity-guided constraint (SGC) for domain matching via the Triplet loss and emphasise the importance of domain matching. Xie et al.~\cite{xie2018learning} utilise semantic loss and adversarial domain matching for the unsupervised domain adaptation task. They propose to apply the running average for the centroid formulation and conduct centroid alignment with squared Euclidean distance loss. M-ADDA~\cite{laradji2020m} takes a different approach for sample-level matching in domain adaptation: adjusting the margin in metric learning loss with uncertainty. Wang et al.~\cite{wang2022cross} seek an alternative solution from the Contrastive loss~\cite{wu2018unsupervised} and formulate pseudo labels for the target domain, improving the existing approaches. Xu et al. \cite{xu2019unsupervised} apply importance sampling for both the domain and class-level matching in unsupervised domain adaptation. Sharma et al.~\cite{sharma2021instance} propose an instance matching scheme for domain adaptation, utilising not only the multi-sample contrastive loss but also cross-entropy. Li et al. \cite{li2021adadc} target the unsupervised domain adaptation person re-ID task. They propose to address noisy labels and progressively refine them in deep clustering. Meng et al. \cite{meng2022exploring} exploit the label structural information via iterative clustering and pseudo labels for unsupervised domain adaptation. Our methods differ from this research in two respects: our uncertainty is selected with an adaptive threshold; we additionally apply reinforced attention with AP as the reward for better sample matching.
\subsection{Distance Metric Learning}
Metric learning is a spatial mapping method which can learn a feature space. In this space, it makes the feature distance of similar samples smaller. Conversely, it makes the feature distance of different samples larger to distinguish them.
Distance metric learning plays a significant role in a variety of computer vision applications, such as image retrieval~\cite{sohn2016improved}, cross-modal image-text matching~\cite{lee2018stacked}, person re-ID~\cite{hermans2017defense}, and transfer learning~\cite{oh2016deep}. Current research on distance metric learning focuses on the loss functions, e.g., Triplet loss~\cite{schroff2015facenet, hermans2017defense}, N-pair-mc~\cite{sohn2016improved}. There is also research work exploiting the mining techniques to consider the relationships between data samples, e.g., lifted structured~\cite{oh2016deep}, ranked list loss~\cite{wang2019ranked}. Among them, the Triplet loss is one of the most widely-used metric learning functions in varying tasks, given its simplicity and stability. However, most of the previous distance metric learning methods focus on developing the loss functions and mining techniques during the learning process. We focus on the pseudo-label-based Triplet loss with adaptive margin.
\subsection{Visual Attention Mechanism}
The visual attention mechanism~\cite{xu2015show} has been widely applied in many computer vision applications. Notably, the bottom-up attention model~\cite{anderson2018bottom} is the current mainstream for image captioning, visual question answering, and image-text matching. However, there needs to be more research on supervised attention. Gan et al.~\cite{gan2017vqs} propose a supervised attention scheme for visual question answering using attention annotations. Kamigaito et al.~\cite{kamigaito2017supervised} also use attention annotations for supervised attention in natural language processing tasks. Instead, we propose a supervised attention mechanism based on reinforcement learning, which can optimise the attention module towards a specific goal such as AP. Also, the proposed attention module does not need any additional annotations.
\subsection{Discreteness Relaxation Techniques}
Usual neurons in deep learning models are continuous variables, which create a non-linear mapping between the inputs and outputs. There is also a family of stochastic discrete variables in neural networks \cite{xu2015show, yin2019understanding, chung2016hierarchical, jang2016categorical, kusner2016gans}.
One has to rely on either reinforcement learning to realise sampling and exploration \cite{xu2015show, yan2021discrete}, or discreteness relaxation techniques like straight-through estimators \cite{bengio2013estimating, chung2016hierarchical}, and Gumbel techniques \cite{jang2016categorical, kusner2016gans} to train the discrete variables. Gumbel is a more efficient and effective technique in discreteness relaxation.
Our model has two discrete variables: the optimising goal (AP) of reinforced attention and the $k$ for pseudo-label selection. Hence, we apply reinforced training and Gumbel techniques to solve these problems.
\section{The Proposed Method}
\begin{figure*}[h]
\centering
\includegraphics[width=\linewidth]{pics/system.png}
\caption{A schematic diagram of our model: The source and target images are fed into the ResNet-50 backbone network, followed by the extraction of the region. Based on the region features, we explore the reinforced attention via a GRU model and Softmax normalisation. The features are processed via the uncertainty measurement module to produce reliable pseudo-labels and corresponding uncertainty scores. The uncertainty score is also utilised in the policy gradient algorithm for reinforced attention optimisation and the adaptive Triplet loss. The whole model is optimised via multiple training losses.}
\label{fig:system}
\end{figure*}
In this section, we introduce the proposed methods. We first briefly introduce the baseline model, then the regional representation, followed by a detailed illustration of the proposed adaptive triplet loss, the reinforced attention mechanism and the overall loss objectives.
\subsection{Baseline Model.}
We apply the SymNets model~\cite{zhang2019domain,zhang2020unsupervised} as the baseline model for our research. The SymNets belong to the family of domain confusion methods~\cite{zhang2019domain}.
SymNets is a symmetric network to overcome the limitation in the joint distribution of cross-domain aligned features and categories through two-level domain confusion loss.
The category-level confusion loss improves over the domain-level one by driving the learning of intermediate network features to be invariant at the corresponding categories of the two domains.
The design of a SymNet is based on a parallel task classifier $C^s$ and $C^t$. Assume the two classifiers are based on a common FC layer. $C^s$ and $C^t$ contain respectively $K$ outputs corresponding to the numbers of categories on the source and the target domains. The baseline has three Softmax classifiers: $C^s$, $C^t$ and $C^{st}$. $C^{st}$ concatenates the inputs from the source and the target domains, i.e., $v^s$ and $v^t$, to form $[v^s, v^t]\in\mathbf{R}^{2K}$. The classifier $C^{st}$ can discriminate the domain via probability vector $P^{st}\in[0,1]^{2K}$. The SymNets train the classifiers $C^s$ and $C^t$ via cross-entropy loss with the source samples and corresponding labels. The classifier $C^{st}$ is trained with both the source and target samples and domain labels (i.e., $D = [0,1]^{2K}$) to formulate domain probabilities $P^{st}$.
\subsection{Regional Feature Representations.}
{To form a fine-grained representation of the image features, we split the image features via the channels, akin to the channel-wise attention \cite{chen2017sca}. We then formulate a set of fine-grained feature representations from the grid and channel multi-head groups. Specifically, if we split the channel into $H$ heads, and each group has $N$ grids, then the total number of the fine-grained features is $T$, described as follows:
\begin{equation}
\begin{split}
F & = {ResNet\_50}({Input}), \\
I_g & = \{I_0, ..., I_h, ... I_H\} = Mulithead(F), \\
I_h & =\{I_{0}, ..., I_{N}\}, \\
I & = \{I_0, ..., I_T\} = Flatten(I_g),
\end{split}\label{eq:region}
\end{equation}
where $Input$ is the input image, and $F$ is the last convolution feature from the ResNet-50 network.}
\subsection{The Uncertainty Measurement.}
The pseudo-labels generated in a naive way contain much noise, i.e., incorrect classification results, which have not been properly handled in previous research. Many previous approaches use a confidence-based uncertainty measurement as a selection criterion for pseudo-labels~\cite{rizve2021defense}. In our paper, the pseudo-labels generated in the target domain involve a Triplet loss training for cross-domain matching. As a result, we propose a novel prototype similarity-based uncertainty measurement method. Specifically, we formulate a set of prototypes in the source domain, where each category has one prototype. We obtain the prototype for each category by averaging all the image features from that category, as described,
\begin{equation}
\begin{split}
& proto_k^s = \frac{\sum_{i=1}^{N_s^k} V^{s}_k(i)}{N_s^k}, \\
\shortintertext{\textit{Practically, an average running algorithm, expressed as:}}
& proto_k^s(i) = \beta * proto_k^s (i-1) + (1-
\beta) * V^s_k(i), \\
& i = 1, ..., N_s^k,
\end{split}
\end{equation}
where $proto_k^s$ means the prototype for the category $k$ in the source domain $s$ and $N_s^k$ is the number of samples in the source dataset whose category label is $k$. $i$ is the ith iteration in one epoch. $\beta$ is the control coefficient for the running average. Note that we set all the prototypes to zeros when a new training epoch comes and compute the new prototypes in the new epoch. Hence, we obtain a set of prototypes, expressed as $P = \{ proto_k^s | k = 1,..., K\}$, where $K$ is the number of categories in the source domain.
We then calculate the similarity between the prototype set $P$ and the target domain's image features. First, we obtain the temporary classification results of the target samples via classifier $C^t$, which is expressed as
\begin{equation}
\hat{y}^t = \operatorname{argmax}(C^t(V^t)).
\end{equation}
Subsequently, we compute the cosine similarity between the prototypes and the target features which correspond to the category of each prototype.
\begin{equation}
{s}_k = {Cosine}({proto}^s_k, V^t[\hat{y}^t_k]).
\end{equation}
The similarity vector ${s}_k$ is considered the certainty value of the classification results of the classifier in the target domain, i.e., the certainty value of the pseudo-label $\hat{y}^t_k$.
\subsection{Trainable Topk Scheme}
The certainty $C_i, i\in[1;B]$ and the corresponding uncertainty $U_i$ are defined:
\begin{equation}
\begin{split}
& C_i = \begin{cases}
{s}_i, & \text{if } {s}_{i} \in \mathrm{Topk}(\{{s}_{j}\}_{j=1}^{B}), \\
0, & \text{otherwise},
\end{cases} \\
& U_i = 1 - C_i , \quad i \in [1; B],
\end{split}
\end{equation}
{To avoid laborious tuning of the $k$ hyper-parameter in a conventional Topk scheme, we propose a trainable Topk scheme, which is a generic algorithm and should be easily extended to many applications. Specifically, the adaptive Topk are implemented via a Gumbel Softmax and a masking technique to achieve the ability of back-propagation:
\begin{equation}
\begin{split}
& k = Gumbel\_Softmax({s}_i), \\
& {mask} = {Ones}(k-1) \oplus One\_hot(k) ,\\
& C_i = {mask} \odot {s}_i, i \in [1; B],
\end{split}
\end{equation}
where Ones$(Dim)$ indicates an all-ones vector with a dimension of $Dim$, $One\_hot$ means the one hot embedding, $\oplus$ is the vector adding operation, and $\odot$ is the element-wise product. With the Gumbel Softmax, the $k$ value is automatically generated and integrated with the training of the whole model; with the masking operation, we make the $k$ value in the Topk scheme trainable, as the operations involved are continuous. }
\subsection{The Adaptive Triplet Loss Learning.}
Though target samples are aligned to the source samples, some target samples might still be near the decision boundary, as illustrated in Figure~\ref{fig:illustration}. This misalignment often occurs between two similar categories, and it is not easy to correct the misclassified
target samples caused by misalignment. Based on the SymNets baseline model and to avoid aligned samples falling into other categories, we introduce an adaptive Triplet loss for cross-domain matching.
As explained previously, similar semantic samples from the source and the target domains should be aligned regardless of the domain difference. However, the lack of available labels prevents the target domain from direct matching. To solve this issue, we apply the uncertainty $U_k$ for each target sample that is classified to the category $k$ (pseudo-label $k$). The higher the uncertainty, the larger margin should be introduced to make the discrimination clearer. The cross-domain matching loss objective is described more formally:
\begin{equation}
\begin{split}
& \mathcal{L}_{Triplet_{st}}(V^s, y^s, V^t, \hat{y}^t) \\
& = \frac{1}{B} \sum^B_{i=1} U_{i} \odot [\max_{y^s_i=\hat{y}^t_j}||V^s_i - V^t_j||^2 \\
& - \min_{y^s_i\neq \hat{y}^t_k}||V^s_i - V^t_k||^2
+ (\beta + U_{i})]_{+} \\
& + \frac{1}{B} \sum^B_{i=1} U_{i} \odot [\max_{\hat{y}^t_i=y^s_j}||V^t_i - V^s_j||^2
\\ & - \min_{\hat{y}^t_i\neq y^s_k}||V^t_i - V^s_k||^2 + (\beta + U_{i})]_{+},
\end{split}
\end{equation}
where $V^s$ and $V^t$ are the features from the source and the target domains, respectively. The cross-domain Triplet loss is only performed on the pseudo-labels with good certainty and an adaptive margin.
\subsection{The Reinforced Attention Mechanism.}
As shown in Figure~\ref{fig:system}, we first model the attention weights generation process as a finite Markov Decision Process (MDP) and sample a discrete action using Multinomial Sampling. We pre-define $n$ action categories, i.e., $A=\{a_1, a_2, \dots, a_n\}$. The state space contains the input region features and the attention weights generated so far, which are $s_t = \{ I^0, Att^{0}, \dots, I^{t-1}, Att^{t-1}\}$. The policy is parametrised via a GRU model to explore the environment and sample the action. More formally:
\begin{equation}
\begin{split}
& h^t = GRU(I^t, h^{t-1}), \ \ t= 1, \dots, T \\
& a = Softmax(h^t\odot W_{\mu}^t), \\
& a_{Sample}^t = Multinomial(a), \\
& logprob_{a}^t = \log (a[a_{Sample}^t]), \\
\end{split}\label{mu}
\end{equation}
where $I^i$ is the $i$-th region feature in $I$, corresponding to Equation~\ref{eq:region}. $GRU$ is the Gated Recurrent Unit used to model the attention weights generation problem as an MDP. $W_{\mu}^i \in \mathbb{R}^{s \times n}$ are the weights that need to be learned, where $s$ is the size of the feature vector.
After we obtain the attention weights, we perform element-wise multiplication between the hidden features and the attention weights, expressed as follows:
\begin{equation}
\begin{split}
& {Attention_t} = \frac{\exp(a_{Sample}^t)}{\sum_{\tau=1}^T \exp( a_{Sample}^\tau)}, \\
& E = \sum_t^T (h_t \odot {Attention_t}),
\end{split}
\end{equation}
where $Attention_t, \ t\in[1;T]$ is the normalized attention weights, and $E$ is the final image embedding.
To be simple and efficient, we formulate the PG as an online learning method, specifically, the REINFORCE algorithm~\cite{williams1992simple}. The PG for the action space is then to maximise the long-term reward with the following expression:
\begin{equation}
\begin{split}
& \nabla_\theta J(\theta) = \\
& \mathbb{E}_{\tau \sim \pi_\theta(\tau)}
\left[ \left(\sum_{t=0}^{T} \nabla_\theta \log{\pi_\theta}(a_t \mid s_t)\right) \left(\sum_{t=0}^{T} r(s_t, a_t)\right)\right].
\end{split}\vspace{0.2cm}\label{pgmu}
\end{equation}
We use the one sample Monte-Carlo to approximate the accumulative reward, i.e., $\sum_{t=0}^{T} r(s_t, a_t) = \sum_{t=0}^{T} \mathcal{R}$, where $\mathcal{R}$ is the reward and will be defined later. Also, $\log{\pi_\theta}(a_t \mid s_t) = logprob_{a}^t$, which is obtained from Equation~\ref{mu}. Hence, Equation~\ref{pgmu} can lead to a PG loss function as follows:
\begin{equation}
\begin{split}
& \mathcal{L}_{PG} = - \sum_{i=1}^{B}
\left[ \left(\sum_{t=0}^{T} \nabla_\theta logprob_{a}^t \right) \left(\sum_{t=0}^{T} \mathcal{R}_i \right)\right],
\end{split}\label{loss_discrete}
\end{equation}
where the reward function $\mathcal{R}$ is defined as follows:
\begin{equation}
\mathcal{R}_i = C_{t} \odot (AP_i (V_i^s, V_k^t) + AP_i(V_i^t, V_k^s)).
\end{equation}
\subsection{Adversarial Domain Alignment}
We include the domain adversarial loss~\cite{ganin2015unsupervised} to align the source and target domain, which is expressed as $\mathcal{L}_{adv}$:
\begin{equation}
\begin{split}
& DC = {Grad\_Reverse}(Dis(V)),
\\ & V = F+E, \\
& \mathcal{L}_{adv} = BCE(DC, Domain\_label),
\end{split}
\end{equation}
where the final embedding $V$ is an additive fusion of the original feature $F$ and $E$, and $Grad\_Reverse$ (as shown in Figure~\ref{fig:system}) is a gradient reversal layer that makes the feature indiscriminative with respect to the domain differences. $BCE$ is the binary cross-entropy loss for binary classification.
\subsection{The Overall Losses.}
The overall loss function contains several parts:
\begin{equation}
\begin{split}
Loss = \mathcal{L}_{SymNets} + \alpha*( \mathcal{L}_{PG} + \mathcal{L}_{Triplet_{st}} + \mathcal{L}_{adv}),
\end{split}
\end{equation}
where $\mathcal{L}_{SymNets}$ is the SymNets baseline's optimization loss functions, $\mathcal{L}_{PG}$ is the policy gradient loss for reinforced attention, $\mathcal{L}_{Triplet_{st}}$ is the inter-domain adaptive Triplet loss, and $\alpha$ controls the contribution of the proposed methods.
\section{Experiments}
In this section, we first introduce the datasets used, followed by implementation details, then the numerical results, and last we present the qualitative evaluation.
\subsection{Datasets}
We perform our experimental evaluation and report results on a mix of standard unsupervised domain adaptation benchmark datasets.
\subsubsection{Office-31} Office-31~\cite{saenko2010adapting} is a widely-applied dataset for real-world unsupervised domain adaptation. It contains 4,110 images, spanning 31 categories in three domains: Amazon (A), Webcam (W) and DSLR (D).
\subsubsection{Office-Home} Office-Home is an image classification benchmark dataset~\cite{venkateswara2017deep}, which contains categories of objects found in office and home environments, with 4 domains: The real world (Rw), Clipart (Cl), Product (Pr), and Art (Ar). We report the performance of our model on this dataset. In addition, we perform ablation studies of our method and compare it with State-of-the-arts methods on four domain adaptation tasks.
\subsubsection{DomainNet} DomainNet is a large unsupervised domain adaptation benchmark, containing 0.6 million images belonging to 6 domains, with 345 categories. Due to labelling noise present in its full version, we instead use the subset proposed by Tan et al.~\cite{tan2020class}, which applies 40 commonly-seen classes for four domains: Real ({R}), Clipart ({C}), Painting ({P}) and Sketch ({S}).
\subsubsection{VisDa-2017} The VisDa-2017~\cite{peng2017visda} dataset is the largest synthetic-to-real object classification dataset with over 280k images in the training, validation and testing splits. All three splits share the
same 12 object categories. The training domain consists of 152k synthetic images, which are generated by rendering 3D models of the same object categories from different angles and under different lighting conditions. The validation domain includes 55k images by cropping an object in real images
from COCO~\cite{lin2014microsoft}. The testing domain contains 72k images cropped from video frames in YT-BB~\cite{real2017youtube}.
\begin{table*}[!t]\caption{The results on the Office-31 dataset.}
\centering
\renewcommand\arraystretch{1.25}{
\resizebox{\linewidth}{!}{
\begin{tabular}[htb]{llllllll}
\toprule
Methods & A$\rightarrow$ W & D$\rightarrow$ W & W$\rightarrow$ D & A$\rightarrow$ D & D$\rightarrow$ A & W$\rightarrow$ A & Avg \\
\midrule
ResNet-50~\cite{he2016deep}& 68.4±0.2 &96.7±0.1 &99.3±0.1& 68.9±0.2 & 62.5±0.3 &60.7±0.3 &76.1 \\
DANN~\cite{ganin2015unsupervised} &82.0±0.4 &96.9±0.2 &99.1±0.1& 79.7±0.4& 68.2±0.4& 67.4±0.5 &82.2 \\
ADDA~\cite{tzeng2017adversarial} &86.2±0.5 &96.2±0.3 &98.4±0.3& 77.8±0.3 &69.5±0.4 &68.9±0.5 &82.9 \\
JAN-A~\cite{long2017deep} & 86.0±0.4 &96.7±0.3 &99.7±0.1& 85.1±0.4 &69.2±0.3& 70.7±0.5 &84.6 \\
MADA~\cite{pei2018multi} &90.0±0.1 &97.4±0.1 &99.6±0.1& 87.8±0.2 &70.3±0.3 &66.4±0.3 &85.2 \\
Kang et al.~\cite{kang2018deep} & 86.8±0.2 &99.3±0.1 &100.0±.0 &88.8±0.4 &74.3±0.2& 73.9±0.2 &87.2 \\
CDAN+E~\cite{long2017conditional} &94.1±0.1 &98.6±0.1 &100.0±.0 &92.9±0.2 &71.0±0.3& 69.3±0.3 & 87.7 \\
SymNets~\cite{zhang2020unsupervised}
&90.8±0.1 &98.8±0.3 &100.0±.0 & 93.9±0.5 &{74.6±0.6}& 72.5±0.5 &88.4 \\
\textbf{\textit{AdaTriplet-RA}} (Ours) & \textbf{93.0±0.3} & \textbf{99.2±0.3} & \textbf{100.0±.0} & \textbf{95.2±0.4} & \textbf{75.0±0.2} & \textbf{74.1±0.5} & \textbf{89.4}\\
\bottomrule
\end{tabular}\label{office_31}
}}
\end{table*}
\begin{table*}[!t]\Huge \caption{The results on the Office-Home dataset.}
\centering
\resizebox{\linewidth}{!}{
\begin{tabular}[htb]{lllllllllllllll}
\toprule
Methods & Ar$\rightarrow$ CI & Ar$\rightarrow$ Pr & Ar$\rightarrow$ Rw & CI$\rightarrow$ Ar & CI$\rightarrow$ Pr & CI$\rightarrow$ Rw & Pr$\rightarrow$ Ar & Pr$\rightarrow$ CI & Pr$\rightarrow$ Rw & Rw$\rightarrow$ Ar & Rw$\rightarrow$ CI & Rw$\rightarrow$ Pr & Avg \\
\midrule
ResNet-50~\cite{he2016deep}& 34.9& 50.0& 58.0 &37.4 &41.9& 46.2 &38.5 &31.2& 60.4& 53.9 &41.2 &59.9 & 46.1 \\
DAN~\cite{long2015learning} &43.6 &57.0 &67.9 &45.8 & 56.5 &60.4 &44.0 &43.6 &67.7& 63.1& 51.5& 74.3& 56.3 \\
DANN~\cite{ganin2015unsupervised} & 45.6 &59.3 &70.1 &47.0 &58.5& 60.9& 46.1& 43.7 &68.5 &63.2 &51.8 &76.8 &57.6 \\
CDAN+E~\cite{long2017conditional} & \textbf{50.7} &70.6 &76.0 &57.6 &70.0 &70.0 &57.4& 50.9 &77.3 &70.9 & \textbf{56.7} &81.6 &65.8 \\
SymNets~\cite{zhang2020unsupervised} & 47.7 & 72.9& 78.5& 64.2 & 71.3 &74.2 &{64.2}& {48.8}&79.5& {74.5} &52.6& 82.7& 67.6 \\
\textbf{\textit{AdaTriplet-RA}} (Ours) & {49.3} & \textbf{75.8} & \textbf{80.4}& \textbf{67.3}& \textbf{73.7}& \textbf{75.8} & \textbf{65.6} & \textbf{50.1}& \textbf{81.0} & \textbf{75.3}& {54.3}& \textbf{83.1}& \textbf{69.3} \\
\bottomrule
\end{tabular}}\label{office_home}
\end{table*}
\begin{table*}[!t]\Huge \caption{The results on the DomainNet dataset.}
\centering
\resizebox{\linewidth}{!}{
\begin{tabular}[htb]{lllllllllllllll}
\toprule
Methods & R $\rightarrow$ C &R$\rightarrow$ P& R$\rightarrow$ S& C$\rightarrow$ R& C$\rightarrow$ P& C$\rightarrow$ S& P$\rightarrow$R &P$\rightarrow$ C &P$\rightarrow$ S &S$\rightarrow$ R &S$\rightarrow$C &S$\rightarrow$ P &AVG \\
\midrule
ResNet-50~\cite{he2016deep} & 65.75 &68.84 &59.15 &77.71 &60.60 &57.87& 84.45 & 62.35 &65.07 &77.10 &63.00 &59.72 &66.80 \\
BBSE~\cite{lipton2018detecting} & 55.38 &63.62 &47.44& 64.58& 42.18& 42.36 &81.55 &49.04 &54.10& 68.54 &48.19 &46.07& 55.25 \\
PADA~\cite{cao2018partial} &65.91 &67.13& 58.43& 74.69 &53.09 &52.86 &79.84 &59.33 &57.87& 76.52 &66.97& 61.08 &64.48 \\
MCD~\cite{saito2018maximum} &61.97 &69.33 &56.26 &79.78 &56.61 &53.66 &83.38& 58.31& 60.98& 81.74&56.27 &66.78 &65.42 \\
DAN~\cite{long2015learning} &64.36 &70.65 &58.44 &79.44 &56.78 &60.05 &84.56 &61.62 &62.21& 79.69 &65.01 &62.04& 67.07 \\
F-DANN~\cite{wu2019domain} &66.15 &71.80& 61.53& 81.85& 60.06& 61.22& 84.46& 66.81& 62.84& 81.38 &69.62 &66.50 &69.52 \\
UAN~\cite{you2019universal} &71.10 &68.90 &67.10 &83.15 &63.30 &64.66 &83.95 &65.35 &67.06 & 82.22 &70.64 &68.09 &72.05 \\
JAN~\cite{long2017deep} &65.57 &73.58& 67.61 &85.02 &64.96 &67.17 &87.06 &67.92 &66.10& 84.54 &72.77& 67.51& 72.48 \\
ETN~\cite{cao2019learning} &69.22& 72.14 &63.63 &86.54& 65.33 &63.34 &85.04 &65.69 &68.78 &84.93 &72.17& 68.99& 73.99 \\
BSP~\cite{chen2019transferability} &67.29 &73.47& 69.31 &86.50 &67.52 &70.90 &86.83 &70.33 &68.75 &84.34 &72.40 &71.47 &74.09 \\
DANN~\cite{ganin2015unsupervised} &63.37 &73.56 &72.63 &86.47& 65.73 &70.58& 86.94 &73.19 &70.15& 85.73& 75.16& 70.04& 74.46 \\
COAL~\cite{tan2020class} &73.85 &75.37 &70.50 &89.63 &69.98 &71.29 &89.81 &68.01 &70.49& 87.97 &73.21 &70.53 &75.89 \\
InstaPBM~\cite{li2020rethinking} &80.10 &75.87 &70.84 &89.67 &70.21 &72.76 &89.60 &74.41 &72.19 &87.00 &79.66 &71.75 &77.84 \\
ISFDA~\cite{li2021imbalanced} &\textbf{81.52} &77.29 &\textbf{73.55} &90.09 &75.11 &\textbf{74.78} &89.57 &\textbf{76.70} &\textbf{76.07} &{87.55} &\textbf{79.70} &73.13 &79.58 \\
SymNets~\cite{zhang2020unsupervised} & 79.17 & 81.54 & 68.61 & 86.43 & 74.37 & 67.53 & 82.65 & 64.74 & 70.11 & 84.18 & 77.07 & 77.70 & 76.18 \\
\textbf{\textit{AdaTriplet-RA}} (Ours) & 79.69 & \textbf{82.79} &73.34 & \textbf{91.14} & \textbf{78.45} & 74.28 & \textbf{89.96} & 74.45 & 74.70 & \textbf{88.22} & {78.04} & \textbf{79.74} & \textbf{80.40}\\
\bottomrule
\end{tabular}}\label{DomainNet}
\end{table*}
\begin{table*}[!t]\Huge \caption{The results on the VisDa-2017 Test dataset (ResNet-101).}
\centering
\resizebox{\linewidth}{!}{
\begin{tabular}[htb]{llllllllllllll}
\toprule
Methods & plane &bcycl &bus &car& horse &knife &mcycl &person& plant& sktbrd &train & truck& Avg \\
\midrule
ResNet-101~\cite{he2016deep} & 67.7 &36.6 &48.4 &68.2& 76.9& 5.3& 65.8 &38.0& 72.5 &29.1 &82.1 &3.73 &49.5 \\
DANN~\cite{ganin2015unsupervised} & 87.1 &63.0& 76.5& 42.0 &90.3 &42.9 &85.9 &53.1 &49.7 &36.3 &85.8 &20.7 &61.1 \\
DAN~\cite{long2015learning} & 81.9 &77.7 &82.8 &44.3 &81.2 &29.5 &65.1 &28.6 &51.9 &54.6 &82.8 &7.8 &57.4 \\
JAN-A~\cite{long2017deep} &75.7& 18.7 &82.3 &86.3 &70.2 &56.9 &80.5 &53.8 &92.5 &32.2 &84.5 &54.5 & 65.7 \\
MCD~\cite{saito2018maximum} &87.0 &60.9 &83.7 &64.0 &88.9 &79.6 &84.7 &76.9 &88.6 &40.3 &83.0 &25.8 &71.9 \\
ADR~\cite{saito2018adversarial} &87.8 &79.5 &83.7 &65.3 &92.3 &61.8 &88.9 &73.2 &87.8 &60.0 &85.5 &32.3 &74.8 \\
BSP~\cite{chen2019transferability} &92.4 &61.0 &81.0 &57.5 &89.0 &80.6 &90.1 &77.0 &84.2 &77.9 &82.1 &38.4 &75.9 \\
SWD~\cite{lee2019sliced} &90.8 &82.5 &81.7 &70.5 &91.7 &69.5 &86.3 &77.5 &87.4 &63.6 &85.6 &29.2 &76.4 \\
DADA~\cite{tang2020discriminative} &92.9 &74.2 &82.5 &65.0 &90.9 &93.8 &87.2 &74.2 &89.9 &71.5 &86.5 &48.7 &79.8 \\
IterLNL~\cite{zhang2021unsupervised} &89.0 &79.5 &84.3 &81.0 &87.7 &88.1 &\textbf{92.5} &38.7 &87.1 &\textbf{96.9} &78.8 &67.0 &80.9 \\
STAR~\cite{lu2020stochastic} &95.0 &84.0 &84.6 &73.0 &91.6 &91.8 &85.9 &78.4 &94.4 &84.7 &87.0 &42.2 &82.7 \\
SE~\cite{french2018self} &\textbf{95.9} &\textbf{87.4} &85.2& 58.6& \textbf{96.2}& \textbf{95.7} &90.6& 80.0 &\textbf{94.8} & 90.8& 88.4& 47.9 & 84.3 \\
SymNets~\cite{zhang2020unsupervised} & 89.8 & 39.1 &82.8 &92.7 &79.0 &18.3 &81.9 & \textbf{89.1} &91.8 &23.7 &91.6 &76.2 &75.6 \\
\textbf{\textit{AdaTriplet-RA}} (Ours) &92.3& 62.9& \textbf{87.2} & \textbf{94.5} &85.6 &73.1 & 88.2 & 83.4& 93.8& 83.6 &\textbf{93.0}& \textbf{77.0}& \textbf{85.3}\\
\bottomrule
\end{tabular}}\label{visda2017}
\end{table*}
\begin{table}[!t]\Huge \caption{The results on the VisDa-2017 Test dataset (ResNet-50).}
\centering
\resizebox{0.35\linewidth}{!}{
\begin{tabular}[htb]{llllllllllllll}
\toprule
Methods & Avg \\
\midrule
ResNet-50~\cite{he2016deep} &40.2 \\
DAT~\cite{ganin2016domain} &63.7 \\
GTA~\cite{sankaranarayanan2018generate} &69.5 \\
MCD~\cite{saito2018maximum} &69.2 \\
CDAN~\cite{long2018conditional} &70.0 \\
DEC~\cite{zhu2021source} &73.3 \\
CAMCD~\cite{azzam2021unsupervised} &73.6 \\
SymNets~\cite{zhang2020unsupervised} &70.8 \\
\textbf{\textit{AdaTriplet-RA}} (Ours) & \textbf{77.0}\\
\bottomrule
\end{tabular}
}
\label{visda2017_50}
\end{table}
\begin{table*}[!t]\caption{Ablation study on the Office-Home dataset.}
\centering
\resizebox{\linewidth}{!}{
\renewcommand\arraystretch{1.25}
\begin{tabular}[htb]{llllllll}
\toprule
Methods & Ar$\rightarrow$ CI & Ar$\rightarrow$ Pr & Ar$\rightarrow$ Rw & CI$\rightarrow$ Ar & Avg\\
\midrule
Baseline (SymNets) & 45.48 & 72.70 & 78.29 & 63.91 & 65.10\\
Ours + Adv. & 46.87 & 74.13 & 78.74 & 66.13 & 66.47 \\
Ours + Adv. + RA & 47.90 & 73.37 & 79.10 & 66.34 & 66.68 \\
Ours + Adv. + RA + Triplet ($k$=20) & 47.12 & 74.75 & 79.02 & 66.70 & 66.90 \\
Ours + Adv. + RA + Triplet ($k$=1) & 46.53 & 74.43 & 78.79 & 65.43 & 66.30 \\
Ours + Adv. + RA + Triplet ($k$=10) & 47.33 & 74.66 & 79.27 & 65.76 & 66.80 \\
\hline
Ours w/ $\alpha$=1 & 46.53 & 74.29 & 78.54 & 65.76 & 66.28 \\
Ours w/ $\alpha$=5 & 47.63 & 74.79 & 79.00 & 66.87 & 67.07 \\
Ours w/ $\alpha$=10 & 48.18 & 75.11 & 79.18 & 67.04 & 67.38 \\
\hline
Ours w/ H=1 & 48.18 & 75.11 & 79.18 & 67.04 & 67.38 \\
Ours w/ H=2 & 48.43 & 75.13 & 79.92 & 67.04 & 67.63\\
Ours w/ H=4 & 48.50 & 75.44 & 80.10 & 67.28 & 67.83\\
Ours w/ H=8 & 49.26 & 75.76 & 80.35 & 67.28 & 68.16\\
Ours w/ H=16 & 48.01 & 75.40 & 79.57 & 67.12 & 67.53\\
\hline
Ours + Adv. + RA + Triplet (Adp. $k$) & \textbf{49.26} & \textbf{75.76} & \textbf{80.35} & \textbf{67.28} & \textbf{68.16} \\
\bottomrule
\end{tabular}\label{ablation}
}
\end{table*}
\begin{figure*}
\centering
{
\includegraphics[width=0.85\linewidth, height=4cm]{pics/vis_new.png}
}
\caption{t-SNE Visualisation of different methods on Office-31 dataset A$\rightarrow$W task, our method has a better clustering quality than other approaches in ablation studies.}
\label{fig:vis}
\end{figure*}
\begin{figure}
\centering
\includegraphics[width= \linewidth]{pics/vis_certainty.png}
\caption{Visualisation of the certainty values in the training process on the Office-Home dataset, in which we can see the certainty increases as the training is performed, indicating more reliable pseudo-labels will be selected. }
\label{fig:certainty}
\end{figure}
\begin{figure}
\centering
\includegraphics[width= 0.8\linewidth]{pics/ab_accuracy.png}
\caption{{Visualization of the convergence situation on the Office-Home dataset. Our method has a better convergence speed and higher results than the baseline. We also observe that the combination of "Triplet" and "RA" can boost the final convergence speed and accuracy results. }}
\label{fig:ab_accuracy}
\end{figure}
\begin{figure*}
\centering
\includegraphics[width=\linewidth]{pics/vis_topk.png}
\caption{Accumulated histogram visualization for the $k$ values in the Office-Home dataset. The green colour indicates the $k$ distribution of the initial 150 iterations of the training, while the red corresponds to the last 150 iterations of the training stage. The deep green colour is the overlap of the two distributions. We observe that the red colour has a bigger coverage in the distribution, indicating that the later stage selects more reliable pseudo-labels, given the comparison of the $k$ distributions.
}
\label{fig:topk}
\end{figure*}
\begin{figure*}
\centering
\includegraphics[width=\linewidth]{pics/vis_sim.png}
\caption{Visualisation of some examples for the inter-domain sample matching effect. Based on the cosine similarity metric, we select the top-5 samples from the target domain given the source anchor image. In the early training stage, the anchor in the source domain hardly finds a match in the target domain, while the sample matching is significantly improved in the later training stage. This phenomenon indicates that the training for sample matching is successful. }
\label{fig:matching}
\end{figure*}
\subsection{Implementation Details}
\paragraph{Baseline model}
We build our model based on the ResNet-50 backbone Network~\cite{he2016deep}. We apply the SymNets training method as our baseline model~\cite{zhang2019domain}. All the training losses from the initial SymNets are kept in our training, but our model's network and training configurations are different.
\paragraph{Configurations}
Our model can be trained in a single Nvidia Geforce 2080-TI with an 11GB memory GPU card. The configurations are as follows:
\begin{itemize}
\item For both the source and target domain, the batch size is 32. In the Triplet loss and reinforced attention, we set the hyper-parameter $\beta$ as 0.5.
\item The dimension of the input and hidden features of the GRU and the embedding dimension of the adaptive Triplet loss are all 2048.
\item We use the SGD optimiser, the early stopping technique, to select the best model checkpoint. All the SGD optimisers are with Nesterov momentum and weight decay of 5e-4. On all the datasets, we apply a learning rate of 1e-3 to train the model with 80 epochs.
\item On both the large-scale DomainNet and VisDa-2017 datasets, we have a certain number of fixed layers during training (the first convolutional block in the ResNet).
\end{itemize}
\paragraph{Environment}
Our model is built on the PyTorch-1.10 platform~\cite{paszke2017automatic}. We conducted all our experiments on a PC equipped with an Nvidia Geforce 2080-TI GPU, running Windows 10 with CUDA and cuDNN installed from the Nvidia toolkit.
\subsection{Comparison with State-of-the-art Methods}
We compare our methods with other State-of-the-art approaches on all three datasets. The comparison on the Office-31 dataset is shown in Table~\ref{office_31}. Our average accuracy surpasses all the previous results. Specifically, we lead the SymNets~\cite{zhang2020unsupervised} method by 1.0\% in average precision over all six domain transfer tasks. Note that our model is just as efficient as SymNets though we introduce extra modules. Even with a limited computing resource and a smaller batch size (32) (compared with 128 in the SymNets implementation), we improve the results of current State-of-the-art methods. The comparison on Office-Home is presented in Table~\ref{office_home}, where a similar phenomenon is observed. Our model's average accuracy on the Office-Home dataset leads the State-of-the-art by 1.7\%. The subsequent ablation studies also prove the improving impact of each ingredient of the proposed model.
The results of DomainNet are illustrated in Table~\ref{DomainNet}.
Our method outperforms SymNets~\cite{zhang2020unsupervised} in all twelve domain transition tasks and increases SymNets' average accuracy by 4.22\%.
Interestingly, the distribution of our accuracy results regarding the different domain adaptation tasks is quite different from the existing State-of-the-art method, i.e., ISFDA~\cite{li2021imbalanced}, which has a similar intuition of selective optimisation. ISFDA~\cite{li2021imbalanced} mainly performs the selection based on class balance whilst we propose a novel way of certainty measurement. Our method tends to perform better or close in most domain adaptation tasks, with 80.40\% average accuracy.
We report the results of our methods on the VisDa-2017 test set in Table~\ref{visda2017} and Table~\ref{visda2017_50}. In addition, we report the results on the ResNet-50 and ResNet-101 backbone networks. Our methods improve the SymNets baseline by a large margin with both backbone networks. In particular, our method based on ResNet-101 significantly outperforms SE~\cite{french2018self}, the champion of the VisDa-2017 challenge. Interestingly, our methods with the ResNet-50 backbone surpass most methods with the ResNet-101 backbone network (including the SymNets baseline), which validates the superiority of the proposed sample matching scheme.
\subsection{Comparative Study on Each Block of the Model}
\subsubsection{Baseline} As shown in Table~\ref{ablation}, the baseline model, which utilizes the same optimization techniques as SymNets~\cite{zhang2020unsupervised}, yields poor accuracy results, even worse than the original results~\cite{zhang2020unsupervised}. Our implementation has a smaller batch size (32 versus 128 in~\cite{zhang2020unsupervised}).
\subsubsection{Adversarial training.} From Table~\ref{ablation}, the ``Ours + Adv." is the model with adversarial training. When adding the adversarial domain confusion, the accuracy increases, which validates the effectiveness of the adversarial domain confusion.
\subsubsection{Reinforced attention} From Table~\ref{ablation}, the ``Ours + Adv. + RA." is the model with reinforced attention. When adding the reinforced attention, the accuracy increases, which validates the effectiveness of the attention mechanism.
\subsubsection{Triplet Loss} As shown in Table~\ref{ablation}, the scheme ``Ours + Adv. + RA + Triplet ($k$=20)" has an obvious positive impact over the baseline, which validates the effectiveness of the domain matching scheme.
\subsubsection{Fixed value of $k$} Both the accuracy of a single task and the average accuracy are improved as $k$ increases. $k$ is a critical parameter in this model, as it controls the pseudo-label certainty threshold, impacting the Triplet loss performance and reinforced attention. Although of great significance, $k$ is not extremely sensitive, with only a small performance variation across different values. Still, note that a tiny value of $k$ quickly loses functional pseudo-labels and thus deteriorates the model to a scheme similar to ``Ours + Adv. + RA''.
\subsubsection{Adaptive $k$}{To avoid the manual tuning of the $k$ hyper-parameter, the adaptive $k$ is with critical significance. One does not need to hand-tune the $k$ parameter, which expands the application scope and efficiency of the model; In addition, the adaptive $k$ can bring a significant performance gain, as shown in "Ours + Adv. + RA + Triplet (Adp. $k$)", yielding the best results in the ablation study.}
\subsubsection{ The value of $H$}
{We report the comparative study of the number $H$, i.e., the channel splitting factor. A suitable $H$ is good for finding fine-grained features, which benefits the overall performance.
From Table \ref{ablation}, we observe that $H$ is not a very sensitive hyper-parameter for the final performance, and $H$=8 yields the best results.}
\subsubsection{Coefficient $\alpha$} The coefficient $\alpha$ is critical in maintaining good performance. A reasonably larger $\alpha$ increases the proportion of the adaptive Triplet loss and reinforced attention in the model's training, which helps both components produce better performance.
\subsection{Visualization}
\subsubsection{Cluster visualization}
We perform t-SNE visualisation for different methods on the Office-31 ``Amazon-to-Webcam'' (A$\rightarrow$W) task, illustrated in Figure~\ref{fig:vis}. On this task, there are 2,817 images in the Amazon domain as source data and 795 images in the Webcam domain as target data. Our method achieves a better visualisation result than the baseline and the other methods in the ablation studies. In particular, the scheme ``w/o Adaptive Triplet'' has better visualisation results than the baseline, and the scheme ``w/o Reinforced Attention'' is better in cluster visualisation. Our full model has the best visualisation quality, which matches the numerical results.
\subsubsection{Certainty and $k$ visualization}
To see the change of certainty value and $k$ during the training stage and validate our adaptive scheme, we visualise the certainty value during training as shown in Figure \ref{fig:certainty} and the distribution of $k$ in Figure \ref{fig:topk}. The certainty value gradually increases as the training is performed, which shows that the model tends to become more confident on the pseudo-labels in the target domain. We apply a cumulative histogram to visualise the distribution of the $k$ values. We compare the $k$ value's distribution between the initial 150 iterations and the last 150 iterations of the training process. As shown in Figure \ref{fig:topk}, the model tends to select more large value $k$ in the later stage of the training (the red colour covers more area than the green colour), indicating the increasing robustness.
{\subsubsection{Convergence speed visualization}
To visualise the convergence speed, we plot the training accuracy in each epoch, as shown in Figure~\ref{fig:ab_accuracy}. As the training epoch increases, our model tends to have a higher convergence speed than the baseline. The positive impact of the Triplet training and the reinforced attention upon convergence is also validated.}
\subsubsection{Matching performance visualization}
To see the change in the inter-domain sample matching performance during the training, we visualise the matched sample by selecting the Top-5 similar examples via cosine similarity, as shown in Figure \ref{fig:matching}. We compare the matching performance between the early stage of the training and the trained model. It is clear from the figure that the trained model tends to select more correct samples from the target domain, which illustrates that our method improves the inter-domain sample matching performance.
\section{Conclusions}
This paper improves the unsupervised domain adaptation through a novel perspective, i.e., improving the sample-level discriminating capability. To this end, we propose an uncertainty-aware inter-domain sample matching scheme. We utilise an uncertainty-aware adaptive Triplet loss and reinforced attention to fulfil the domain match. This novel perspective and the corresponding technical solutions effectively improve the unsupervised domain adaptation task. {In addition, the proposed modules, such as the trainable adaptive Topk module, the adaptive Triplet loss, and the Reinforced attention, are all model-agnostic, which can easily be plugged and applied in many other applications.} Comprehensive experiments validate the effectiveness of the uncertainty-aware domain match, with State-of-the-art results achieved on several publicly available benchmark datasets.
\section{Acknowledgments}
This work is supported by the National Natural Science Foundation of China (62106110, U20B2061), the Natural Science Foundation of Jiangsu (BK20210646) and the Research Innovation Program for College Graduates of Jiangsu (KYCX22\_1209).
\bibliographystyle{elsarticle-num}
|
1,314,259,995,008 | arxiv | \section{Introduction}\label{sec:intro}
New measurement technologies like single cell RNA-sequencing (scRNA-seq)~\cite{Klein20151187,Macosko20151202} and scATAC-seq~\cite{Buenrostro2015} are revolutionizing the biological sciences. It is now possible to capture high-dimensional measurements of cell states for large populations of cells. One of the most exciting prospects associated with this new trove of data is the possibility of studying temporal processes such as differentiation and development: if we could analyze the trajectories cells traverse over time, we might understand how cell types emerge and are stabilized, and how they destabilize with age or in diseases such as cancer. We might also understand perturbations to developmental processes, e.g. how crop growth might respond to climate change.
Current measurement technologies, however, cannot directly measure trajectories of cellular differentiation because the observation process is destructive, necessarily killing the cells of interest.
With this motivation, the bioinformatics community has rushed to develop methods to infer trajectories from independent samples collected at various time-points along a developmental progression~\cite{trapnell2014, farrell2018, wolf2019, schiebinger2019,weinreb2018}.
Essentially all trajectory inference methods are based (either explicitly or implicitly) on the hypothesis that trajectories of cellular development are governed by a potential energy surface (i.e. Waddington's ``epigenetic landscape'' \cite{waddington1957, ferrell2012}).
Different methods attempt to recover different features of Waddington's landscape. Several widely used methods attempt to recover a graph summarizing the paths along the valley floors~\cite{trapnell2014, wolf2019, farrell2018,setty2016}, while others attempt to recover the potential directly~\cite{hashimoto2016,yeo2020}.
However, the optimization procedures in these approaches are non-convex and therefore they are not guaranteed to recover correct results, even in idealized settings where the data is generated from a synthetic model.
A growing body of work focuses instead on recovering trajectories through the transition kernels of Markov stochastic processes~\cite{weinreb2018, schiebinger2019}.
Our main contribution is to show that inference of the trajectories of a stochastic process driven by diffusion and drift is deeply linked to optimal transport (OT), a classical mathematical tool~\cite{monge1781,kantorovich1942,villani2008}, recently adapted for inferring cellular trajectories~\cite{schiebinger2019}.
We leverage this connection to develop rigorous theoretical guarantees and a computationally efficient methodology based on OT for regression in the space of stochastic processes.
Our paper is organized as follows: an overview of single cell measurement technologies is presented in Section~\ref{sec:measurement}.
In Section~\ref{sec:drift_diff_proc} we formulate the trajectory inference problem in terms of recovering the law on paths of a stochastic differential equation (SDE).
We then introduce our main results in Sections~\ref{sec:theory_no_growth} -- \ref{sec:methodology}.
Our main theoretical result (Theorem~\ref{th:linkOTSDE_noisy}) states that, in the limit of infinite data, the solution to a certain convex optimization problem recovers the ground truth law on paths of the SDE.
We defer the proof of this result in order to first introduce a concrete version of this optimization problem that can be solved efficiently to recover trajectories in practice (Section~\ref{sec:methodology}). This forms the basis of our computational method named Global Waddington-OT (gWOT). A brief summary of related work on trajectory inference is also presented in Section~\ref{sec:related_work}.
We then give the proofs of our theoretical results in Section~\ref{sec:theory_details}, and finally illustrate our methodology in practice with both simulated and biological data in Section~\ref{sec:numerical_results}. Finally, we conclude with a discussion in Section~\ref{sec:discussion}.
\subsection{An introduction to single cell measurement technologies}
\label{sec:measurement}
The human body is composed of roughly 20 trillion cells.
While all of these cells share essentially the same DNA, different types of cells perform vastly different functions.
This diversity is even present within individual tissues; for example, there are hundreds of distinct cell types in the brain including supportive cells like astrocytes and glia in addition to neurons, which can be further divided into subtypes such as excitatory and inhibitory.
Classical efforts to describe this diversity of cells have relied on dissecting tissues and attempting to sort out pure sub-populations according to a handful of surface markers. The state of these subpopulations could then be quantified, e.g. by measuring gene expression levels through sequencing RNA transcripts, by examining methylation patterns of DNA~\cite{methyl}, or its three dimensional structure~\cite{HiC}.
In contrast, {\em single-cell measurement technologies} quantify the states of individual cells without first sorting out specific sub-populations. This provides an unbiased sample of cell states, without requiring prior knowledge of markers defining specific sub-populations.
In order for a tissue to be profiled using a single-cell measurement technique, the strong connections between cells must be broken to form a suspension of cells in liquid. Individual cells are then isolated in microfluidic droplets surrounded by oil. Information about cellular state is then encoded in artificial DNA segments which are then sequenced. For example, in scRNA-seq, RNA transcripts are captured and converted to DNA through reverse transcription, whereas scATAC-seq works by ``attacking'' the genome with an enzyme that cuts out small segments of DNA. Sequencing these segments gives an idea of which parts of the genome are accessible to enzymes, and which parts are de-activated.
These technologies produce high-dimensional measurements of cell state. For example, scRNA-seq produces a $\sim\!\!20,000$ dimensional gene expression vector for each cell, the $i$th coordinate of which encodes the number of molecules of RNA captured for the $i$th gene.
Fuelled by the exponential decrease in the cost of sequencing, the throughput of single cell measurement technologies has rapidly increased over the past few years. As a result, it is becoming routine to collect hundreds of thousands or even millions of cells in a single study.
While some efforts have focused on cataloging the cell types that exist and applying clustering algorithms to identify new cell types~\cite{HCA}, some of the most interesting challenges relate to analyzing dynamical processes, where multiple cell types emerge from a stem cell progenitor.
However, because the measurement process involves grinding up the tissue and isolating individual cells, it is not possible to directly observe the trajectories cells trace out as they differentiate.
{The goal of trajectory inference is to recover these trajectories from static snapshots captured with single cell measurement technologies.}
\subsection{A generative model for developmental trajectories}
\label{sec:drift_diff_proc}
We model cells as evolving and proliferating in a high-dimensional space of cell states (e.g. gene expression space), a representation of which we take to be $\mathcal{X} = \mathbb{R}^d$ with $d$ potentially large. The mathematical description we adopt is a drift-diffusion process with branching: the evolution of any cell over an infinitesimal time interval $\diff t$ is governed by the SDE
\begin{align}
\diff X_t = \bvec{v}(t,X_t) \diff t + \sigma \diff B_t, \label{eq:diffusion_drift_sde}
\end{align}
where $X_t\in \mathcal{X}$ denotes the state of the cell at time $t$ and $\diff B_t$ is the increment of a $d$-dimensional Wiener process and $\sigma^2$ is the diffusion coefficient.
As $t$ varies, $X_t$ describes a path, or {\em trajectory}, through $\mathcal{X}$, and the SDE~\eqref{eq:diffusion_drift_sde} induces a probability law on such trajectories. Our goal will be to recover this law on trajectories from data (see Section~\ref{sec:inference_problem}).
To model the birth and death of cells, we employ the following classical branching mechanism: each cell is equipped with an exponential clock of rate $\tau^{-1}$. When the clock rings, the cell dies with probability $p_d$, or splits into two cells with probability $p_b = 1 - p_d$. We allow $\tau, p_d, p_b$ to vary in both space and time. \emph{A priori}, they may also depend on the position of other cells. We provide a conceptual illustration of particle trajectories from this branching process in Figure \ref{fig:branching_cartoon} for the case of potential driven dynamics, i.e. where $\bvec{v} = -\nabla \Psi$.
A population of cells is modeled as a probability distribution on $\mathcal{X}$. In the limit of a very large number of cells, we assume this distribution has a density $\rho_t(x)$ at time $t$ and position $x \in \mathcal{X}$. This density solves the following partial differential equation:
\begin{align}
\frac{\partial \rho}{\partial t} = - \mathrm{div}( \rho \bvec{v} ) + \frac{\sigma^2}{2} \Delta \rho + J \rho \label{eq:diffusion_drift_branching_pde}
\end{align}
where the three terms on the right correspond respectively to the effects of drift, diffusion and growth. Here $J : \mathbb{R}^d \to \mathbb{R}$ describes the average growth rate (with $J > 0$ if cells are dividing and $J < 0$ if cells are dying) and is linked to the microscopic parameters by $J = \tau^{-1} ( p_b - p_d )$.
With the additional assumption that the drift $\bvec{v}$ is the gradient of a potential, this PDE is exactly the one used in \cite{weinreb2018}.
Though this model is simple and nicely fits in the mathematical theory we develop later, there exist more accurate descriptions of gene expression dynamics that are derived from first principles of chemical systems \cite{schaffter2011}.
\begin{figure}[h]
\centering
\includegraphics[width = 0.75\linewidth]{cartoon_branching-eps-converted-to.pdf}
\caption{Illustration of example cell trajectories (in blue) of a diffusion-drift process \eqref{eq:diffusion_drift_sde} with branching in the case where $\bvec{v}(x) = -\nabla \Psi(x)$, i.e. there is a potential landscape. Green dots correspond to branching events, red crosses correspond to cell death, and blue circles represent cell states stopped at the final time $t_\mathrm{max}$.}
\label{fig:branching_cartoon}
\end{figure}
\paragraph{Cell-cell interactions can be incorporated via time-varying drift.} The time-dependence of the drift $\bvec{v}(t,\cdot)$ allows for cell-cell interactions at the population level in the following sense:
In the presence of cell-cell interactions, each cell $X_t^i$ experiences a drift $\bvec{v}^i$ which is a function of the position of all other cells, i.e.
\begin{align*}
\bvec{v}^i(X^i_t) = \bar{\bvec{v}} \bigl( X^i_t, \{X^j_t\}_{j \neq i} \bigr).
\end{align*}
Then if there are a large number of cells, we can reasonably make a mean field approximation
\begin{align*}
\bvec{v}^i(X^i_t) \approx \bar{\bvec{v}}( X^i_t, \rho_t ).
\end{align*}
In other words, we assume that the cells $\{X^j_t\}_{j \neq i}$ are infinitely many and distributed according to $\rho_t$. This in turn can be viewed as a time-varying vector field $\bvec{v}_t$ without interactions:
\begin{align*}
\bvec{v}_t(x) = \bar{\bvec{v}}(x, \rho_t).
\end{align*}
By assuming that there are many cells, even if each individual cell has a stochastic motion, the cell-cell interactions can be averaged.
This allows us to picture the motion of cells as \emph{independent} particles moving in a \emph{time dependent} environment.
\subsection{Inference problem}\label{sec:inference_problem}
Because of the destructive nature of the measurement process, we cannot observe trajectories taken by individual cells, but only snapshots of populations in time. From a mathematical point of view, we start a process (\ref{eq:diffusion_drift_sde}) with branching, let it evolve until a time $t_1$ (the measurement time) and then we have access to the positions of cells at this time $t_1$. Then, we start a separate and independent process and let it evolve until time $t_2$. Proceeding in this way, we obtain samples from $\rho_{t_i}$ for each instant $t_1,\ldots,t_T$. We denote the samples at each time-point $t_i$ by
\begin{equation}
\label{eq:samples}
X^{1}_{t_i}, \ldots, X^{N_i}_{t_i}\sim \rho_{t_i}, \quad \text{for $i = 1,\ldots,T.$}
\end{equation}
Since at each time-point we sample from independent realizations of the process, the data from distinct times $t_i \ne t_j$ are independent:
\begin{equation*}
X^k_{t_i} \perp \!\!\! \perp X^l_{t_j} \quad \text{for $i \ne j$.}
\end{equation*}
{\bf Our goal is to reconstruct the trajectories traversed by cells from these independent samples.}
While the trajectories are determined by the drift vector field $\bvec{v}$ in the stochastic differential equation~\eqref{eq:diffusion_drift_sde}, we do not aim to recover $\bvec{v}$ directly. Instead, we aim to recover the probability law on trajectories induced by the SDE.
In the simpler setting without branching, the SDE~\eqref{eq:diffusion_drift_sde} induces a probability law on paths valued in $\mathcal{X}$, with sample paths $X_t$ describing continuous functions valued in $\mathcal{X}$ and parameterized by time $t$.
The situation is more complicated with branching because a cell can have multiple descendants at later time-points, and so the sample paths are in fact trees in $\mathcal{X}$. %
Therefore in the case with branching we aim to recover the law on paths induced by selecting a descendant at random at each bifurcation (and even if a cell would die, allow it to proceed). %
Mathematically, this is still equivalent to the law on paths corresponding to the SDE~\eqref{eq:diffusion_drift_sde}, without the additional mechanism of branching or death (i.e. $\tau = \infty$). %
Our theoretical results in Section~\ref{sec:theory_no_growth} establish that we can recover the true law on paths in the absence of branching. We develop a computationally efficient methodology in Section~\ref{sec:methodology} and in particular show how to extend the approach to the case with branching in Section~\ref{sec:growth}.
\section{Theory: reconstructing developmental curves}
\label{sec:theory_no_growth}
We begin our theoretical development by characterizing the true law on paths induced by the SDE~\eqref{eq:diffusion_drift_sde} as the solution to a certain optimization problem. For our theoretical analysis we assume that the space $\mathcal{X}$ is a compact Riemannian manifold without boundary. We lose some generality as it does not cover the case $\mathcal{X} = \mathbb{R}^d$ (because of the lack of compactness), on the other hand it encompasses curved geometries.
We focus here on the case without branching, and discuss how our results could be extended to incorporate branching in Section~\ref{sec:growth}.
Throughout this paper, we denote by $\Omega = C([0, t_\text{max}], \mathcal{X})$ the space of continuous paths valued in $\mathcal{X}$ and remark that we will typically rescale time so that $t_\text{max} = 1$.
Furthermore, the notational convention we adopt is that if $\mathbf{R} \in \mathcal{P}(\Omega)$ is a law on the space of paths, we denote by $\mathbf{R}_{t_i}$ the law of $X_{t_i}$ under $\mathbf{R}$ (it is a probability distribution over $\mathcal{X}$) while $\mathbf{R}_{t_i,t_{i+1}}$ is the law of $(X_{t_i}, X_{t_{i+1}})$ under $\mathbf{R}$ (it is a probability distribution over $\mathcal{X}^2$). Moreover, for $\alpha, \beta$ measures on $\mathcal{X}$ with the same total mass, we denote by $\Pi(\alpha,\beta)$ the set of measures on $\mathcal{X}^2$ whose marginals are $\alpha$ and $\beta$. As an example, $\mathbf{R}_{t_i,t_{i+1}} \in \Pi(\mathbf{R}_{t_i}, \mathbf{R}_{t_{i+1}})$.
\paragraph{Potential-driven dynamics} Solutions of the SDE \eqref{eq:diffusion_drift_sde} are not in general characterized by their temporal marginals. For example, if the drift $\bvec{v}$ induces a periodic motion, then the distribution of cells may be constant in time even though the individual cells themselves move. We refer to \cite{weinreb2018} for an exhaustive discussion on this issue. To remove this identifiability problem, as in \cite{weinreb2018}, we require the velocity field $\bvec{v}$ to be the gradient of a smooth time-dependent potential function, which we denote by $\Psi$. This requirement can be justified by its simplicity, but also by Theorem \ref{thm:SDE_min_KL} below which shows that the assumption makes the law of the SDE identifiable from the temporal marginals via an elegant variational characterization. Moreover, it is consistent with the assumption of a Waddington's landscape.
In short, we consider $\P$ the law of the SDE
\begin{equation}
\label{eq:diffusion_drift_sde_grad}
\diff X_t = -\nabla \Psi(t,X_t) \diff t + \sigma \diff B_t,
\end{equation}
and we denote by $\P_t = \rho_t$ the measure describing the population of cells alive at time $t$.
We also assume that the diffusion coefficient $\sigma^2$ is known.
Without this assumption, the trajectories are not uniquely determined by the marginals $\rho_{t}$.
For example, if $\Psi(x) = \frac{c_\Psi}{2} \| x \|^2 $ is a quadratic potential, then the equilibrium measure of the SDE \eqref{eq:diffusion_drift_sde_grad} is the isotropic Gaussian measure with variance $\frac{\sigma^2}{2 c_\Psi}$ in each coordinate.
Therefore one can produce the same equilibrium measure but different trajectories by making the potential steeper (by increasing $c_\Psi$) while also increasing $\sigma$ so that the ratio $\frac{c_\Psi}{\sigma^2}$ is constant. We prefer to look at the scenario where $\sigma$ is known but not $\Psi$ as the latter contains more information about the trajectories of the cells.
\paragraph{Developmental curves}
As developmental time varies, the marginals $\P_t$ describe a curve in $\mathcal{P}(\mathcal{X})$, the space of probability measures on $\mathcal{X}$.
We illustrate this concept in Figure~\ref{fig:paths_perspective}(a), and note that even if the population has multiple modes, i.e. multiple cell types in different regions of $\mathcal{X}$, there is still only a single curve in $\mathcal{P}(\mathcal{X})$.
While many different trajectories could produce the same temporal marginals, Theorem~\ref{thm:SDE_min_KL} below shows that the true law on paths $\P$ is closest to Brownian motion, in the sense of relative entropy.
{\bf Therefore we can uniquely reconstruct the law on paths $\P$ from the curve of marginals $\P_t$.}
\begin{figure}[h]
\centering
\begin{subfigure}{0.5\linewidth}
\centering\includegraphics[width = \linewidth]{cartoon_curve.pdf}
\caption{}
\end{subfigure} \\
\begin{subfigure}{0.75\linewidth}
\centering\includegraphics[width = \linewidth]{cartoon.pdf}
\caption{}
\end{subfigure}
\caption{(a) A stochastic process valued in $\mathcal{X}$ can be thought of as a curve valued in $\mathcal{P}(\mathcal{X})$ parameterized by time. (b) Illustration of the inference problem: at each time-point particles (red) are sampled from an underlying ground truth process (green). From these samples, we seek to estimate the underlying law on paths (blue). }
\label{fig:paths_perspective}
\end{figure}
Before we state the theorem, we introduce some notation.
The relative entropy between two probability measures $\alpha, \beta$ on a space $\Omega$ is
\begin{equation}
\mathrm{H}(\alpha|\beta) = \int_\Omega \log\left ( \frac{\diff \alpha}{\diff \beta} \right) \diff \alpha .\label{eq:ent}
\end{equation}
We denote by $\mathbf{W}^\sigma$ the law of the reversible Brownian motion on $\mathcal{X}$ with diffusivity $\sigma^2$, it is an element of $\mathcal{P}(\Omega)$ the set of probability distributions over $\Omega$. Here, ``reversible'' means that the initial condition is $\mathbf{W}_0^\sigma = \mathrm{vol}$, where $\mathrm{vol}$ is the normalized volume measure on $\mathcal{X}$ satisfying $\mathrm{vol}(\mathcal{X}) = 1$. Thus for every $t \geqslant 0$ there holds $\mathbf{W}_t^\sigma=\mathrm{vol}$. Finally,
\begin{theorem}
\label{thm:SDE_min_KL}
Recall that $\P \in \mathcal{P}(\Omega)$ is a law solution of the SDE \eqref{eq:diffusion_drift_sde_grad} and suppose $\mathrm{H}(\P_0|\mathrm{vol}) < \infty$, and consider $\mathbf{R} \in \mathcal{P}(\Omega)$ any probability measure on the set of $\mathcal{X}$-valued paths satisfying $\mathbf{R}_t = \P_t$ for all $t \in [0,t_\text{max}]$.
Then there holds
\begin{equation*}
\mathrm{H}(\P|\mathbf{W}^\sigma) \leqslant \mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma),
\end{equation*}
with equality if and only if $\P = \mathbf{R}$.
\end{theorem}
We refer to Section \ref{sec:theory_details} and in particular Theorem \ref{thm:SDE_grad_min_KL} for a precise statement and a proof.
In other words, with perfect knowledge of just the marginals $\P_t$, one can recover the full law on paths of $\P$ by minimizing the strictly convex functional $\mathrm{H}(\cdot|\mathbf{W}^\sigma)$. Moreover, even though we consider here relative entropies of probability measures supported on a large space (the space of $\mathcal{X}$-valued paths), such a quantity decomposes nicely on a temporal discretization and its minimization leads ultimately to tractable problems as detailed in Section \ref{sec:methodology}.
Though not exactly phrased like this, Theorem \ref{thm:SDE_min_KL} can be read implicitly in the works of the community working on the Schrödinger problem~\cite{leonard2013}, a sub-field of optimal transport~\cite{monge1781,kantorovich1942,villani2008}. The proof that we present was suggested to us by Aymeric Baradat.
In the simplest version of this result, we observe two snapshots of particles which have diffused according to pure Brownian motion; then the law on paths connecting the two snapshots can be reconstructed via entropy-regularized optimal transport, which is equivalent to entropy minimization on the space $\mathcal{P}(\Omega)$.
Theorem \ref{thm:SDE_min_KL} extends this to involve drift and more than two time-points.
For further background on optimal transport, entropic regularization, the connection to entropy minimization on $\mathcal{P}(\Omega)$, and the Schrödinger problem, we refer the reader to Appendix~\ref{sec:background_OT}.
\paragraph{Reconstructing curves from data}
In practice we will have imperfect information about the marginals, obtained by observing finite samples at various time-points~\eqref{eq:samples}, from which we form the empirical distributions
\begin{equation}
\hat \rho_{t_i} = \frac 1 {N_i} \sum_{j=1}^{N_i} \delta_{X_{t_i}^j} \quad \text{for $i = 1,\ldots,T$}.
\end{equation}
Figure~\ref{fig:paths_perspective}(b) illustrates the inference problem: we seek to reconstruct an approximation of $\P$ from these empirical distributions.
We view these empirical distributions as noisy data along the true curve $\P_t$ (Figure~\ref{fig:curve_perspective}), and develop an approach to recover the true curve (and hence the true law on paths $\P$) from these noisy data.
For our theoretical analysis, we work with absolutely continuous data $\widehat{\rho}^\epstheo_{t_i}$ obtained by convolving $\widehat{\rho}_{t_i}$ against a Gaussian of width $\epstheo$, which shrinks to $0$ as $T$ grows. (More precisely, as we are on a Riemannian manifold we use the heat flow to regularize measures). Note that the practical method we introduce in Section~\ref{sec:methodology} does not introduce this convolution.
The approach we take involves a trade-off between passing close to the observed data and minimizing the regularizing functional $\mathrm{H}(\cdot|\mathbf{W}^\sigma)$, motivated by Theorem \ref{thm:SDE_min_KL} above.
Specifically, we attempt to recover $\P$ by minimizing the convex functional
\begin{equation}
\label{eq:opt_theory}
F_{T,\lambda,\epstheo}( \mathbf{R}) := \sigma^2 \mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) + \frac{1}{\lambda} \sum_{i=1}^{T} |t_{i+1} - t_i| \, \mathrm{H}(\widehat{\rho}^{\epstheo}_{t_i} | \mathbf{R}_{t_i}) ,
\end{equation}
which takes as its argument a law on paths $\mathbf{R} \in \mathcal{P}(\Omega)$.
Here $\lambda$ is a regularization parameter specifying the trade-off between data-fitting (second term) and regularization (first term). Up to a constant which does not depend on $\mathbf{R}_{t_i}$ (hence is irrelevant in the minimization), the data-fitting term $\mathrm{H}(\widehat{\rho}^{\epstheo}_{t_i} | \mathbf{R}_{t_i}) $ corresponds to a \emph{cross-entropy} and can be understood as a log-likelihood of the data $\widehat{\rho}^{\epstheo}_{t_i}$ given the reconstructed marginal $\mathbf{R}_{t_i}$. In fact, it would be exactly a log-likelihood if we had not convolved the empirical distributions $\hat \rho_{t_i}$ with a Gaussian. We defer to Section \ref{sec:theory_details} and in particular Remark \ref{rk:choice_DF} for additional comments on the choice of this term. We have included a factor $\sigma^2$ in front of $\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma)$ because this is the appropriate scaling in the limit $\sigma \to 0$, see \cite[Section 5]{leonard2013} (note however that in our analysis $\sigma^2$ is fixed).
To build intuition, consider the case where the number of samples $N_i$ at each time $t_i$ is extremely large, so that we have essentially perfect knowledge of the marginals $\rho_{t_i}$ at times $t_1, \ldots, t_T$. Then we may choose $\lambda$ to be vanishingly small so that the optimal $\mathbf{R}$ will pass precisely through the data at the measured times $t_i$.
The entropic regularization term then determines the behavior of the curve at intermediate time-points: the segments are given by connecting $\rho_{t_i}$ to $\rho_{t_{i+1}}$ with entropy-regularized optimal transport, which is equivalent to entropy-minimization.
Roughly speaking (although this should only be relied on for intuition because entropic regularization breaks the metric properties of optimal transport), the resulting curve will be piece-wise geodesic in Wasserstein space (i.e. the space of probability distributions with the optimal transport metric), as illustrated in Figure~\ref{fig:curve_perspective}. More precisely, it will be a composition of Schr\"odinger bridges between marginals $\rho_{t_i}$ and $\rho_{t_{i+1}}$. This ``gluing'' approach was essentially the one used by Schiebinger et al. \cite{schiebinger2019} under the name Waddington-OT, where the recovered law on paths $\mathbf{R}$ can be constructed from consecutive pairwise laws $\mathbf{R}_{t_i, t_{i+1}}$ specified by
\begin{align}
\inf_{\mathbf{R}_{t_i, t_{i+1}} \in \Pi(\widehat{\rho}_{t_i}, \widehat{\rho}_{t_{i+1}})} \mathrm{H}(\mathbf{R}_{t_i, t_{i+1}} | \mathbf{W}^{\sigma}_{t_i, t_{i+1}}).\label{eq:gluing}
\end{align}
We now state our main theoretical result that encompasses a more general case than what we just described.
\begin{theorem}
\label{th:linkOTSDE_noisy}
Let $\mathbf{R}^{T,\lambda,\epstheo}$ denote the minimizer of $F_{T,\lambda,\epstheo}$. Then in the limit $T\to \infty$, followed by $\lambda \to 0, \epstheo \to 0$, we have that $\mathbf{R}^{T,\lambda,\epstheo}$ converges narrowly to $\P$ in the space $\mathcal{P}(\Omega)$ of probability distributions on the space of $\mathcal{X}$-valued paths.
\end{theorem}
This theorem establishes that the true law on paths $\P$ can be recovered through convex optimization.
We refer again to Section \ref{sec:theory_details} and in particular Theorem \ref{theo:main_convergence} for a precise statement and a proof. Importantly, we do not assume that $N_i \to + \infty$, that is, each $\hat \rho_{t_i}$ does not converge to $\P_{t_i} = \rho_{t_i}$; encoded in Theorem \ref{th:linkOTSDE_noisy} is a regularizing effect in the temporal variable. However, we emphasize that this result is not quantitative and we do not have a rate of convergence. As we discuss in Section~\ref{sec:discussion}, this quantitative rate might be determined by some notion of the ``curvature''.
A quantification would also most likely lead to a result where $\lambda$ and $\epstheo$ decay slowly enough (at a rate depending on $T$ and the $N_i$) for the convergence to hold.
Overall, this perspective of developmental curves motivates a practical and computationally efficient approach for recovering developmental trajectories from snapshots collected at various time-points. We introduce a computational methodology to do so in Section~\ref{sec:methodology}, and we discuss in Section~\ref{sec:growth} the extension of our theoretical results to the case with branching.
\subsection{A note on reconstructing the drift $\bvec{v}$} \label{sec:reconstruction_of_drift}
We note that once the law $\mathbf{R}$ is known, information about the drift $\bvec{v}$ can be recovered via a regression problem. Indeed, at least on short timescales, if $(X_t)_{t \in [0,t_\text{max}]}$ has a law given by $\mathbf{R}$ and $\mathcal{X}$ is flat,
\begin{align}
\bvec{v}(t_i, x) \sim \mathbb{E}_{\mathbf{R}} \left[ \left. \frac{X_{t_{i+1}} - X_{t_i} }{ t_{i+1} - t_{i} } \right| X_{t_i} = x \right] \label{eq:drift_reconstruct}
\end{align}
Thus, one can set up a learning problem (e.g. taking $\bvec{v}$ to belong to a parametric class of functions) to find a $\bvec{v}$ which approximates the right hand side. Alternatively it is possible to estimate the drift at each observed point by directly computing the expectation \eqref{eq:drift_reconstruct}, as we do later in Section \ref{sec:tristable}.
\section{Methodology: Wasserstein regression}\label{sec:methodology}
\begin{figure}
\centering\includegraphics[width = 0.5\linewidth]{comparison-eps-converted-to.pdf}
\caption{Conceptual illustration of our global regression method (gWOT) compared to the straightforward Waddington-OT ``gluing'' approach.}
\label{fig:curve_perspective}
\end{figure}
Our theoretical results establish that the law on paths $\P$ of a stochastic differential equation~\eqref{eq:diffusion_drift_sde_grad} can be recovered from snapshot data~\eqref{eq:samples} by minimizing the convex functional~\eqref{eq:opt_theory}. In this section our aim will be to detail the development of a tractable, finite-dimensional optimization problem~\eqref{eq:optim_dsc_time} motivated by the theory~\eqref{eq:opt_theory} which is amenable to efficient computational solution. In Figure \ref{fig:curve_perspective} we provide a conceptual illustration in terms of curves valued in $\mathcal{P}(\mathcal{X})$. As we discussed in Section \ref{sec:theory_no_growth}, $\{\widehat{\rho}_{t_i}\}_{i = 1}^T$ are noisy samples from the ground truth process $\P$. The ``gluing'' approach \eqref{eq:gluing} as per Waddington-OT in this regime of limited data results in a reconstructed law that is a poor estimate of the true law. Our proposed method, Global Waddington-OT (gWOT), optimizes in both the marginals $\mathbf{R}_{t_i}$ and couplings to produce a global regression that counteracts noisy fluctuations in the data introduced by sampling effects.
\subsection{Discretization in space and time}
\label{sec:discretization_time_space}
\paragraph{Time discretization}
For the moment, $\mathcal{X}$ still denotes a compact Riemannian manifold and $\mathbf{W}^\sigma \in \mathcal{P}(\Omega)$ is the law of the reversible Brownian motion with diffusivity $\sigma^2$.
Following the theoretical framework of Section~\ref{sec:theory_no_growth}, we consider a general setting where we seek to minimize a loss functional $\mathrm{L} : \mathcal{P}(\Omega) \to [0, \infty]$ over laws on paths $\mathbf{R}$ in continuous space and time,
\begin{equation}
\label{eq:optim_ctstime}
\mathrm{L}(\mathbf{R}) = \lambda \mathrm{Reg}(\mathbf{R}) + \mathrm{Fit}(\mathbf{R}_{t_1}, \ldots, \mathbf{R}_{t_T}), \quad \mathbf{R} \in \mathcal{P}(\Omega),
\end{equation}
where we choose the regularization term $\mathrm{Reg}(\mathbf{R})$ to be the relative entropy of laws on paths as per \eqref{eq:opt_theory}, given by $$\mathrm{Reg}(\mathbf{R}) = \sigma^2 \mathrm{H}(\mathbf{R} | \mathbf{W}^\sigma).$$
In addition, we impose that the data-fitting term $\mathrm{Fit}(\cdot)$ be local in time, that is, it will only depend on the temporal marginals of $\mathbf{R}$ at the measurement times $t_1, \ldots, t_T$. The setting of Theorem \ref{th:linkOTSDE_noisy} corresponds to $\mathrm{Fit}(\cdot)$ being a sum of cross-entropies of the reconstructed marginals relative to the measurements, and in practice we explore a more general data-fitting term as discussed in Section~\ref{sec:choice_of_data_fitting}.
Although the minimization in \eqref{eq:optim_ctstime} is \emph{a priori} over the very large space of laws on paths $\mathcal{P}(\Omega)$, the minimizer can in fact be characterized by solving a corresponding minimization problem in a much smaller space of marginals $\mathcal{P}(\mathcal{X}^2)^{T-1}$. To see this, let $\mathbf{R}^\star$ be the minimizer of the loss functional $\mathrm{L}$ defined in \eqref{eq:optim_ctstime}. As $\mathrm{Fit}(\cdot)$ depends only on the temporal marginals, we know that $\mathbf{R}^\star$ minimizes $\mathrm{H}(\cdot|\mathbf{W}^\sigma)$ among all $\mathbf{R}$ such that $\mathbf{R}^\star_{t_i} = \mathbf{R}_{t_i}$ for $i \in \{1, \ldots, T\}$. By arguments already developed elsewhere (see for instance \cite[Section 2]{benamou2019}) the joint law $\mathbf{R}^\star_{t_1, \ldots, t_T}$ at the instants $t_1,\ldots,t_T$ is enough to reconstruct $\mathbf{R}^\star$. Moreover, one needs only to reconstruct the pairwise couplings $\mathbf{R}^\star_{t_i, t_{i+1}}$ for $i \in \{ 1,\ldots,T-1 \}$ to recover $\mathbf{R}^\star_{t_1, \ldots, t_T}$. Indeed, a general argument (\cite[Theorem 4.5]{baradat2020minimizing}) shows that the minimizer of $\mathrm{H}(\cdot|\mathbf{W}^\sigma)$ with marginal constraints on a subset of $[0,t_\text{max}]$ is a Markov process (as soon as $\mathbf{W}^\sigma$ is a Markov process).
These considerations are summarized below.
First, let us recall the definition of the entropic regularization of optimal transport.
\begin{definition} \label{def:ot}
Let $\pi_0$ be a given initial distribution on $\mathcal{X}$ and let $\pi_0 \mathbf{W}^{\sqrt{\varepsilon}}$ be the law of a Brownian motion with diffusivity $\varepsilon$ started from initial distribution $\pi_0$ at time $t = 0$. Let $\pi_0 \mathbf{W}^{\sqrt{\varepsilon}}_{0, 1}$ be the joint law of this process at times $t = 0, 1$. Then for positive measures $\alpha, \beta$ of equal mass on $\mathcal{X}$, we define
\begin{align}
\mathrm{OT}_\varepsilon(\alpha, \beta; \pi_0) &= \inf_{\gamma \in \Pi(\alpha, \beta)} \varepsilon \mathrm{H}(\gamma | \pi_0 \mathbf{W}^{\sqrt{\varepsilon}}_{0, 1}). \label{eq:entropic_ot}
\end{align}
\end{definition}
\noindent The reader not familiar with optimal transport is referred to Appendix \ref{sec:background_OT} for a background on such theory and its link with entropy minimization.
The key proposition to handle time discretization (which effectively comes from \cite{benamou2019}) is the following.
\begin{prop}\label{prop:time_disc}
The minimizer $\mathbf{R} \in \mathcal{P}(\Omega)$ of \eqref{eq:optim_ctstime} has marginals $\mathbf{R}_{t_1}, \ldots, \mathbf{R}_{t_T} \in \mathcal{P}(\mathcal{X})$ at instants $t_1, \ldots, t_T$ respectively that are the unique minimizers of the discrete-time functional
\begin{align}
\inf_{\mathbf{R}_{t_1}, \ldots, \mathbf{R}_{t_T} \in \mathcal{P}(\mathcal{X})} \lambda \mathrm{Reg}(\mathbf{R}_{t_1}, \ldots, \mathbf{R}_{t_T}) + \mathrm{Fit}(\mathbf{R}_{t_1}, \ldots, \mathbf{R}_{t_T}), \label{eq:optim_dsc_time}
\end{align}
where
\begin{align}
\label{eq:reg_disc_time}
\mathrm{Reg}(\mathbf{R}_{t_1}, \ldots, \mathbf{R}_{t_T}) &= \dfrac{1}{\Delta t_1} \mathrm{OT}_{\sigma^2 \Delta t_1} (\mathbf{R}_{t_1}, \mathbf{R}_{t_2}; \pi_0) + \sum_{i = 2}^{T-1} \dfrac{1}{\Delta t_i} \mathrm{OT}_{\sigma^2 \Delta t_i} (\mathbf{R}_{t_i}, \mathbf{R}_{t_{i+1}}; \mathbf{R}_{t_i}).
\end{align}
Here, we have written $\pi_0$ for the stationary distribution of the heat flow on $\mathcal{X}$.
Furthermore, for $1 \leq i \leq T-1$ let $\mathbf{R}_{t_i, t_{i+1}} \in \Pi(\mathbf{R}_{t_i}, \mathbf{R}_{t_{i+1}})$ be the optimal coupling of Definition \ref{def:ot} corresponding to each of the terms $\mathrm{OT}_{\sigma^2 \Delta t_1} (\mathbf{R}_{t_1}, \mathbf{R}_{t_2}; \pi_0)$, $\mathrm{OT}_{\sigma^2 \Delta t_i} (\mathbf{R}_{t_i}, \mathbf{R}_{t_{i+1}}; \mathbf{R}_{t_i})$. Now let
\begin{align}
\mathbf{R}_{t_1, \ldots, t_T} &= \mathbf{R}_{t_1, t_2} \circ \cdots \circ \mathbf{R}_{t_{T-1}, t_T} \label{eq:concat_finite_dim}
\end{align}
be the unique Markov process whose pairwise couplings are $\mathbf{R}_{t_i, t_{i+1}}$. Then the law $\mathbf{R}$ that minimizes \eqref{eq:optim_ctstime} can be fully characterized in terms of the finite-dimensional distribution \eqref{eq:concat_finite_dim} by
\begin{align}
\mathbf{R}(\cdot) = \int_{\mathcal{X}^T} \mathbf{W}^\sigma(\cdot | x_1, \ldots, x_T) \diff\mathbf{R}_{t_1, \ldots, t_T}(x_1, \ldots, x_T)
\end{align}
where $\mathbf{W}^\sigma(\cdot | x_1, \ldots, x_T)$ is the law of the reversible Brownian motion with diffusivity $\sigma^2$ conditioned on passing through $x_1, \ldots, x_T$ at times $t_1, \ldots, t_T$ respectively.
\end{prop}
\begin{proof}
See Appendix \ref{sec:proof_time_dis}.
\end{proof}
\paragraph{Space discretization}
We insist that in the discussion of the previous subsection, no information is lost between \eqref{eq:optim_ctstime} and \eqref{eq:optim_dsc_time} (and the convergence when $T \to + \infty$ is guaranteed by Theorem \ref{th:linkOTSDE_noisy}). On the other hand, upon introducing a discretization of the space $\mathcal{X}$ we must necessarily depart from our theoretical framework and we have for the moment no guarantee of convergence.
Necessarily we will restrict to working on a discrete approximation of the space $\mathcal{X}$ and thus deal with discrete measures supported on a fixed finite set $\overline{\mathcal{X}} \subset \mathcal{X}$. Since the space $\mathcal{X}$ is assumed to have high dimension, discretization by gridding is infeasible although we remark that our approach can certainly be applied directly to a gridded space. Therefore, by default we choose $\overline{\mathcal{X}}$ to be equal to the union of all measured points, i.e.
$$\bigcup_{i = 1}^{T} \mathrm{supp}(\hat \rho_{t_i}) = \overline{\mathcal{X}},$$
but in general $\overline{\mathcal{X}}$ may be taken to be larger. See Section \ref{sec:aug_supp} where we explore a strategy to add points to the support.
Next we will need to replace each term in \eqref{eq:optim_dsc_time} with its discrete counterpart. The $\mathbf{R}_{t_i}$ are now probability distributions on $\overline{\mathcal{X}}$ and can be represented by vectors in the probability simplex in $\mathbb{R}^{|\overline{\mathcal{X}}|}$. The pairwise couplings $\mathbf{R}_{t_i,t_{i+1}}$ can likewise be represented by a matrix of size $|\overline{\mathcal{X}}| \times |\overline{\mathcal{X}}|$ whose row and column sums correspond to $\mathbf{R}_{t_i}$ and $\mathbf{R}_{t_{i+1}}$ respectively. Finally, $\mathbf{W}^\sigma$ is replaced by a Markov chain on $\overline{\mathcal{X}}$ that can be thought of as approximating the reversible Brownian motion on $\overline{\mathcal{X}}$, i.e. the process with transition probabilities given by
\begin{align}
\mathbb{P}(X_{t_{i+1}} = x | X_{t_i} = x') &\propto \exp\left(-\frac{1}{2\sigma^2 \Delta t_i} \| x - x'\|^2\right), \quad x, x' \in \overline{\mathcal{X}}. \label{eq:disc_markov_chain}
\end{align}
The discretized space $\overline{\mathcal{X}}$ is in general not a faithful approximation of $\mathcal{X}$: in the case where $\overline{\mathcal{X}}$ is comprised of observed samples, we are explicitly constrained to a region of the ambient space $\mathcal{X}$ that is visited by the process. Thus, the stationary distribution of the Markov chain \eqref{eq:disc_markov_chain} encoding the heat flow on $\overline{\mathcal{X}}$ will not resemble that of the process in the continuous space $\mathcal{X}$. In practice therefore, the choice of $\pi_0$, the initial distribution of $\mathbf{W}^\sigma$, is up to the user, and we will adopt the convention of taking it to be uniform on $\overline{\mathcal{X}}$. The overall question of convergence of the Markov process on $\overline{\mathcal{X}}$ to the Wiener measure is not an easy one, as it is related to the convergence of a graph Laplacian on $\overline{\mathcal{X}}$ to the Laplace--Beltrami operator on $\mathcal{X}$. We prefer not to tackle the question of finding a convergent space discretization in the present article and leave it for future work.
\subsection{Choice of the data-fitting term} \label{sec:choice_of_data_fitting}
Before presenting in detail our choice of data-fitting term, let us introduce the following additional notation for optimal transport in the context of discrete measures. It corresponds to the usual definition of entropy-regularized optimal transport for discrete measures~\cite{peyre2019}.
\begin{definition}
If $\alpha, \beta$ are two measures of the same mass on a discrete space $\overline{\mathcal{X}} \subset \mathbb{R}^d$, we can drop the semicolon in $\mathrm{OT}$ and define
\begin{align*}
\mathrm{OT}_\varepsilon(\alpha, \beta) &= \inf_{\gamma \in \Pi(\alpha, \beta)} \varepsilon \mathrm{H}(\gamma | K_\varepsilon) - \varepsilon \inner{\mathbf{1} \otimes \mathbf{1}, \gamma},
\end{align*}
where $(K_\varepsilon)_{ij} = \exp\left(-\frac{1}{2\varepsilon}\|x_i - x_j\|^2\right)$ for all $x_i, x_j \in \overline{\mathcal{X}}$, commonly known as the Gibbs kernel \cite{peyre2019}, while $\mathbf{1} \otimes \mathbf{1}$ is the vector indexed by $\overline{\mathcal{X}} \times \overline{\mathcal{X}}$ all of whose entries are $1$. Note that in the case of normalized measures $\alpha$ and $\beta$, the final term results in only a constant offset.
\end{definition}
In order to improve the performance of our scheme in practice when available data is limited compared to the theoretical setting, we make some modifications to the form of the data-fitting functional $\mathrm{Fit}(\cdot)$. In particular, we will take it to be
\begin{align}
\mathrm{Fit}(\mathbf{R}_{t_1}, \ldots, \mathbf{R}_{t_T}) &= \sum_{i = 1}^{T} w_i \enskip \inf_{\widehat{\mathbf{R}}_{t_i}} \left[ \mathrm{OT}_{\varepsilon_i} (\mathbf{R}_{t_i}, \widehat{\mathbf{R}}_{t_i}) + \lambda_i \mathrm{H}(\widehat{\rho}_{t_i} | \widehat{\mathbf{R}}_{t_i}) \right], \label{eq:dffunc}
\end{align}
where the parameters $w_i > 0, \sum_i w_i = 1$ may be specified by the user to assign varying weights for each time-point (by default, we use uniform weights $w_i = T^{-1}$ when each time-point has the same number of observed particles).
Compared to the form of the functional~\eqref{eq:opt_theory} in Section \ref{sec:theory_no_growth}, which enforces that the reconstructed marginals $\mathbf{R}_{t_i}$ are close to the measured data $\widehat{\rho}_{t_i}$ directly via a cross-entropy term $\mathrm{H}(\widehat{\rho}_{t_i} | \mathbf{R}_{t_i})$, we instead opt for a more lenient data-fitting term through addition of an entropy-regularized optimal transport term $\mathrm{OT}_{\varepsilon_i} (\mathbf{R}_{t_i}, \widehat{\mathbf{R}}_{t_i})$ that connects the reconstructed marginal $\mathbf{R}_{t_i}$ to an intermediate marginal $\widehat{\mathbf{R}}_{t_i}$. This is then compared to the data $\widehat{\rho}_{t_i}$ by a cross-entropy term $\mathrm{H}(\widehat{\rho}_{t_i} | \widehat{\mathbf{R}}_{t_i})$. Here $\lambda_i$ tunes the balance between the cross-entropy and the optimal transport term, while the strength of the entropic regularization $\varepsilon_i$ can be chosen by the user.
This specific modification is motivated by observations of the behavior of several choices of data-fitting functionals in a setting where limited data are available. The cross-entropy $\mathrm{H}(\widehat{\rho}_{t_i} | \mathbf{R}_{t_i})$ in particular is local in space and therefore is only sensitive to pointwise agreement between $\mathbf{R}_{t_i}$ and $\widehat{\rho}_{t_i}$, that is, the recovered marginal $\mathbf{R}_{t_i}$ must place some mass at each point in the supports of the measurements $\widehat{\rho}_{t_i}$. In practice when available data is limited, measured data points may be subject to significant sampling noise and thus not accurately reflect the characteristics of the underlying process. This may then result in undesirable artifacts and fluctuations in the reconstructed marginals, where the reconstructed process is forced ``out of its way'' to pass through all measured data points. This effect is illustrated in Figure \ref{fig:datafitting_counterexample2}.
In order to remedy this issue, we bestow upon the data-fitting term some awareness of the structure of the underlying space $\overline{\mathcal{X}}$ by allowing for some spatial rearrangement of mass from $\mathbf{R}_{t_i}$ to $\widehat{\mathbf{R}}_{t_i}$ via the optimal transport term $\mathrm{OT}_{\varepsilon_i}(\mathbf{R}_{t_i}, \widehat{\mathbf{R}}_{t_i})$.
We find that the choice of a hybrid data-fitting functional \eqref{eq:dffunc} yields a reconstruction with the highest quality even in the regime of high sampling-induced noise.
On the other hand, another choice would be to use a purely optimal transport loss function between $\mathbf{R}_{t_i}$ and $\widehat{\rho}_{t_i}$ as present in the literature \cite{hashimoto2016, yeo2020}.
We did not choose this option since in the regime when $N_i$ is small, fluctuations in the observations due to finite sampling would have to be accounted for by transportation of mass. Therefore, minor discrepancies between the reconstruction and the data would incur an unreasonably high cost. As an example, a pure optimal transport loss performs poorly in a setting where particles are sampled from a bistable process with $N_i = 1$, as we illustrate in Figure \ref{fig:datafitting_counterexample2}: the reconstructed mass is placed equidistant from either mode and is clearly at the wrong place.
Finally, we remark that since entropy-regularized optimal transport is connected to Gaussian deconvolution~\cite{rigollet2018entropic}, the addition of the $\mathrm{OT}_{\varepsilon_i}$ term also has the additional effect of mimicking the Gaussian convolution in~\eqref{eq:opt_theory}. In practice, the value of the data-fitting entropic regularization parameter $\varepsilon_i$ is chosen empirically to be small enough so that the resulting diffusive effect is insignificant relative to the spatial scale of the data.
\begin{figure}
\centering
\includegraphics[width = \linewidth]{datafitting_counterexample.pdf}
\caption{Illustration of the effects of various choices for the data-fitting functional on estimated marginals of a bistable process, where samples are obtained at 50 timepoints with $N_i = 1$. CE, OT, OT+CE correspond respectively to data-fitting via cross entropy, optimal transport, and a combination of optimal transport and cross-entropy as in \eqref{eq:dffunc}. }
\label{fig:datafitting_counterexample2}
\end{figure}
\subsection{Extending to the case with branching}\label{sec:growth}
Cell division and death are an essential aspect of most biological processes, and this is the fundamental motivation for us to consider branching in (\ref{eq:diffusion_drift_sde}). However, accounting for growth is a challenging task in trajectory inference because the data (see Equation~\eqref{eq:samples}) only contain information on the \emph{relative} abundance of cells. Therefore, there is a problem of identifiability of the effects of transport and growth \cite{chizat2018scaling, fischer2019}, and failure to appropriately account for growth can result in spurious mass transport being introduced to explain the appearance or disappearance of mass.
We show here how to extend the methodology of Section \ref{sec:methodology} to the case where particles can branch, but at a rate that is known (either exactly or approximately). Specifically, we show how to modify the optimization procedure of Sections \ref{sec:discretization_time_space} and \ref{sec:choice_of_data_fitting} in order to recover the \emph{transport} component of the drift-diffusion branching process.
We model a process with growth by dispensing with the unit mass constraint. The population of cells at any time $t$ is described by a \emph{positive measure} $\rho_t$, integration over which corresponds to cell numbers or biomass. Since the marginals of such a process no longer have the same mass, we can no longer use the framework of probability laws on paths as done previously. In particular, this affects the form of the regularizing functional, which we previously took to be the relative entropy $\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma)$ on probability laws in $\mathcal{P}(\Omega)$.
With branching, it seems natural to replace the reference process $\mathbf{W}^\sigma$, which was a Brownian motion, by a \emph{branching Brownian motion} (see e.g. \cite[Chapter 1]{etheridge2000introduction}). Along these lines, the first author is currently working with Aymeric Baradat on extending the theoretical framework of Section~\ref{sec:theory_no_growth} to work directly on the law of processes with both diffusion and branching \cite{AymericHugo}. Though the work is still in progress, let us give two outputs from it. First, it is possible to prove an analogue of Theorem \ref{thm:SDE_min_KL}, where the process $\P$ is transformed into the law of a branching and diffusion process. However, such $\P$ is parameterized by a single scalar function $\Psi(t,x)$: that is, not only is the drift $-\nabla \Psi$, but the branching mechanism is also a function of $\Psi$. This leaves less freedom about what the ``ground truth'' should be, as one cannot choose independently the drift and the branching mechanism. Second, though entropy minimization in the case without branching is amenable to fast solvers via Sinkhorn's algorithm (see for instance \cite{benamou2019} for the case with many marginal constraints), such a technique seems to break when doing entropy minimization with respect to the law of a branching Brownian motion.
\paragraph{Regularizing functional with branching}
Here we present a simple modification of our regularization functional that allows us to continue using ordinary Brownian motion for the reference measure.
While the effects of branching, drift and diffusion all take place simultaneously in the process described by \eqref{eq:diffusion_drift_sde}, we introduce an artificial separation between the effects of transport and branching by alternating between a transport step that captures spatial dynamics, and a branching step that accounts for cell division and cell death.
In terms of the population-level PDE \eqref{eq:diffusion_drift_branching_pde}, this alternating scheme is equivalent to operator splitting of the drift-diffusion and branching effects \cite[Section 5.6]{petter2017finite}, where for each interval $(t_{i}, t_{i+1})$ we approximate \eqref{eq:diffusion_drift_branching_pde} by the system
\begin{align}
\frac{\partial \rho^{*}_t}{\partial t} &= J\rho^{*}_t, \quad \rho^{*}_{t_i}(\cdot) = \rho_{t_i}(\cdot) \label{eq:split_pde_growth} \\
\frac{\partial \rho_t}{\partial t} &= -\mathrm{div}(\rho_t \bvec{v}) + \frac{\sigma^2}{2} \Delta \rho_t, \quad \rho_{t_i}(\cdot) = \rho^{*}_{t_{i+1}}(\cdot) \label{eq:split_pde_transport}
\end{align}
The solution of the branching component \eqref{eq:split_pde_growth} is exactly
\begin{align*}
\rho^*_{t_{i+1}}(x) &= \rho_{t_i}(x) \exp\left(J(x) \Delta t_i\right) = g_i(x) \rho_{t_i}(x),
\end{align*}
where we have defined the quantity
\begin{align*}
g_i(x) &= \exp(J(x) \Delta t_i).
\end{align*}
Recall that $J$ is related to the birth-death process parameters by $J = \tau^{-1} (p_b - p_d)$.
Overall, this term is a multiplicative factor by which the density at each location increases over the time interval $(t_i, t_{i+1})$ exponentially in the birth-death rate.
To implement this splitting approach, we will treat the candidate process $\mathbf{R}$ as a sequence of $T-1$ transport couplings that correspond to the evolution of the diffusion-drift component \eqref{eq:split_pde_transport}
\begin{align*}
\mathbf{R} = (\mathbf{R}_{t_1, t_2}, \mathbf{R}_{t_2, t_3}, ..., \mathbf{R}_{t_{T-1}, t_T}),
\end{align*}
which we intersperse with branching according to \eqref{eq:split_pde_growth}. We illustrate this approach in Figure \ref{fig:growth}.
The effect of branching is captured by permitting the facing marginals of the couplings to differ by a factor of $g_i$ --- that is, we would like $\mathbf{R}_{t_i} \approx g_{i} \overline{\mathbf{R}}_{t_i}$. If we have exact knowledge of the branching rates, then this should be an equality.
\begin{figure}[h]
\centering
\includegraphics[]{growth-eps-converted-to.pdf}
\caption{Illustration of our splitting scheme for accounting for branching: at each time $t_i$, we first perform transport from $\mathbf{R}_{t_i}$ to $\overline{\mathbf{R}}_{t_{i+1}}$ to capture the diffusion-drift component \eqref{eq:split_pde_transport}, and then perform branching from $\overline{\mathbf{R}}_{t_{i+1}}$ to $\mathbf{R}_{t_{i+1}}$ to capture the branching component \eqref{eq:split_pde_growth}.}
\label{fig:growth}
\end{figure}
Since the total mass of marginals $\mathbf{R}_{t_i}$ may differ, the transport terms with more total mass would be over-weighted in the objective function, whilst those with less total mass would be under-weighted. To combat this effect, we introduce tuning parameters $m_i$ which are estimates of the total mass of~$\mathbf{R}_{t_i}$.
Finally, in order to quantify the extent to which branching rates $g_i$ are known, we introduce a branching penalty $G_i(\cdot, \cdot)$ to be a term to enforce the effect of branching at time $t_i$. $G_i(\overline{\mathbf{R}}_{t_i}, \mathbf{R}_{t_i})$ should encourage that $\mathbf{R}_{t_i} \approx g_i \overline{\mathbf{R}}_{t_i}$ in some appropriate way. Two specific choices we consider for enforcing branching are:
\begin{itemize}
\item Exact branching constraint:
\begin{align}\label{eq:exact_growth_constraint}
G_i(\overline{\mathbf{R}}_{t_i}, \mathbf{R}_{t_i}) = \iota(\mathbf{R}_{t_i} = g_i\overline{\mathbf{R}}_{t_i}) = \begin{cases}
0, &\mathbf{R}_{t_i} = g_i \overline{\mathbf{R}}_{t_i} \\
+\infty, &\text{otherwise}
\end{cases}
\end{align}
\item Soft branching constraint with penalty $\kappa_i$:
\begin{align}\label{eq:soft_growth_constraint}
G_i(\overline{\mathbf{R}}_{t_i}, \mathbf{R}_{t_i}) = \kappa_i \mathrm{KL}(\mathbf{R}_{t_i} | g_i \overline{\mathbf{R}}_{t_i} ).
\end{align}
\end{itemize}
In the above, we take $\mathrm{KL}(\alpha | \beta)$ to denote the Kullback-Leibler divergence generalized to positive measures (see e.g. \cite{chizat2018scaling, peyre2019}), defined for $\alpha, \beta \in \mathcal{M}_+(\mathcal{X})$ to be
\begin{align}
\mathrm{KL}(\alpha | \beta) &= \int_{\mathcal{X}} \log\left( \frac{\diff\alpha}{\diff\beta} \right) \diff\alpha - \int_{\mathcal{X}} \diff\alpha + \int_{\mathcal{X}} \diff\beta.\label{eq:kl_divergence_defn}
\end{align}
We note that when $\alpha$ and $\beta$ are equal in mass, $\mathrm{KL}$ is the same as $\mathrm{H}$.
Note also that the exact branching constraint can also be understood as the soft branching constraint with $\kappa = +\infty$. Incorporating these effects, the appropriate regularizing functional can be written as
\begin{align}
\begin{split}
\mathrm{Reg}(\mathbf{R}_{t_1}, \ldots, \mathbf{R}_{t_T}) &= \inf_{\overline{\mathbf{R}}_{t_2}} \left[ \dfrac{1}{m_1 \Delta t_1} \mathrm{OT}_{\sigma^2 \Delta t_1}(\mathbf{R}_{t_1}, \overline{\mathbf{R}}_{t_2}; \pi_0) + \dfrac{1}{m_2 \Delta t_1} G_2(\overline{\mathbf{R}}_{t_2}, \mathbf{R}_{t_2}) \right] \\
\quad &+\sum_{i = 2}^{T-1} \inf_{\overline{\mathbf{R}}_{t_{i+1}}} \left[ \dfrac{1}{m_i \Delta t_i} \mathrm{OT}_{\sigma^2 \Delta t_i} (\mathbf{R}_{t_i}, \overline{\mathbf{R}}_{t_{i+1}}; \mathbf{R}_{t_i}) + \dfrac{1}{m_{i+1}\Delta t_i} G_{i+1}(\overline{\mathbf{R}}_{t_{i+1}}, \mathbf{R}_{t_{i+1}})\right]. \label{eq:regfunc_growth}
\end{split}
\end{align}
An important note is that since we now optimize over positive measures $\mathbf{R}_{t_i} \in \mathcal{M}_+(\overline{\mathcal{X}})$, in order to avoid ambiguity it is necessary in our optimization problem to demand that $\mathbf{R}_{t_1} \in \mathcal{P}(\overline{\mathcal{X}})$. In other words, we will model our process as starting with unit mass and subsequently deviating due to the effects of branching.
\paragraph{Data-fitting functional with branching}
The form of the data-fitting functional in the case of branching is similar to that of \eqref{eq:dffunc} from Section \ref{sec:choice_of_data_fitting}, except we scale appropriately by user-specified weights $1/m_i$ so that $m_i^{-1} \widehat{\mathbf{R}}_{t_i}$ has mass of roughly order 1. We also use $\mathrm{KL}$ instead of $\mathrm{H}$ since $m_i^{-1} \widehat{\mathbf{R}}_{t_i}$ may not be perfectly normalized.
\begin{align}
\mathrm{Fit}(\mathbf{R}_{t_1}, \ldots, \mathbf{R}_{t_T}) &= \sum_{i = 1}^{T} w_i \enskip \inf_{\widehat{\mathbf{R}}_{t_i}} \left[ \dfrac{1}{m_i}\mathrm{OT}_{\varepsilon_i} (\mathbf{R}_{t_i}, \widehat{\mathbf{R}}_{t_i}) + \lambda_i \mathrm{KL}(\widehat{\rho}_{t_i} | m_i^{-1} \widehat{\mathbf{R}}_{t_i}) \right]. \label{eq:dffunc_growth}
\end{align}
\subsection{Algorithmic considerations}
\label{subsection:algorithm}
We now discuss methods for computational solution of the general problem with branching, of which the model without branching is a special case when $g_i \equiv 1, m_i \equiv 1$ and we enforce the null branching rate exactly as per \eqref{eq:exact_growth_constraint}. Owing to the complexity of the model and especially since we seek to handle the situation of branching, an iterative Sinkhorn-type scheme is out of reach in general; moreover, in the case of branching this seems to be a fundamental limitation \cite{AymericHugo}. Instead, we resort to solving this variational problem using gradient-based methods \cite{cuturi2016smoothed, frogner2015learning}.
Direct computation of the gradient of the optimal transport loss with respect to one of its marginals is a costly procedure, requiring first solution of a Sinkhorn scaling subproblem to find the optimal coupling \cite{frogner2015learning}. Instead, since our problem is convex, we choose to proceed via the dual problem. One particular advantage of dealing with the dual problem is that the Legendre dual of the optimal transport loss and therefore its gradients can be evaluated in closed form \cite{cuturi2016smoothed}, eliminating the need to solve a series of costly subproblems. We summarize the dual formulation of our problem which we solve in practice in the following.
\begin{prop}\label{prop:dual}
The unique solution to the dual problem corresponding to
\begin{align*}
\inf_{\substack{\mathbf{R}_{t_1} \in \mathcal{P}(\overline{\mathcal{X}}), \\ \mathbf{R}_{t_2}, \ldots, \mathbf{R}_{t_T} \in \mathcal{M}_+(\overline{\mathcal{X}})}}^{} \lambda \mathrm{Reg}(\mathbf{R}_{t_1}, \ldots, \mathbf{R}_{t_T}) + \mathrm{Fit}(\mathbf{R}_{t_1}, \ldots, \mathbf{R}_{t_T})
\end{align*}
where $\mathrm{Reg}$ and $\mathrm{Fit}$ are specified by \eqref{eq:regfunc_growth} and \eqref{eq:dffunc_growth} respectively can be found by solving the concave maximization problem
\begin{align}
\sup_{ \{\hat{u}_i, \hat{v}_i \}_{i = 1}^T} &-\dfrac{\lambda}{m_1 \Delta t_1}\mathrm{OT}^*_{\sigma^2 \Delta t_1}(u_1, v_1; \pi_0) - \sum_{i = 2}^T \dfrac{\lambda}{m_i \Delta t_{i-1}} G_i^*(\phi_{i-1}, \psi_i) \nonumber \\
&- \sum_{i = 1}^T \dfrac{w_i}{m_i} \mathrm{OT}_{\varepsilon_i}^*(\hat{u}_i, \hat{v}_i) -\sum_{i = 1}^T \lambda_i w_i \mathrm{KL}^*\left( \widehat{\rho}_{t_i} \Big| -\dfrac{\hat{v}_i}{\lambda_i} \right) , \label{eq:dual}
\end{align}
where $\mathrm{OT}_{\sigma^2 \Delta t_1}^*(\cdot, \cdot; \pi_0)$ is the Legendre transform of $\mathrm{OT}_{\sigma^2 \Delta t_1}(\cdot, \cdot; \pi_0)$ constrained to have arguments in $\mathcal{P}(\overline{\mathcal{X}})$; $\mathrm{OT}_{\varepsilon_i}^*(\cdot, \cdot)$ and $G_i^*(\cdot, \cdot)$ are respectively the Legendre transforms of $\mathrm{OT}_{\varepsilon_i}(\cdot, \cdot)$ and $G_i(\cdot, \cdot)$ in both their arguments, and $\mathrm{KL}^*$ here denotes the Legendre transform of the generalized Kullback-Leibler divergence $\mathrm{KL}$ in its second argument. Furthermore, \eqref{eq:dual} is written in terms of auxiliary variables $\{ u_i \}_{i = 1}^{T-1}, \{ v_i \}_{i = 1}^{T-1}, \{ \phi_i \}_{i = 1}^{T-1}$ and $\{ \psi_i \}_{i = 2}^{T}$, which are functions of the optimization variables $\{ \hat{u}_i, \hat{v}_i \}_{i = 1}^T$, defined recursively by the following relations:
\begin{align}
\begin{cases}
\dfrac{\lambda u_1}{\Delta t_1} + w_1 \hat{u}_1 = 0, \\
\dfrac{\lambda u_i}{\Delta t_i} + \dfrac{\lambda \psi_i}{\Delta t_{i-1}} + w_i \hat{u}_i = 0, &\text{ for } 2 \leq i \leq T - 1 \\
\dfrac{\lambda \psi_T}{\Delta t_{T-1}} + w_T \hat{u}_T = 0, \\
\dfrac{v_{i-1}}{m_{i-1}} + \dfrac{\phi_{i-1}}{m_i} = 0, &\text{ for } 2 \leq i \leq T, \\
u_i = -\sigma^2 \Delta t_i \log\left( \overline{K}_{\sigma^2 \Delta t_i} \exp\left( \dfrac{v_i}{\sigma^2 \Delta t_i} \right)\right), &\text{ for } 2 \leq i \leq T-1.
\end{cases} \label{eq:dual_constraints}
\end{align}
In the above, by $\overline{K}_{\sigma^2 \Delta t_i}$ we denote the transition matrix of time-$\Delta t_i$ transition probabilities for the reference process $\mathbf{W}^{\sigma}$ (also see Definition \ref{def:gibbs}). Furthermore, for the choices of branching constraints introduced in \eqref{eq:soft_growth_constraint}, \eqref{eq:exact_growth_constraint} we may set $G_i^*(\cdot, \cdot) = 0$ and add additional constraints for $1 \leq i \leq T-1$:
\begin{align}
\begin{split}
\begin{cases}
\phi_{i} = -g_i \psi_{i+1} &\text{ for hard branching constraint \eqref{eq:exact_growth_constraint}} \\
\phi_{i} = \kappa g_i \log(1 - \psi_{i+1}/\kappa) &\text{ for soft branching constraint \eqref{eq:soft_growth_constraint}}
\end{cases}
\end{split}\label{eq:dual_growth_constraint}
\end{align}
\end{prop}
\begin{proof}
See Appendix \ref{proof:dual}. For the reader's reference, we list in the appendix the Legendre transforms of relevant functions in Table \ref{table:legendre}, and illustrate the recurrence relationship of the auxiliary variables in Figure \ref{fig:dependency_diagram}.
\end{proof}
To solve the dual problem \eqref{eq:dual}, any gradient-based optimization method can be used since the problem is unconstrained and convex in the variables $\{ \hat{u}_i, \hat{v}_i \}_{i = 1}^T$. In order to easily evaluate gradients of the dual objective \eqref{eq:dual} we employ automatic differentiation, although we note that a more involved computation of gradients by hand is indeed possible and may improve performance in practice. We chose to implement our method using the PyTorch framework \cite{paszke2017} to leverage its automatic differentiation engine and also support for GPU acceleration.
Evaluation of the dual objective \eqref{eq:dual} involves stepping through a recurrence relation involving the auxiliary variables, and in particular requires $\mathcal{O}(T)$ convolutions against kernel matrices $\overline{K}_{\sigma^2 \Delta t_i}$ of dimension $|\overline{\mathcal{X}}|^2$, whose entries are functions of the squared Euclidean distances between pairs of points in $\overline{\mathcal{X}}$. In settings with many time-points and where $\overline{\mathcal{X}}$ is large, storage of these kernel matrices and evaluating convolutions become increasingly costly. To avoid storing these kernels explicitly in memory and also improve overall performance, we employ the KeOps library \cite{charlier2020} to enable GPU-accelerated on-the-fly computation of these kernel convolutions with automatic differentiation compatibility. In practice, we generally solve the dual problem \eqref{eq:dual} using L-BFGS with a tolerance on the primal-dual gap as the stopping criterion, although alternative criteria such as a tolerance on the gradient or simply setting a fixed number of iterations may also be used. We direct the reader to Section \ref{sec:code_avail} for our implementation of gWOT as an open-source software package.
\subsection{Related work}
\label{sec:related_work}
We conclude the presentation of our methodology by explaining where our inspiration comes from as well as the link with other works.
\paragraph{Learning with optimal transport as a regularizer}
One main source of inspiration for our work is \cite{schmitzer2019dynamic}, where the authors set up a learning problem for trajectory inference where the regularization term comes from optimal transport, and the data-fitting term is a log-likelihood. This approach was similar to the one followed in \cite{bredies2020optimal, bredies2019extremal} where the authors set up a learning problem with an optimal transport regularizer. They give a detailed theoretical analysis of the problem they solve as well as a numerical method \cite{bredies2020generalized}. In \cite{bredies2019extremal, bredies2020generalized}, they show that their approach leads to a (spatial) discretization free algorithm as the minimizers of the learning problem they consider are sparse and live ultimately on a low dimensional space. Compared to these works, we work with entropy minimization rather than ``plain'' optimal transport, and we provide an identification of what the ``ground truth'' should be (that is, laws of SDE) as well as a theoretical proof of consistency in the setting of sparse data. In addition, by relying on entropy-regularized optimal transport we can leverage efficient numerical tools as presented in Section \ref{subsection:algorithm}.
The problem we tackle has the form of the minimization of an entropy over the space of paths together with a data-fitting term which is a function only of the temporal marginals. This is very close to the problem studied in \cite{benamou2019} in a different context (namely Mean Field Games) where the data-fitting term is replaced by a different functional, which still depends only on the marginals. That work helped us understand the effect of the discretization in time presented in Section \ref{sec:discretization_time_space} and suggested a Sinkhorn-like algorithm for finding solutions of the dual problem. Although such an approach can be derived exclusively for the setting without branching, in practice we find that the L-BFGS method works more generally and converges faster.
Ultimately, once we have reconstructed marginals, we interpolate between time points with Schrödinger bridges. Some works \cite{benamou2019second, chen2018measure, chewi2020fast} try to produce smoother interpolations, that is \emph{splines} in the Wasserstein space. However, in these works the authors assume precise knowledge of the temporal marginals, contrary to the framework of sparse data that we tackle. Incorporating ideas from these articles could be a future direction of research, though in our case the imperfect knowledge of the temporal marginal, and not the smoothness of the interpolation, is the main issue at stake.
\paragraph{Comparison to other trajectory inference methods}
Numerous methods have been proposed in recent years for recovering trajectories from scRNA-seq time-courses. However, few provide theoretical guarantees. If we are to rely on trajectory inference to understand disease and develop new therapies, we need to know when to trust the results. One notable exception is the work by Weinreb et al.~\cite{weinreb2018}, who analyzed the equilibrium case, when data are sampled from a single snapshot of a process at its steady state.
They leveraged results from spectral graph theory~\cite{ting2011analysis}
to establish that an underlying diffusion-drift equation can be identified from such a snapshot when the drift is conservative, i.e. it arises from a potential function.
We provide the first theoretical analysis of the inference problem in the non-equilibrium case, in which our data are a series of temporal snapshots. We demonstrate that recovery can be achieved through convex optimization.
Some recent methods \cite{yeo2020, chen2020solving, tong2020trajectorynet} use the same type of generative model as we do, but they rather parameterize the potential $\Psi$ by a neural network and then learn the weights of the network thanks to the data. Although neural networks are powerful for learning representations of the dynamics, due to nonconvexities they are susceptible to local minima. Compared to these works, we write a convex learning problem (hence numerical optimization is guaranteed to reach a global optimizer) which we prove converges to the ground truth.
Finally, we acknowledge that there are important variants and extensions of the trajectory inference problem that we have not treated. For example, it is possible to recover additional information, such as estimates of velocity in gene expression space~\cite{RNAvelocity}, cell lineage~\cite{Yachie}, or spatial location~\cite{slideseq}.
Lineage tracing in particular has been demonstrated to be crucial for accurate trajectory inference~\cite{packer2019lineage}, especially in cases of complex convergent trajectories. While we have recently demonstrated that OT-based methodology can be extended to leverage lineage information~\cite{forrow2020}, we have not incorporated this into the theoretical framework we present here.
\section{Theoretical results: precise statements and proofs}
\label{sec:theory_details}
This section provides theoretical justifications for our method, especially regarding the convergence of the scheme as the number of measurements goes to infinity. In particular we prove the two theorems stated in Section \ref{sec:theory_no_growth}.
\paragraph{Setting}
Let $\mathcal{X}$ be a compact smooth Riemannian manifold without boundary. The Laplace-Beltrami operator on $C^2(\mathcal{X})$ is denoted by $\Delta$. We will denote by $K$ a lower bound on its Ricci curvature, $K > - \infty$ by compactness. The normalized volume measure on $\mathcal{X}$ is $\mathrm{vol}$: it is normalized in such a way that $\int_{\mathcal{X}} \diff \mathrm{vol} =1$.
Up to a change of the temporal scaling, we assume without loss of generality that $t_\text{max} = 1$. We denote by $\Omega = C([0,t_\text{max}], \mathcal{X}) = C([0,1], \mathcal{X})$ the set of continuous $\mathcal{X}$-valued paths endowed with the topology of uniform convergence and its Borel $\sigma$-algebra. It is a Polish space, and $\mathcal{P}(\Omega)$ is the set of laws on the space of paths. We endow $\mathcal{P}(\Omega)$ with the topology of narrow convergence, that is, convergence against bounded continuous functions. We denote by $(X_t)_{t \in [0,1]}$ the canonical process on $\Omega$, and for each $t \in [0,1]$ we write $X_t(\omega) = \omega_t$ for the evaluation at time $t$. If $\mathbf{R} \in \mathcal{P}(\Omega)$ is a probability measure on the space of paths $\Omega$, we denote by $\mathbf{R}_t \in \mathcal{P}(\mathcal{X})$ its marginal at time $t$. That is, if $X$ is a random element of $\Omega$ distributed according to $\mathbf{R}$ then $X_t$ is distributed according to $\mathbf{R}_t$.
By a Wiener measure with diffusivity $\sigma^2$, we mean an element of $\mathcal{P}(\Omega)$ which is a diffusion measure generated by the second order elliptic operator $f \mapsto \frac{\sigma^2}{2} \Delta f$ (in the sense of \cite[Definition 1.3.1]{hsu2002stochastic}). By the law of solutions of the SDE \eqref{eqn:SDE-grad} below, we mean diffusion measure generated by $f \mapsto \frac{\sigma^2}{2} \Delta f - \nabla \Psi \cdot \nabla f$. We denote by $\mathbf{W}^\sigma$ the reversible Wiener measure on $\mathcal{X}$ with diffusivity $\sigma^2$. Here, ``reversible'' means that the initial condition is $\mathrm{vol}$ which is invariant under the heat flow on a manifold without boundary, thus for every $t \in [0,1]$ there holds $\mathbf{W}^\sigma_t = \mathrm{vol}$.
\bigskip
\paragraph{Statement of the results}
Let us start with a basic result on which our method relies: a variational characterization of the law of an SDE when its drift is a gradient.
\begin{theorem}
\label{thm:SDE_grad_min_KL}
Let $\P_0 \in \mathcal{P}(\mathcal{X})$ be a probability distribution with $\mathrm{H}(\P_0|\mathrm{vol})< + \infty$. Let $\Psi : [0,1] \times \mathcal{X} \to \mathbb{R}$ be a smooth ($C^2$) time-dependent potential. We consider $\P$ the law of the SDE
\begin{equation}\label{eqn:SDE-grad}
\diff X_t = - \nabla \Psi(t,X_t) \diff t + \sigma \diff B_t
\end{equation}
with initial condition $\P_0$. Then, if $\mathbf{R} \in \mathcal{P}(\Omega)$ is such that $\mathbf{R}_t = \P_t$ for all $t \in [0,1]$, there holds
\begin{equation*}
\mathrm{H}(\P|\mathbf{W}^\sigma) \leqslant \mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma)
\end{equation*}
with equality if and only if $\P = \mathbf{R}$.
\end{theorem}
\noindent In other words, with a perfect knowledge of the marginals (the $\P_t$), one just needs to minimize the strictly convex functional $\mathrm{H}(\cdot | \mathbf{W}^\sigma)$ to recover the ``ground truth'' $\P$. Although not stated exactly in this form, this result can be read implicitly in the literature on the Schrödinger problem. We do not claim originality of this variational characterization, but for the sake of completeness, we still present a short proof which was suggested to us by Aymeric Baradat.
\bigskip
Let us turn now to the framework of sparse data %
where we do not have a perfect knowledge of the marginals. As a data-fitting term between our ``reconstructed marginal'' $r$ and the ``measurement'' $p$, in Section~\ref{sec:theory_no_growth} we used $\mathrm{H}(p|r)$. Up to the constant $\mathrm{H}(p|\mathrm{vol})$ it coincides with the cross-entropy, that we denote by $\mathrm{DF}(r, p)$ for ``Data-Fitting'':
\begin{equation*}
\mathrm{DF}(r, p) = \mathrm{H}(p|r) - \mathrm{H}(p|\mathrm{vol}),
\end{equation*}
see Definition \ref{defi:DF} below. Note that this is nothing but $\mathrm{DF}(r, p) =- \int (\log r) p$. Though $\mathrm{DF}(r,p)$ is not bounded from below, it is if we assume some regularity on $p$ as detailed in Lemma \ref{lem:F_bd_below} below. As $p$ (the ``measurement'') is fixed in our optimization procedure, minimizing $\mathrm{H}(p|r)$ or $\mathrm{DF}(r,p)$ as a function of $r$ leads to the same result. For reasons clarified in Remarks \ref{rk:choice_DF} and \ref{rk:DF_limit} below, it is actually more convenient to work with $\mathrm{DF}(r,p)$ rather than $\mathrm{H}(p|r)$, and this is the choice we make in this section.
\begin{remark}
\label{rk:choice_DF}
The key points of this data-fitting term are that it is linear with respect to $p$ and that, for a given $p$ it is minimized for $r=p$. The linearity in $p$ enables an averaging effect which is crucial if we know $p$ only weakly in time (that is, for each $t$ we have a bad approximation of the marginal but such an approximation gets better when averaged in time). Note that among all smooth local functionals of $r$ and $p$, that is, ones that can be written $\int_{\mathcal{X}} f(p(x),r(x)) \diff x$ for some smooth $f$, only $\mathrm{DF}$ is linear in $p$ and minimized for $r=p$ (for $p$ fixed).
\end{remark}
We are able to prove the following result, which is the main one of this section.
\begin{theorem}
\label{theo:main_convergence}
Let $\P_0 \in \mathcal{P}(\mathcal{X})$ be a probability distribution with $\mathrm{H}(\P_0|\mathrm{vol})< + \infty$. Let $\Psi : [0,1] \times \mathcal{X} \to \mathbb{R}$ be a smooth ($C^2$) time-dependent potential. We consider $\P$ the law of the SDE
\begin{equation*}
\diff X_t = - \nabla \Psi(t,X_t) \diff t + \sigma \diff B_t
\end{equation*}
with initial condition $\P_0$.
Let us assume the following:
\begin{enumerate}[label=(\roman*)]
\item
For every $T \geqslant 1$, we have a sequence of ordered instants $\{ t^{T}_i \}_{i=1}^T$ between $0$ and $1$.
\item For each $T$ and each $i \in \{ 1,2,\ldots, T \}$, we have $N^{T}_i$ random variables $\{ X^{T}_{i,j}\}_{j=1}^{N_i^{T}}$ which are i.i.d. and distributed according to $\P_{t^{T}_i}$.
\item The variables $X^{T}_{i,j}$ and $X^{T'}_{i',j'}$ are independent except if $(T,i,j) = (T',i',j')$.
\item The family of ordered instants $\{ t^{T}_i \}_{i=1}^T$ becomes dense in $[0,1]$ as $T \to + \infty$, but we assume nothing on the $N_i^{T}$ except $N^{T}_i \geqslant 1$.
\end{enumerate}
Denoting by $\Phi_\epstheo$ the heat flow on $\mathcal{P}(\mathcal{X})$ followed for a time $\epstheo$, we form the following random probability distribution
\begin{equation*}
\widehat{\rho}^{T,\epstheo}_i := \Phi_\epstheo \left( \frac{1}{N^{T}_i} \sum_{j=1}^{N^{T}_i} \delta_{X^{T}_{i,j}} \right).
\end{equation*}
We consider $\mathbf{R}^{T,\lambda,\epstheo} \in \mathcal{P}(\Omega)$ the (unique) minimizer of the functional
\begin{equation*}
\mathbf{R} \mapsto \sigma^2 \mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) + \frac{1}{\lambda} \sum_{i=1}^{T} \left( t^{T}_{i+1} - t^{T}_{i} \right) \mathrm{DF} \left( \mathbf{R}_{t^{T}_i}, \widehat{\rho}^{T,\epstheo}_i \right).
\end{equation*} %
Then, it holds
\begin{equation*}
\lim_{\lambda \to 0, \epstheo \to 0} \left( \lim_{T \to + \infty} \, \mathbf{R}^{T,\lambda,\epstheo} \right) = \P
\quad \hbox{almost surely} \end{equation*}
for the topology of narrow convergence.
\end{theorem}
The proof of Theorem \ref{theo:main_convergence} will rely on the analysis of the limit $T \to + \infty$, which is the most technical point, and then the limit $\lambda, \epstheo \to 0$ which on the other hand is routine. This theorem is important in the case where $N^{T}_i$ is of order $1$ in the limit $T \to + \infty$: in this regime, each $\widehat{\rho}^{T,\epstheo}_i$ is a bad approximation of $\P_{t^{T}_i}$, but thanks to time averaging in the limit $T \to + \infty$ one can still recover the ``ground truth'' $\P$.
A key result for passing to the limit $T \to + \infty$ is the following. We have removed the dependency on $\epstheo$ to simplify the statement. Also, the context is slightly more general as $\left( t^{T}_{i+1} - t^{T}_{i} \right) $ is replaced by weights $\omega^{T}_i$, and we simply assume some weak space-time convergence of the $\widehat{\rho}^{T}_i$ to a $\mathcal{P}(\mathcal{X})$-valued curve. This latter assumption is easily implied by the law of large numbers in the framework of Theorem \ref{theo:main_convergence}.
First, define the Fisher information:
\begin{align}
\label{eq:def_Fisher}
\mathcal{I}(p) := \int_{\mathcal{X}} \frac{|\nabla p(x)|^2}{p(x) } \mathrm{vol}(\diff x).
\end{align}
We have
\begin{theorem}
\label{theo:sparse_data}
Fix $\lambda > 0$. Let us assume the following:
\begin{enumerate}[label=(\roman*)]
\item For every $T \in \mathbb{N}$ we have an ordered family of instants $\{ t^{T}_i\}_{i=1}^T$; a family of measurements (the data) $\widehat{\rho}^{T}_i$ which is just a collection of $T$ probability measures on $\mathcal{X}$; and $\{ \omega^{T}_i \}_{i=1}^T$ a collection of non negative weights.
\item There exists a constant $L$ such that, for each $T$ and $i$, the measure $\widehat{\rho}^{T}_i$ satisfies $\mathcal{I}(\widehat{\rho}^{T}_i) \leqslant L$.
\item There exists a continuous curve $\overline{\rho} \in C([0,1], \mathcal{P}(\mathcal{X}))$ valued in the set of probability distributions over $\mathcal{X}$ such that each $\overline{\rho}_t$ has
$\mathcal{I}(\overline{\rho}_t) \leqslant L$
and
the following weak convergence holds: for all continuous function $a : [0,1] \times \mathcal{X} \to \mathbb{R}$,
\begin{equation*}
\lim_{T \to + \infty} \, \sum_{i=1}^{T} \omega^{T}_i \int_{\mathcal{X}} a \left( t^{T}_i, x \right)\widehat{\rho}^{T}_i(\diff x) = \int_0^1 \int_{\mathcal{X}} a(t,x) \overline{\rho}_t(\diff x) \, \diff t.
\end{equation*}
\end{enumerate}
For each $T$, let $\mathbf{R}^T \in \mathcal{P}(\Omega)$ be the (unique) minimizer of
\begin{equation*}
\mathbf{R} \mapsto F_T( \mathbf{R} ) := \sigma^2 \mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) + \frac{1}{\lambda} \sum_{i=1}^{T} \omega^{T}_i \mathrm{DF} \left( \mathbf{R}_{t^{T}_i}, \widehat{\rho}^{T}_i \right) .
\end{equation*}
Then, as $T \to + \infty$, the sequence $(\mathbf{R}^T)_{T \geqslant 1}$ converges narrowly on $\mathcal{P}(\Omega)$ to the (unique) minimizer of
\begin{equation*}
\mathbf{R} \mapsto F(\mathbf{R}) := \sigma^2 \mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) + \frac{1}{\lambda} \int_0^1 \mathrm{DF} \left( \mathbf{R}_t, \overline{\rho}_t \right) \, \diff t.
\end{equation*}
\end{theorem}
\begin{remark}
\label{rk:DF_limit}
Here let us emphasize once again the choice of our data-fitting term. We will actually prove that $F_T(\mathbf{R}^T)$ converges to $F(\mathbf{R})$, where $\mathbf{R}$ is the unique minimizer of $F$. In particular,
\begin{equation*}
\lim_{T \to + \infty} \, \sum_{i=1}^{T} \omega^{T}_i \mathrm{DF} \left( \mathbf{R}^T_{t^{T}_i}, \widehat{\rho}^{T}_i \right) = \int_0^1 \mathrm{DF} \left( \mathbf{R}_t, \overline{\rho}_t \right) \, \diff t.
\end{equation*}
On the other hand, such convergence would not hold if we replace $\mathrm{DF}(r,p)$ by $\mathrm{H}(p|r)$. This is because
\begin{equation*}
\liminf_{T \to + \infty} \, \sum_{i=1}^{T} \omega^{T}_i \mathrm{H} \left(\widehat{\rho}^{T}_i | \mathrm{vol} \right) \geqslant \int_0^1 \mathrm{H} \left( \overline{\rho}_t | \mathrm{vol} \right) \, \diff t
\end{equation*}
by lower semicontinuity of the entropy, but the inequality can be strict as we have only a weak convergence of the $\widehat{\rho}^T$.
\end{remark}
Theorem \ref{theo:sparse_data} is our most technical result for this section. Once we have proved it, taking the limits $\lambda \to 0$ and $\epstheo \to 0$ is standard in the theory of $\Gamma$-convergence:
\begin{theorem}
\label{theo:GammaConvergence_stdt}
Let $\P \in \mathcal{P}(\Omega)$ with $\mathrm{H}(\P|\mathbf{W}^\sigma) < + \infty$. For each $\lambda > 0$ and $\epstheo > 0$, let $\mathbf{R}^{\lambda,\epstheo}$ be the minimizer of the functional %
\begin{equation*}
\mathbf{R} \mapsto G_{\lambda,\epstheo}(\mathbf{R}) := \sigma^2 \mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) + \frac{1}{\lambda} \int_0^1 \mathrm{H} \left( \Phi_\epstheo \P_t \,\middle|\, \mathbf{R}_t \right) \, \diff t.
\end{equation*}
Then, as $\epstheo \to 0, \lambda \to 0$, the measure $\mathbf{R}^{\lambda,\epstheo}$ converges to the %
minimizer of $\mathbf{R} \mapsto \mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma)$ among all measures such that $\mathbf{R}_t = \P_t$ for all $t \in [0,1]$. Furthermore, from Theorem \ref{thm:SDE_grad_min_KL} this implies that if $\P$ is the law of an SDE with a gradient drift as in \eqref{eqn:SDE-grad}, then $\mathbf{R}^{\lambda,\epstheo}$ converges to $\P$.%
\end{theorem}
Theorem \ref{theo:sparse_data} and Theorem \ref{theo:GammaConvergence_stdt} are related by the simple relation between the functionals $F$ and $G$, as $\mathrm{DF}(r,p) = \mathrm{H}(p|r) - \mathrm{H}(p|\mathrm{vol})$.
Theorem \ref{theo:main_convergence}
is a straightforward consequence of these two theorems:
\begin{proof}[\bf Proof of Theorem \ref{theo:main_convergence}]
We use Theorem \ref{theo:sparse_data} to take the limit $T \to + \infty$. Note that for a fixed $\epstheo$, the measures $\widehat{\rho}^{T,\epstheo}$ satisfy $\mathcal{I}(\widehat{\rho}^{T, \epstheo}) \leqslant L$ with $L$ depending on $\epstheo$ but not on $T$ thanks to the smoothing effect of the heat flow (see Proposition \ref{prop:heat_flow} below). Moreover, almost surely the weak convergence assumption with $\overline{\rho}_t = \Phi_\epstheo \P_t$ holds: this is nothing else than the law of large numbers.
The key point is that, if we call $\mathbf{R}^{\lambda,\epstheo}$ the limit of the $\mathbf{R}^{T,\lambda,\epstheo}$ then by Theorem \ref{theo:sparse_data} it is the unique minimizer of
\begin{equation*}
\mathbf{R} \mapsto F_{\lambda,\epstheo}(\mathbf{R}) := \sigma^2 \mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) + \frac{1}{\lambda} \int_0^1 \mathrm{DF} \left( \mathbf{R}_t, \Phi_\epstheo \P_t \right) \, \diff t.
\end{equation*}
Notice that from the definition of the data-fitting term $\mathrm{DF}$, the functional $G_{\lambda, \epstheo}$ in Theorem \ref{theo:GammaConvergence_stdt} differs from $F_{\lambda, \epstheo}$ only by a constant, that is,
\begin{equation*}
G_{\lambda,\epstheo}(\mathbf{R}) = F_{\lambda,\epstheo}(\mathbf{R}) + \int_0^1 \mathrm{H}(\Phi_\epstheo \P_t | \mathrm{vol}) \, \diff t.
\end{equation*}
Thus $\mathbf{R}^{\lambda,\epstheo}$ is also the minimizer of $G_{\lambda,\epstheo}$.
Finally, to take the limit $\epstheo \to 0$ together with $\lambda \to 0$ we apply Theorem \ref{theo:GammaConvergence_stdt}.
\end{proof}
The rest of this section is devoted to the proofs of remaining theorems.
The proof of Theorem \ref{thm:SDE_grad_min_KL} is not so involved, but for the convenience of the reader we recall some elements of stochastic analysis on manifolds. The proof of Theorem \ref{theo:sparse_data} is much more technical. The main technical difficulty is the following: we assume some weak convergence of the family $\widehat{\rho}^{T}_i$ on $[0,1] \times \mathcal{X}$ against space-time continuous functions $a$, but to handle the data-fitting term we need to apply it to $a(t,x) = - \log \mathbf{R}_t(x)$ which has no such regularity \emph{a priori}. We use the heat flow to regularize the marginals $\mathbf{R}_t$, but we need quantitative estimates on how the different terms behave with the heat flow. We prove such estimates in Section \ref{sec:preliminary-for-them-sparse}. In particular, we need to understand how regularizing the marginals of $\mathbf{R}$ by the heat flow influences the value of $\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma)$: we prove a contraction estimate (see Proposition \ref{prop:heat_flow_decreases_A} and Proposition \ref{prop:properties_regularization}) where the Ricci curvature of the Riemannian manifold plays a key role; this is of its own interest. This will take care of the proof of Theorem \ref{theo:sparse_data}. The proof of Theorem \ref{theo:GammaConvergence_stdt} will then be routine.
\subsection{Preliminaries for the Proof of Theorem \ref{thm:SDE_grad_min_KL}}
We recall a few basic facts about the entropy functional and Wiener measures.
\paragraph{Definition and dual formulation of the entropy}
Here we recall the definition of the entropy and its dual formulation. We refer to \cite[Section 3]{leonard2012girsanov} for the details about the dual formulation \eqref{eq:dual_entropy}.
\begin{definition}
If $Y$ is a Polish space endowed with its Borel $\sigma$-algebra $\mathcal{B}$, we define for probability measures $p,r$ on $(Y,\mathcal{B})$
\begin{equation*}
\mathrm{H}(p|r) = \begin{cases}
\displaystyle{\int_Y \log \left( \frac{\diff p}{\diff r}(y) \right) \, p(\diff y) } & \text{if } p \ll r, \\
+ \infty & \text{otherwise}.
\end{cases}
\end{equation*}
Equivalently, it coincides with
\begin{equation}
\label{eq:dual_entropy}
\mathrm{H}(p|r) = \sup_{U} \left\{ \int_Y U(y) \, p(\diff y) - \log \int_Y e^{U(y)} \, r(\diff y) \right\},
\end{equation}
where the supremum is taken over all bounded and continuous functions $U : Y \to \mathbb{R}$.
\end{definition}
Thanks to Jensen's inequality (or it can be seen in \eqref{eq:dual_entropy} by taking $U = 0$) there always holds $\mathrm{H}(p|r) \geqslant 0$ for any probability distributions $p,r$.
\paragraph{Stochastic analysis on manifolds}
We refer to \cite{hsu2008brief} or \cite[Chapter 3]{hsu2002stochastic} for details about Brownian motion on Riemannian manifolds.
We recall that $\Omega = C([0,1], \mathcal{X})$ is the set of $\mathcal{X}$-valued paths and that $(X_t)_{t \in [0,1]}$ is the canonical process. Let $\mathcal{F}_t$ denote the Borel $\sigma$-algebra generated by the random variables $X_s$ for $s \leqslant t$, in such a way that $(\mathcal{F}_t)_{t \in [0,1]}$ is a filtration. We will need only the following martingale property about Wiener measures.
\begin{prop}
\label{prop:martingale_exp}
Let $\tilde{\mathbf{W}}^\sigma \in \mathcal{P}(\Omega)$ denote a Wiener measure whose initial distribution is not necessarily $\mathrm{vol}$.
Let $\varphi : [0,1] \times \mathcal{X} \to \mathbb{R}$ be a smooth function. Then, the process whose value at time $t \in [0,1]$ is given by
\begin{equation}
\label{eq:martingale_epx}
\exp \left( \frac{1}{\sigma^2} \left( \varphi(t,X_t) - \varphi(0,X_0) - \int_0^t \left[\partial_s \varphi + \frac{1}{2} |\nabla \varphi|^2 + \frac{\sigma^2}{2} \Delta \varphi \right](s,X_s) \diff s \right) \right)
\end{equation}
is a $\mathcal{F}_t$-martingale under $\tilde{\mathbf{W}}^\sigma$.
\end{prop}
\begin{proof}
With respect to $\tilde{\mathbf{W}}^\sigma$, the following stochastic process
\begin{equation*}
M^\varphi_t = \varphi(t,X_t) - \varphi(0,X_0) - \int_0^t \left[ \partial_s \varphi + \frac{\sigma^2}{2} \Delta \varphi \right](s,X_s) \diff s
\end{equation*}
is a bounded martingale (by definition of diffusion measure) and its quadratic variation is given by (see \cite[Section 1.3]{hsu2008brief})
\begin{equation*}
\langle M^\varphi \rangle_t =
\sigma^2 \int_0^t |\nabla \varphi(s,X_s)|^2 \diff s.
\end{equation*}
Then \eqref{eq:martingale_epx} is nothing else than the exponential martingale~\cite[Proposition 5.11]{leGall2016brownian} associated to $\sigma^{-2} M^\varphi$.
\end{proof}
\subsection{Proof of Theorem \ref{thm:SDE_grad_min_KL}}
We recall that $\P$ is the law of the SDE \eqref{eqn:SDE-grad} and $\mathbf{W}^\sigma$ is the reversible Wiener measure with diffusivity $\sigma^2$. We want to prove that $\P$ minimizes the entropy with respect to $\mathbf{W}^\sigma$ among all measures which share the same temporal marginals. The method of proof consists in using the exact expression of the density of $\P$ with respect to $\mathbf{W}^\sigma$.
\begin{prop}
\label{prop:density_p}
In the framework above, the Radon-Nikodym derivative of $\P$ with respect to $\mathbf{W}^\sigma$ is given $\mathbf{W}^\sigma$-a.e. by
\begin{equation*}
\frac{\diff \P}{\diff \mathbf{W}^\sigma}(X) = \frac{\diff \P^0}{\diff \mathrm{vol}}(X_0) \exp \left( \frac{1}{\sigma^2} \left( \Psi(0,X_0) - \Psi(1,X_1) + \int_0^1 \left( \partial_s \Psi - \frac{1}{2} |\nabla \Psi|^2 + \frac{\sigma^2}{2} \Delta \Psi \right)(s,X_s) \diff s \right) \right).
\end{equation*}
\end{prop}
\noindent The key point, that holds only because the drift in the SDE is a gradient, is that the density of $\P$ with respect to $\mathbf{W}^\sigma$ does not involve a stochastic integral.
When $\mathcal{X}$ is a flat space, note that Girsanov formula applied to the process $X_t$ with the SDE $\diff X_t = a(t,X_t) \diff t + \sigma \diff B_t$ for a drift $a$ gives
\begin{align*}
\frac{\diff \P}{\diff \mathbf{W}^\sigma}(X) = \frac{\diff \P^0}{\diff \mathrm{vol}}(X_0) \exp \left( \frac{1}{\sigma^2} \left(\int_0^1 \langle a(s, X_s), \diff X_s\rangle - \frac{1}{2}\int_0^1 |a(s, X_s) |^2 \diff s \right) \right).
\end{align*}
For us $a(t,X_t) = -\nabla \Psi (t, X_t)$ and Itô's formula says
\begin{align*}
\int_0^1 \langle a (s, X_s), \diff X_s\rangle = - \int_0^1 \langle \nabla \Psi (s, X_s), \diff X_s\rangle
= \Psi(0,X_0) - \Psi(1,X_1) + \int_0^1 \left( \partial_s \Psi + \frac{\sigma^2}{2} \Delta \Psi \right)(s,X_s) \diff s .
\end{align*}
This would conclude the proof. For the general case of a curved space $\mathcal{X}$ we prefer to present a proof which does not involve stochastic integration over the manifold and relies only on martingale characterizations.
\begin{proof}[Proof of Proposition \ref{prop:density_p}]
For each $x \in \mathcal{X}$, let $\mathbf{W}^{\sigma,x}$ be the Wiener measure starting from $x$, that is such that $\mathbf{W}^{\sigma,x}_0 = \delta_x$.
Define a process $D$ whose value at time $t$ is given by
\begin{equation*}
D_t = \exp \left( \frac{1}{\sigma^2} \left( \Psi(0,X_0) - \Psi(t,X_t) + \int_0^t \left( \partial_s \Psi - \frac{1}{2} |\nabla \Psi|^2 + \frac{\sigma^2}{2} \Delta \Psi \right)(s,X_s) ds \right) \right).
\end{equation*}
As stated in Proposition \ref{prop:martingale_exp}, under $\mathbf{W}^{\sigma,x}$ this is a bounded martingale: it is the exponential martingale of the process $(N_t)_{t \in [0,1]}$, defined by
\begin{equation*}
N_t = \frac{1}{\sigma^2} \left( \Psi(0,X_0) - \Psi(t,X_t) + \int_0^t \left( \partial_s \Psi + \frac{\sigma^2}{2} \Delta \Psi \right)(s,X_s) \diff s \right),
\end{equation*}
whose quadratic variation is $\sigma^{-2} \int_0^t |\nabla \Psi(s,X_s)|^2 \diff s$. As a consequence, we can define $\tilde{\P}^x = D_1 \mathbf{W}^{\sigma,x} \in \mathcal{P}(\Omega)$.
Then, let us take $f \in C^\infty(\mathcal{X})$. Under $\mathbf{W}^{\sigma,x}$, we know that the process $M^f$ whose value at time $t$ is given by
\begin{equation*}
M^f_t = f(X_t) - f(X_0) - \int_0^t \frac{\sigma^2}{2} \Delta f(X_s) \diff s
\end{equation*}
is a martingale with quadratic variation given by $\sigma^2 \int_0^t |\nabla f(X_s)|^2 \diff s$. Applying Girsanov's theorem \cite[Theorem 5.22]{leGall2016brownian} for real-valued semi martingales, we know that under $\tilde{\P}^x$ the process
\begin{equation*}
M^f - \langle M^f, N \rangle = \left( f(X_t) - f(X_0) - \int_0^t \frac{\sigma^2}{2} \Delta f(X_s) \diff s + \int_0^t \nabla f(X_s) \cdot \nabla \Psi(s,X_s) \diff s \right)_{t \in [0,1]}
\end{equation*}
is a local martingale. As it is clearly bounded, it is a martingale. This exactly shows that $\tilde{\P}^x$ is a diffusion measure generated by $f \mapsto \frac{\sigma^2}{2} \Delta f - \nabla \Psi \cdot \nabla f$, and its initial distribution is $\mathbf{W}^{\sigma,x}_0 = \delta_x$.
Eventually we average in $x$: we can define
\begin{equation*}
\tilde{\P} = \int_{\mathcal{X}} \tilde{\P}^x \, \P_0(\diff x) = \frac{\diff \P_0}{\diff \mathrm{vol}}(X_0) D_1 \, \mathbf{W}^\sigma,
\end{equation*}
and by an easy conditioning argument we see that $\tilde{\P}$ is still a diffusion measure generated by $f \mapsto \frac{\sigma^2}{2} \Delta f - \nabla \Psi \cdot \nabla f$ with initial distribution $\P_0$. By uniqueness, $\tilde{\P} = \P$, that is, $\diff \P / \diff \mathbf{W}^\sigma = \frac{\diff \P_0}{\diff \mathrm{vol}}(X_0) \, D_1$.
\end{proof}
\begin{proof}[\bf Proof of Theorem \ref{thm:SDE_grad_min_KL}]
Let $\P$ be the law of the solution of the SDE \eqref{eqn:SDE-grad} and $\mathbf{R}$ be another probability distribution on $\Omega$ such that $\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) < + \infty$ (otherwise the result trivially holds). Let $p,r \in L^1(\Omega, \mathbf{W}^\sigma)$ respectively denote the Radon-Nikodym derivative of $\P$ and $\mathbf{R}$ with respect to $\mathbf{W}^\sigma$.
By strict convexity of the function $x \mapsto x \log x$, there always holds $\mathbf{W}^\sigma$-a.e.
\begin{equation*}
r \log r - p \log p \geqslant (1+ \log p) (r - p),
\end{equation*}
with equality if and only if $r=p$. By integrating with respect to $\mathbf{W}^\sigma$, we find that
\begin{equation}
\label{eq:zz_aux_2}
\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) - \mathrm{H}(\P|\mathbf{W}^\sigma) \geqslant \mathbb{E}_{\mathbf{R}} \left[ 1 + \log p \right] - \mathbb{E}_{\P} \left[ 1 + \log p \right]
\end{equation}
On the other hand, given Proposition \ref{prop:density_p}, we have
\begin{align*}
&\mathbb{E}_{\mathbf{R}} \left[ 1 + \log p \right] \\
&= \mathbb{E}_{\mathbf{R}} \left[ 1 + \log \left( \frac{\diff \P_0}{\diff \mathrm{vol}} \right)(X_0) + \frac{1}{\sigma^2} \left( \Psi(0,X_0) - \Psi(1,X_1) + \int_0^1 \left( \partial_s \Psi - \frac{1}{2} |\nabla \Psi|^2 + \frac{\sigma^2}{2} \Delta \Psi \right)(s,X_s) \diff s \right) \right].
\end{align*}
As this expression depends only %
on the temporal marginals of $\mathbf{R}$, it shows that the right hand side of \eqref{eq:zz_aux_2} vanishes if $\mathbf{R}_t = \P_t$ for all $t \in [0,1]$. This completes the proof.
\end{proof}
\subsection{Preliminaries for the proof of Theorem \ref{theo:sparse_data} }\label{sec:preliminary-for-them-sparse}
This subsection provides properties of the entropy functional and the heat flow that will be crucially used for the proof of Theorem \ref{theo:sparse_data}. It gives quantitative estimates of the different terms featured in the functionals $F$ and $F_T$ when the marginals are regularized with the heat flow. Most of the results in this subsection are reorganizations of known results for our purpose; however, the contraction estimate of Proposition \ref{prop:heat_flow_decreases_A} has previously been stated only in the two-marginal case, and Proposition \ref{prop:data_fitting_heat_flow} seems to be novel.
\paragraph{Additional properties of the entropy}
We recall the following standard result.
\begin{prop}\label{prop:compact-sublevel-H}
Let $(Y,\mathcal{B})$ be a Polish space endowed with its Borel $\sigma$-algebra. On the set $\mathcal{P}(Y)^2 = \mathcal{P}(Y) \times \mathcal{P}(Y)$ endowed with the topology of narrow convergence, the functional $(p,r) \mapsto \mathrm{H}(p|r)$ is jointly convex and lower semicontinuous.
Moreover, if $r \in \mathcal{P}(Y)$, then for any $c \geqslant 0$ the sublevel set $\{ p \in \mathcal{P}(Y) \ : \ \mathrm{H}(p|r) \leqslant c \}$ is compact for the topology of narrow convergence.
\end{prop}
\begin{proof}
The first claim follows from the representation \eqref{eq:dual_entropy} which shows that $(p,r) \mapsto \mathrm{H}(p|r)$ can be expressed as a supremum of convex and lower semicontinuous functionals on $\mathcal{P}(Y)^2$. The compactness of the sublevel sets is classical, and it follows for instance from combining \cite[Remark 5.1.5]{ambrosio2008gradient} for a characterization of tight subsets of $\mathcal{P}(Y)$, the tightness of $r$, and the dual representation \eqref{eq:dual_entropy}.
\end{proof}
As a preparatory step let us also prove that the entropy with respect to a fixed probability distribution is $1$-convex with respect to the total variation. This is a rephrasing of Pinsker's inequality which is classical when studying entropy minimization.
\begin{lemma}
\label{lemma:entropy_strictly_convex}
Let $(Y, \mathcal{B})$ be a measurable space and let $p,q,r$ be three probability measures on it. Then
\begin{equation*}
\mathrm{H} \left( \left. \frac{p+q}{2} \right| r \right) \leqslant \frac{1}{2} \mathrm{H}(p|r) + \frac{1}{2} \mathrm{H}(q|r) - \frac{1}{2} \left\| p -q \right\|^2_{\mathrm{TV}}
\end{equation*}
\end{lemma}
\begin{proof}
We can always assume that $\mathrm{H}(p|r), \mathrm{H}(q|r) < + \infty$ as otherwise the right hand side is infinite. Let $s = (p+q)/2$. An algebraic computation (see also \cite[Equation (2.2)]{csiszar1975divergence}) leads to
\begin{align*}
\mathrm{H}(s|r) = \frac{1}{2}[ \mathrm{H}(p|r) + \mathrm{H}(q|r)] -\frac{1}{2}[ \mathrm{H}(p|s)+\mathrm{H}(q|s)]
\end{align*}
On the other hand, thanks to Pinsker's inequality (see for instance \cite[Equation (2.3)]{csiszar1975divergence}), $\mathrm{H}(p|s) \geqslant 2 \| p- s \|^2_{\mathrm{TV}} = \frac{1}{2} \| p- q \|^2_{\mathrm{TV}}$. Similarly, $\mathrm{H}(q|s) \geqslant \frac{1}{2} \| p- q \|^2_{\mathrm{TV}} $ which yields the desired inequality.
\end{proof}
\paragraph{Heat flow}
We define $\Phi_s : L^1(\mathcal{X},\mathrm{vol}) \to L^1(\mathcal{X},\mathrm{vol})$ to be the heat flow on the manifold $\mathcal{X}$. That is, if $f \in L^1(\mathcal{X},\mathrm{vol})$ then $u(s,x) = (\Phi_s f)(x)$ is the unique solution of the Cauchy problem
\begin{equation*}
\begin{cases}
\displaystyle{ \frac{\partial u}{\partial s} = \Delta u } & \text{in } (0,+ \infty) \times \mathcal{X}, \\
\displaystyle{\lim_{s \to 0^+} u(s,\cdot) } = f & \text{in } L^1(\mathcal{X},\mathrm{vol}).
\end{cases}
\end{equation*}
We recall that $\mathcal{X}$ is a smooth compact Riemannian manifold without boundary thus there is no need for boundary conditions. Note that here we follow the convention of \cite{bakry2013analysis} and we do not include factor $1/2$ in front of the Laplacian as it leads to cleaner estimates for the contraction properties of the heat flow. The heat flow preserves the total mass, therefore, if $f \cdot \mathrm{vol}$ is a probability distribution, then so is $u(s,x) \mathrm{vol}(\diff x)$ for all $s \geqslant 0$. Moreover, $u(s,x) \mathrm{vol}(\diff x)$ converges narrowly to $f(x) \mathrm{vol}(\diff x)$ when $s \to 0^+$. Actually the heat flow is well defined even for initial conditions given by general probability measures (see \cite[Theorem 1]{erbar2010heat}), and we use it in the statement of Theorem \ref{theo:main_convergence} when defining the $\widehat{\rho}^{T,\epstheo}_i$.
We now collect a few well known properties of the heat flow. (The assumption that $\mathcal{X}$ is a closed manifold will be crucially used.) We denote by $\| f \|_p$ the $L^p(\mathcal{X},\mathrm{vol})$ norm of a function $f : \mathcal{X} \to \mathbb{R}$.
\begin{prop}
\label{prop:heat_flow}
Let $f \in L^1(\mathcal{X},\mathrm{vol})$ and write $u(s,x) = (\Phi_s f)(x)$. Moreover, let $K$ be a lower bound on the Ricci curvature of the manifold $\mathcal{X}$.
\begin{enumerate}[label=(\roman*)]
\item For every $s_0 > 0$, the function $u$ is of class $C^\infty$ on $(s_0,+\infty) \times \mathcal{X}$, and it is bounded from below by a strictly positive constant provided $f$ is non-negative and not identically $0$.
\item For every $s>0$ there exists a constant $C_s$ depending only on $s$ and $\mathcal{X}$ such that
\begin{equation*}
\| u(s,\cdot) \|_\infty + \mathrm{Lip}(u(s,\cdot)) \leqslant C_s \| f \|_1,
\end{equation*}
where $\mathrm{Lip}(u(s,\cdot))$ denotes the best Lipschitz constant of $u(s,\cdot)$.
\item If $f \in C^1(\mathcal{X})$, then for every $s>0$ there holds everywhere on $\mathcal{X}$
\begin{equation}
\label{eq:Bakry_Emery}
\left| \nabla u (s, \cdot) \right|^2 \leqslant e^{-2sK} \Phi_s \left\{ %
|\nabla f|^2 \right\}.
\end{equation}
\item If $f(x) \mathrm{vol}(\diff x)$ is a probability measure then for every $s>0$
\begin{equation}
\label{eq:contraction_entropy_heat_flow}
\mathrm{H}( u(s,\cdot) | \mathrm{vol} ) \leqslant \min[e^{-2Ks},1]\, \mathrm{H}(f | \mathrm{vol}) ,
\end{equation}
where we have identified a probability measure with its density with respect to the volume measure.
\item There exists $s_0 > 0$ such that, if $s \in (0,s_0)$ then there exists a constant $C_s$ depending on $s$ and $\mathcal{X}$ such that
\begin{equation*}
\mathcal{I}(\Phi_s p) \leqslant C_s
\end{equation*}
provided that $p \in \mathcal{P}(\mathcal{X})$ is any probability measure and $\mathcal{I}$ is defined in \eqref{eq:def_Fisher}.
\end{enumerate}
\end{prop}
\begin{proof}
The first point is simply parabolic regularity, and the lower bound holds thanks to the maximum principle on the compact manifold.
For the second point, that $\| u(s,\cdot) \|_\infty \leqslant C_s \| f \|_1$ is a straightforward $L^1 - L^\infty$ estimate which can be justified for instance by the Gaussian upper bound for the heat kernel \cite[Corollary 3.1]{li1986parabolic}. The Lipschitz estimate can be obtained by combining \cite[Theorem 4.7.2]{bakry2013analysis} (which proves that the Lipschitz constant is controlled at time $s >0$ by the $L^\infty$ norm) with the $L^1-L^\infty$ estimate:
\begin{equation*}
\mathrm{Lip}(u(s,\cdot)) \leqslant C'_{s/2} \| u(s/2,\cdot) \|_\infty \leqslant C'_{s/2} C''_{s/2} \| f \|_1.
\end{equation*}
The Bakry-Emery estimate \eqref{eq:Bakry_Emery} can be found in \cite[Theorem 4.7.2]{bakry2013analysis}.
For the decay estimate \eqref{eq:contraction_entropy_heat_flow} we know thanks to \cite{erbar2010heat} that $\Phi$ is the $\mathrm{EVI}_K$ gradient flow of the entropy $\mathrm{H}(\cdot |\mathrm{vol} )$, and this entails automatically the estimate thanks to \cite[Theorem 3.5]{muratori2020gradient} in the case $K \geqslant 0$, while for the case $K < 0$ we simply use that $\mathrm{H}(\cdot|\mathrm{vol})$ decreases along the heat flow.
For the last point, where we recall that $\mathcal{I}(\Phi_s p)$ is the Fisher information (see \eqref{eq:def_Fisher}) of $\Phi_s p$, we can use that $\mathcal{I}$ is the metric slope of the entropy $\mathrm{H}(\cdot|\mathrm{vol})$ in Wasserstein distance, and then use the general decay estimate of the metric slope of an energy along its gradient flow \cite[Eq. (3.14)]{muratori2020gradient}.
\end{proof}
\paragraph{Heat flow and regularization of the marginals}
We will use the heat flow to regularize the marginals: if $\mathbf{R} \in \mathcal{P}(\Omega)$, then the function $\rho^{(s)}_t(x) = \Phi_s \mathbf{R}_t(x)$ (of $t$ and $x$) is smooth by parabolic regularity. We will need a more quantitative smoothness estimate which is the object of the following proposition.
\begin{prop}
\label{prop:HF_reg}
Let $s > 0$. Then there exist constants $C$ depending only on $\mathcal{X}$ and $C_s$ depending only on $s$ and $\mathcal{X}$ for which the following hold:
\begin{itemize}
\item
For each $\mathbf{R} \in \mathcal{P}(\Omega)$ its heat flow regularization $\Phi_s \mathbf{R}_t$ has density
$\rho^{(s)}(t,\cdot)$ (with respect to the volume measure) that satisfies for all $t \in [0,1], x \in \mathcal{X}$,
\begin{equation*}
\rho^{(s)}(t,x) \geqslant \frac{1}{C_s}.
\end{equation*}
\item Moreover for all $t_1, t_2 \in [0,1]$ and $x_1,x_2 \in \mathcal{X}$,
\begin{equation*}
|\rho^{(s)}(t_1,x_1) - \rho^{(s)}(t_2,x_2)| \leqslant C_s \left( \sigma \sqrt{\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) + C + C\sigma^2} \sqrt{|t_1 - t_2|} + d_{\mathcal{X}}(x_1,x_2) \right).
\end{equation*}
\end{itemize}
\end{prop}
\noindent In other words for a given $s > 0$, $\Phi_s \mathbf{R}_t$ is continuous jointly in $t$ and $x$ with a modulus of continuity which depends only on $s$ and $\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma)$.
\begin{proof}
The first estimate is straightforward thanks to the lower bound on the heat kernel, see Proposition \ref{prop:heat_flow}(i).
For the second one, again thanks to the regularizing effect of the heat flow (Proposition \ref{prop:heat_flow}(ii)) we know that there exists a constant $C_s < + \infty$ such that for all $t,x_1,x_2$,
\begin{equation*}
|\rho^{(s)}(t,x_1) - \rho^{(s)}(t,x_2)| \leqslant C_s d_{\mathcal{X}}(x_1,x_2)
\end{equation*}
whatever $\mathbf{R} \in \mathcal{P}(\Omega)$ is. Thus the only tricky point is the temporal regularity, and we reason by duality. Let $t_1,t_2$ be two instants and $f \in L^1(\mathcal{X},\mathrm{vol})$. From the self adjointness of the heat flow and the Lipschitz regularizing effect of the heat flow,
\begin{multline*}
\int_{\mathcal{X}} f(x) \left( \rho^{(s)}(t_1,x) - \rho^{(s)}(t_2,x) \right) \, \mathrm{vol}(\diff x) =
\int_{\mathcal{X}} (\Phi_s f)(x) \left( \mathbf{R}_{t_1}(\diff x) - \mathbf{R}_{t_2}(\diff x) \right) \\
= \mathbb{E}_{\mathbf{R}} \left[ (\Phi_s f)(X_{t_1}) - (\Phi_s f)(X_{t_2}) \right] \leqslant \mathrm{Lip}(\Phi_s f) \mathbb{E}_{\mathbf{R}}\left[ d_{\mathcal{X}}(X_{t_1},X_{t_2}) \right] \leqslant C_s \| f \|_1 \mathbb{E}_{\mathbf{R}}\left[ d_{\mathcal{X}}(X_{t_1},X_{t_2}) \right].
\end{multline*}
On the other hand, notice that
\begin{align*}
\mathbb{E}_{\mathbf{R}}\left[ d_{\mathcal{X}}(X_{t_1},X_{t_2}) \right]
\leqslant \sqrt{ \mathbb{E}_{\mathbf{R}}\left[ d_{\mathcal{X}}(X_{t_1},X_{t_2})^2 \right]}.
\end{align*}
Lemma \ref{lem:R-displacement} stated and proved below allows us to control the right hand side with $\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma)$, applying it we get the estimate: for all $f \in L^1(\mathcal{X}, \mathrm{vol})$,
\begin{equation*}
\int_{\mathcal{X}} f(x) \left( \rho^{(s)}(t_1,x) - \rho^{(s)}(t_2,x) \right) \, \mathrm{vol}(\diff x) \leqslant C_s \sigma \| f \|_1 \sqrt{\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) + C + C \sigma^2 } \sqrt{|t_1 - t_2|}.
\end{equation*}
Taking the supremum in $f$, and as $\rho^{(s)}(t,\cdot)$ is a continuous function,
\begin{equation*}
\sup_{x \in \mathcal{X}} \left| \rho^{(s)}(t_1,x) - \rho^{(s)}(t_2,x) \right| \leqslant C_s \sigma \sqrt{\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) + C + C\sigma^2} \sqrt{|t_1 - t_2|}.
\end{equation*}
This concludes the proof.
\end{proof}
In the proof above we have crucially used the following lemma, which shows that the entropy functional $\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma)$ controls the expected value of the squared displacement of the process $\mathbf{R}$. In particular, it implies that the curve $t \mapsto \mathbf{R}_t$ is $1/2$-Hölder in quadratic Wasserstein distance, with norm controlled by $\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma)$. One way to prove it would be to rely on the dynamical formulation, starting from the dynamical formulation linked to the minimization of $\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma)$ (see for instance \cite[Theorem 35]{gentil2018dynamical} on Riemannian manifolds or \cite{gigli2020benamou} on more general spaces) and then connecting it with the dynamical formulation of the Wasserstein distance (see for instance \cite[Section 3.2]{gentil2018dynamical}). We prefer to present here a more probabilistic and elementary proof relying on heat kernel estimates.
\begin{lemma}\label{lem:R-displacement}
There exists a constant $C$ depending only on $\mathcal{X}$
such that for each $\mathbf{R} \in \mathcal{P}(\Omega)$,
\begin{align*}
\mathbb{E}_{\mathbf{R}}\left[ d_{\mathcal{X}}(X_{t_1},X_{t_2})^2 \right]
\leqslant C (\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) + C + C\sigma^2|t_1 - t_2|) \sigma^2 |t_1-t_2|.
\end{align*}
In particular as $t_1, t_2 \in [0,1]$,
\begin{align*}
\mathbb{E}_{\mathbf{R}}\left[ d_{\mathcal{X}}(X_{t_1},X_{t_2})^2 \right]
\leqslant C (\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) + C + C \sigma^2) \sigma^2 |t_1-t_2|.
\end{align*}
\end{lemma}
\begin{proof}
For any $\eta > 0$ using \eqref{eq:dual_entropy} with the function $U : X \in \Omega \mapsto \eta d_{\mathcal{X}}(X_{t_1}, X_{t_2})^2$ there holds
\begin{equation}\label{eqn:eta-R}
\eta \mathbb{E}_{\mathbf{R}}\left[ d_{\mathcal{X}}(X_{t_1},X_{t_2})^2 \right] \leqslant \mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) + \log \mathbb{E}_{\mathbf{W}^\sigma} \left[ \exp(\eta d_{\mathcal{X}}(X_{t_1},X_{t_2})^2) \right].
\end{equation}
It remains to choose $\eta$ and bound the last term. We use the Gaussian upper bound for the heat kernel $p_\sigma$ (the transition probability for $W^\sigma$) on $\mathcal{X}$ \cite[Corollary 3.1]{li1986parabolic},
that is,
\begin{align*}
p_\sigma (x, y, t) \leqslant \frac{C}{\sigma^d t^{d/2}}\exp\left[C \sigma^2 t - \frac{d_{\mathcal{X}}(x,y)^2}{C\sigma^2 t}\right]
\end{align*}
for some constant $C$ depending only on $\mathcal{X}$ and $d$ is the dimension of $\mathcal{X}$.
Note that
\begin{align*}
& \mathbb{E}_{\mathbf{W}^\sigma} \left[ \exp(\eta d_{\mathcal{X}}(X_{t_1},X_{t_2})^2) \right] \\
& = \int_{\mathcal{X}}\int_{\mathcal{X}} \exp\left[\eta d_{\mathcal{X}}(x,y)^2\right] p_\sigma(x,y, |t_1-t_2|) \, \mathrm{vol}(\diff x) \mathrm{vol} (\diff y) \\
& \leqslant \int_{\mathcal{X}}\int_{\mathcal{X}} \frac{C}{\sigma^d|t_1-t_2|^{d/2}} \exp\left[C \sigma^2 |t_1-t_2| - \frac{d_{\mathcal{X}}(x,y)^2}{C\sigma^2|t_1-t_2|} + \eta d_{\mathcal{X}}(x,y)^2\right] \mathrm{vol}(\diff x) \mathrm{vol} (\diff y)
\end{align*}
Letting $\eta =\frac{1}{2C \sigma^2 |t_1-t_2|} $ we get for some constant $C_1$ depending only on $\mathcal{X}$:
\begin{align}
& \mathbb{E}_{\mathbf{W}^\sigma} \left[ \exp(\eta d_{\mathcal{X}}(X_{t_1},X_{t_2})^2) \right] \nonumber \\
&\leqslant \int_{\mathcal{X}}\int_{\mathcal{X}} \frac{C}{\sigma^d|t_1-t_2|^{d/2}} \exp\left[C \sigma^2 |t_1-t_2| - \frac{d_{\mathcal{X}}(x,y)^2}{2C\sigma^2|t_1-t_2|}\right] \mathrm{vol}(\diff x) \mathrm{vol} (\diff y)\nonumber \\
& \leqslant C_1 \exp\left[ C\sigma^2 |t_1-t_2| \right],
\label{eqn:eta-Rbis}
\end{align}
where the last inequality comes from the estimate (applied with $s = 2C\sigma^2 |t_1 - t_2|$, the extra multiplicative factor being absorbed in the constant $C_1$)
\begin{align*}
\int_{\mathcal{X}}\int_{\mathcal{X}} \frac{1}{s^{d/2}} \exp\left[ - \frac{d_{\mathcal{X}}(x,y)^2}{s}\right]\mathrm{vol}(\diff x) \mathrm{vol} (\diff y) \leqslant C_2
\end{align*}
for some constant $C_2$ depending only on $\mathcal{X}$. To see this last estimate, consider a constant $\bar r>0$ depending on the compact smooth manifold $\mathcal{X}$, where $\bar r$ is smaller than the injectivity radius of $\mathcal{X}$ and
for each $x \in \mathcal{X}$ the geodesic polar coordinates $(r, \theta)$ at $x$ in the geodesic ball $B_{\bar r} (x)$ has the Riemannian volume form with bound $\mathrm{vol} \leqslant 2 r^{d-1} \diff r \diff \theta$. Then,
\begin{align*}
\int_{\mathcal{X}} \frac{1}{s^{d/2}} \exp\left[ - \frac{d_{\mathcal{X}}(x,y)^2}{s}\right] \mathrm{vol} (\diff y) & \leqslant
\int_{B_{\bar r}(x)} \frac{1}{s^{d/2}} \exp\left[ - \frac{r^2}{s}\right] \mathrm{vol} (\diff y) + \int_{\mathcal{X} \backslash B_{\bar r}(x)} \frac{1}{s^{d/2}} \exp\left[ - \frac{\bar r^2}{s}\right] \mathrm{vol} (\diff y)\\
& \leqslant \int_{\mathbb{R}^d} \frac{1}{s^{d/2}} \exp\left[ - \frac{r^2}{s}\right] 2 r^{d-1} \diff r \diff \theta + \int_{\mathcal{X}} \frac{1}{s^{d/2}} \exp\left[ - \frac{\bar r^2}{s}\right] \mathrm{vol} (\diff y)
\end{align*}
where the first term in the last line is a universal constant depending only on $d$ and the second term is bounded by $\mathrm{vol}(\mathcal{X})$ multiplied by a constant depending only on $\bar r$. Integrating this with respect to $\mathrm{vol}(\diff x)$ gives the desired estimate.
Now, back to \eqref{eqn:eta-R} and plugging the estimate \eqref{eqn:eta-Rbis} with our choice of $\eta$, we see
\begin{align*}
\mathbb{E}_{\mathbf{R}}\left[ d_{\mathcal{X}}(X_{t_1},X_{t_2})^2 \right] \leqslant (\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) + C_3 + C \sigma^2 |t_1-t_2|) 2C \sigma^2 |t_1-t_2|.
\end{align*}
This completes the proof.
\end{proof}
\paragraph{Heat flow and entropy on the space of paths}
When we regularize the marginals, it is not straightforward to see how the entropy on the space of paths changes. To that end, we introduce an auxiliary variational problem, the one where all the temporal marginals are fixed.
\begin{definition}
\label{defi:action}
Let $\rho \in C([0,1], \mathcal{P}(\mathcal{X}))$ be a continuous curve valued in $\mathcal{P}(\mathcal{X})$ (with respect to the narrow topology). We define $\mathcal{A}_\sigma(\rho)$ to be
\begin{equation*}
\mathcal{A}_\sigma(\rho) =
\inf \left\{ \sigma^2 \mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) \ : \ \mathbf{R} \in \mathcal{P}(\Omega) \text{ and } \forall t \in [0,1], \mathbf{R}_t = \rho_t \right\}.
\end{equation*}
By convention $\mathcal{A}_\sigma(\rho) = + \infty$ if the minimization problem above has no admissible competitor.
\end{definition}
The key point is a dual representation of the action $\mathcal{A}$ which allows us to adopt a PDE perspective on the problem.
\begin{prop}
\label{prop:dual_raw_form}
Let $\rho \in C([0,1], \mathcal{P}(\mathcal{X}))$. Then there holds
\begin{multline}
\label{eq:dual_raw_form}
\mathcal{A}_\sigma(\rho) = \sigma^2 \mathrm{H}(\rho_0|\mathrm{vol}) \\
+ \sup_{\varphi} \left\{ - \int_{\mathcal{X}} \varphi(0,x) \, \rho_0(\diff x) - \int_0^1 \int_{\mathcal{X}} \left( \partial_t \varphi + \frac{1}{2} |\nabla \varphi|^2 + \frac{\sigma^2}{2} \Delta \varphi \right) \rho_t(\diff x) \, \diff t \right\}.
\end{multline}
where the supremum is taken over all $\varphi \in C^2([0,1] \times \mathcal{X})$ such that $\varphi(1,\cdot) = 0$.
\end{prop}
\begin{proof}
We start from a duality result from \cite[Proposition 2.3]{arnaudon2017entropic} which enables us to write
\begin{equation*}
\mathcal{A}_\sigma(\rho) = \sigma^2 \mathrm{H}(\rho_0|\mathrm{vol}) + \sigma^2 \, \sup_{\psi} \left\{ \int_0^1 \int_{\mathcal{X}} \psi(t,x) \, \rho_t(\diff x) \, \diff t - \int_{\mathcal{X}} \left[ \log \mathbb{E}_{\mathbf{W}^{\sigma,x}} \exp \left( \int_0^1 \psi(t,X_t) \, \diff t \right) \right] \, \rho_0(\diff x) \right\}
\end{equation*}
where $\mathbf{W}^{\sigma,x}$ is the Wiener measure starting at $x \in \mathcal{X}$, namely, it is the Wiener measure which satisfies $\mathbf{W}^{\sigma,x}_0 = \delta_x$. Here the supremum is taken over all $\psi \in C([0,1] \times \mathcal{X})$. Importantly, their result handles the case $\mathcal{A}_\sigma(\rho) = + \infty$, that is, the left hand side is finite if and only if the right hand side is.
Their result is originally stated for $\mathcal{X}$ being the torus but the proof can be copied word for word in a Polish space. Moreover, in their result they have an additional constraint about the law of $\mathbf{R}_{0,1}$ the joint law of $(X_0,X_1)$: removing a constraint amounts to removing a Lagrange multiplier, hence the result stated above.
The key point is that if we let $- \psi = \frac{1}{\sigma^2} ( \partial_t \varphi + \frac{1}{2} |\nabla \varphi|^2 + \frac{\sigma^2}{2} \Delta \varphi)$ for some smooth $\varphi$ satisfying the terminal condition $\varphi(1,x) = 0$, then by the martingale properties of Wiener measures recalled in Proposition \ref{prop:martingale_exp},
\begin{equation*}
\mathbb{E}_{\mathbf{W}^{\sigma,x}} \left[ \exp \left( - \frac{\varphi(0,X_0)}{\sigma^2} + \int_0^1 \psi(t,X_t) \, \diff t \right) \right] =1.
\end{equation*}
As $X_0 = x$ under $\mathbf{W}^{\sigma,x}$, we see that
\begin{align*}
\int_{\mathcal{X}} \left[ \log \mathbb{E}_{\mathbf{W}^{\sigma,x}} \exp \left( \int_0^1 \psi(t,X_t) \, \diff t \right) \right] \, \rho_0(\diff x) &= \int_{\mathcal{X}} \left[ \log e^{\sigma^{-2} \varphi(0,x)} \right] \, \rho_0(\diff x)\\ & = \frac{1}{\sigma^2} \int_{\mathcal{X}} \varphi(0,x) \, \rho_0(\diff x).
\end{align*}
On the other hand, for any $\psi \in C([0,1] \times \mathcal{X})$ there exists $\varphi$ satisfying the terminal condition $\varphi(1,x) = 0$ such that $- \psi = \frac{1}{\sigma^2}( \partial_t \varphi + \frac{1}{2} |\nabla \varphi|^2 + \frac{\sigma^2}{2} \Delta \varphi)$: it is enough to solve the linear backward diffusion heat equation
\begin{equation*}
\partial_t u + \frac{\sigma^2}{2} \Delta u = - \psi u
\end{equation*}
with terminal condition $u(1,x) = 1$ and take $\varphi = \sigma^2 \log u$.
Therefore we can rewrite
\begin{equation*}
\mathcal{A}_\sigma(\rho)
= \sigma^2 \mathrm{H}(\rho_0|\mathrm{vol}) + \sup_{\varphi} \left\{ - \int_0^1 \int_{\mathcal{X}} \left( \partial_t \varphi + \frac{1}{2} |\nabla \varphi|^2 + \frac{\sigma^2}{2} \Delta \varphi \right) \rho_t(\diff x) \, \diff t - \int_{\mathcal{X}} \varphi(0,x) \rho_0(\diff x) \right\}. \qedhere
\end{equation*}
\end{proof}
The main result is the following contraction result for $\mathcal{A}_\sigma$ under the heat flow. This can be seen as a path-space counterpart of the well known contraction of entropy under the heat flow (which we recalled in Proposition \ref{prop:heat_flow}(iv)). Closely related results are also available in the case where only two marginals are fixed, for instance a contraction estimate has been derived in the Schrödinger problem for smooth densities in \cite[Theorem 37]{gentil2018dynamical}. On the other hand, still in the two marginal case but in the limit $\sigma \to 0$, it is well understood that the heat flow is Lipschitz with respect to Wasserstein distance \cite[Theorem 1]{erbar2010heat}.
\begin{prop}
\label{prop:heat_flow_decreases_A}
Let $\rho \in C([0,1], \mathcal{P}(\mathcal{X}))$ %
and define, for $s \geqslant 0$, the new curve $\rho^{(s)} : t \mapsto \Phi_s \rho_t$. Furthermore let $K$ be a lower bound on the Ricci curvature of the manifold $\mathcal{X}$. Then, for any $s \geqslant 0$ it holds that
\begin{equation*}
\mathcal{A}_\sigma(\rho^{(s)}) \leqslant e^{ - 2 K s } \mathcal{A}_\sigma(\rho).
\end{equation*}
\end{prop}
\begin{proof}
This is a consequence of the dual formulation in Proposition \ref{prop:dual_raw_form}.
If $\varphi : [0,1] \times \mathcal{X} \to \mathbb{R}$ is a $C^2$ function with $\varphi(1, \cdot) =0$ then by self adjointness of the heat flow,
\begin{multline*}
\int_{\mathcal{X}} \varphi(0,\cdot) \, \rho^{(s)}_0 + \int_0^1 \int_{\mathcal{X}} \left( \partial_t \varphi + \frac{1}{2} |\nabla \varphi|^2 + \frac{\sigma^2}{2} \Delta \varphi \right) \rho^{(s)}_t \diff t \\
= \int_{\mathcal{X}} \left\{ \Phi_s \varphi \right\}(0,\cdot) \, \rho_0 + \int_0^1 \int_{\mathcal{X}} \left( \partial_t \left\{ \Phi_s \varphi \right\} + \Phi_s \left\{ \frac{1}{2} |\nabla \varphi|^2 \right\} + \frac{\sigma^2}{2} \Delta\left\{ \Phi_s \varphi \right\} \right) \rho_t \, \diff t.
\end{multline*}
We have used that $\Phi_s \partial_t \varphi = \partial_t \Phi_s \varphi$ (this is Schwarz theorem) and $\Phi_s \Delta \varphi = \Delta \Phi_s \varphi$ (which can be checked for instance by noticing that $s \mapsto \Delta \Phi_s \varphi$ also follows the heat flow). To handle the term with the gradient, we use the Bakry-\'Emery estimate \eqref{eq:Bakry_Emery}. Thus, by writing $\tilde{\varphi}_s = e^{2sK} \Phi_s \varphi$, there holds
\begin{multline*}
-\int_{\mathcal{X}} \varphi(0,\cdot) \, \rho^{(s)}_0 - \int_0^1 \int_{\mathcal{X}} \left( \partial_t \varphi + \frac{1}{2} |\nabla \varphi|^2 + \frac{\sigma^2}{2} \Delta \varphi \right) \rho^{(s)}_t \diff t \\
\leqslant - e^{-2sK} \left[ \int_{\mathcal{X}} \tilde{\varphi}_s(0,\cdot) \, \rho_0 + \int_0^1 \int_{\mathcal{X}} \left( \partial_t \tilde{\varphi}_s + \frac{1}{2} |\nabla \tilde{\varphi}_s|^2 + \frac{\sigma^2}{2} \Delta \tilde{\varphi}_s \right) \rho_t \, \diff t \right]
\leqslant e^{-2sK} (\mathcal{A}_\sigma(\rho) - \sigma^2 \mathrm{H}(\rho_0|\mathrm{vol}))
\leqslant e^{-2sK} (\mathcal{A}_\sigma(\rho) - \sigma^2 \mathrm{H}(\rho_0|\mathrm{vol}))
\end{multline*}
where the last inequality comes from Proposition \ref{prop:dual_raw_form}. Taking the supremum in $\varphi$, we end up with the estimate
\begin{equation*}
\mathcal{A}_\sigma(\rho^{(s)}) \leqslant e^{-2sK} \mathcal{A}_\sigma(\rho) + \sigma^2 \left[ \mathrm{H}(\Phi_s \rho_0|\mathrm{vol}) - e^{-2Ks} \mathrm{H}(\rho_0|\mathrm{vol}) \right].
\end{equation*}
Applying \eqref{eq:contraction_entropy_heat_flow} we get the desired inequality.
\end{proof}
We now define the regularizing operator $\mathcal{G}_s$ which acts at the level of laws on the space of paths.
\begin{definition}
For each $\mathbf{R} \in \mathcal{P}(\Omega)$ with $\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) < + \infty$ and for each $s \geqslant 0$ define
\begin{equation*}
\mathcal{G}_s(\mathbf{R}) = \mathrm{argmin} \left\{ \mathrm{H}(\tilde{\mathbf{R}} | \mathbf{W}^\sigma) \ : \ \forall t \in [0,1], \ \tilde{\mathbf{R}}_t = \Phi_s \mathbf{R}_t \right\}.
\end{equation*}
That is, among all probability distributions on the space of paths whose marginals coincide with $t \mapsto \Phi_s \mathbf{R}_t$, the measure $\mathcal{G}_s(\mathbf{R}) \in \mathcal{P}(\Omega)$ is the one with the smallest entropy.
\end{definition}
To see why
the measure $\mathcal{G}_s(\mathbf{R})$ is well defined, first notice that
thanks to Proposition \ref{prop:heat_flow_decreases_A} we have $\mathcal{A}_\sigma((\Phi_s \mathbf{R}_t)_t) \leqslant e^{-2Ks} \mathcal{A}_\sigma((\mathbf{R}_t)_t) \leqslant e^{-2Ks} \sigma^2 \mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) < + \infty$. This guarantees that the minimization problem has nonempty admissible solutions. Since
each sublevel set of the entropy is compact, there exists a minimizer and from strict convexity of the entropy functional such a minimizer is uniquely determined.
Notice that
\begin{align*}
\mathcal{A}_\sigma((\Phi_s \mathbf{R}_t)_t) = \sigma^2 \, \mathrm{H}(\mathcal{G}_s(\mathbf{R})|\mathbf{W}^\sigma)
\end{align*}
and we have
\begin{prop}
\label{prop:properties_regularization}
For each $\mathbf{R} \in \mathcal{P}(\Omega)$ with $\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma)< + \infty$ the following holds:
\begin{enumerate}[label=(\roman*)]
\item For any $s \geqslant 0$,
$\mathrm{H}(\mathcal{G}_s(\mathbf{R}) |\mathbf{W}^\sigma) \leqslant e^{-2Ks} \mathrm{H}(\mathcal{G}_0 (\mathbf{R})|\mathbf{W}^\sigma) \leqslant e^{-2Ks} \mathrm{H}( \mathbf{R} |\mathbf{W}^\sigma)$.
\item %
$\mathcal{G}_s(\mathbf{R})$ converges to $\mathcal{G}_0(\mathbf{R})$
narrowly as $s \to 0^+$.
\end{enumerate}
\end{prop}
\begin{proof}
The first property is nothing else than a rewriting of Proposition \ref{prop:heat_flow_decreases_A} together with the definition of $\mathcal{G}_s$ and $\mathcal{A}_\sigma$.
\medskip
For the second property we use a sequential characterization. Let $(s_n)_{n \in \mathbb{N}}$ a sequence which goes to $0$. Thanks to the contraction estimate, we know that $\mathrm{H}(\mathcal{G}_{s_n} \mathbf{R} | \mathbf{W}^\sigma)$ is uniformly bounded in $n$. Let $\tilde{\mathbf{R}}$ be any accumulation point of $\mathcal{G}_{s_n} \mathbf{R}$: it exists thanks to the compactness of the sublevel sets of $\mathrm{H}(\cdot|\mathbf{W}^\sigma)$. The only thing to prove is $\tilde{\mathbf{R}} = \mathcal{G}_0 \mathbf{R}$. Below we do not relabel subsequence, that is we assume that $\mathcal{G}_{s_n} \mathbf{R} \to \tilde{\mathbf{R}}$ as $n \to + \infty$.
The marginals of $\tilde{\mathbf{R}}$ are the same as the marginals of $\mathbf{R}$: this is straightforward to check as the marginals of $\mathcal{G}_{s_n} \mathbf{R}$ (which converge to the ones of $\tilde{\mathbf{R}}$) are the $(\Phi_{s_n} \mathbf{R}_t)_{t \in [0,1]}$ and $\Phi_{s_n} f \to f$ as $s_n \to 0$ for instance in $L^1(\mathcal{X}, \mathrm{vol})$. Thus, using the lower semi continuity of the entropy, the definition of $\mathcal{G}_{s_n}$ and then the contraction estimate for the action,
\begin{equation*}
\sigma^2 \, \mathrm{H}(\tilde{\mathbf{R}} | \mathbf{W}^\sigma) \leqslant \liminf_{n \to + \infty} \, \sigma^2 \, \mathrm{H}( \mathcal{G}_{s_n}(\mathbf{R}) | \mathbf{W}^\sigma ) = \liminf_{n \to + \infty} \, \mathcal{A}_\sigma( (\Phi_{s_n}\mathbf{R}_t)_t ) \leqslant \liminf_{n \to + \infty} \, e^{-2Ks_n} \mathcal{A}_\sigma((\mathbf{R}_t)_t) = \mathcal{A}_\sigma((\mathbf{R}_t)_t).
\end{equation*}
This shows by definition that $\tilde{\mathbf{R}} = \mathcal{G}_0 \mathbf{R}$ and concludes the proof.
\end{proof}
\noindent We think that it should be possible to prove that $\mathcal{G}_0$ is continuous on its domain of definition, that would imply the set $\{ \mathbf{R} \in \mathcal{P}(\Omega) \ : \ \mathbf{R} = \mathcal{G}_0 \mathbf{R} \}$ is closed. Notice that Theorem \ref{thm:SDE_grad_min_KL} asserts that the law of a SDE whose drift is a smooth gradient belongs to $\{ \mathbf{R} \in \mathcal{P}(\Omega) \ : \ \mathbf{R} = \mathcal{G}_0 \mathbf{R} \}$, and we think that it is possible to prove that this set is the closure of laws of such SDEs. However in the proof of Theorem \ref{theo:sparse_data} we do not rely on this property and rather use the strict convexity of the entropy.
\begin{remark}
One may prefer to have a direct probabilistic construction of the operator $\mathcal{G}_s$ whose role is to smooth the marginals while not increasing too much the entropy with respect to $\mathbf{W}^\sigma$. In the case where $\mathcal{X}$ is the torus such a construction has been performed in \cite{baradat2020small} (with a construction that also handles temporal boundary values in a finer way). From a law $\mathbf{R} \in \mathcal{P}(\Omega)$, one regularizes it by considering the law of $(X_t + Z_s)_{t \in [0,1]}$ where $(X_t)_{t \in [0,1]}$ follows $\mathbf{R}$ while $Z_s$ is a random variable distributed according to $\Phi_s \delta_0$ and independent from $(X_t)_{t \in [0,1]}$. The intuition is that $Z_s$ is a Gaussian random variable of variance proportional to $s$ but this is not exactly the case because the heat flow on the torus is not obtained from projection of the heat flow on $\mathbb{R}^d$. Then, it is clear that the marginals of the new process are the $(\Phi_s \mathbf{R}_t)_{t \in [0,1]}$ while evaluating the entropy is easy. To perform a similar construction on a Riemannian manifold seems to be a more delicate matter which would likely involve parallel transport: this is out of the scope of the present article, and we have preferred to present our proof that is based on $\mathcal{A}_\sigma$ and its dual formulation.
\end{remark}
\paragraph{The data-fitting term and its behavior under the heat flow}
Our last preliminaries before we start the proof of Theorem \ref{theo:sparse_data} concern the data-fitting term and its behavior under the heat flow.
We define the data-fitting term as follows, sometimes the denomination ``cross-entropy'' is used.
\begin{definition}
\label{defi:DF}
If $p,r \in \mathcal{P}(\mathcal{X})$ such that $\mathrm{H}(p|\mathrm{vol}) < + \infty$ we define
\begin{equation*}
\mathrm{DF}(r,p) = \mathrm{H}(p|r) - \mathrm{H}(p|\mathrm{vol}).
\end{equation*}
In particular, if $r \ll \mathrm{vol}$, identifying a measure with its density with respect to the volume measure,
\begin{equation*}
\mathrm{DF}(r,p) = - \int_{\mathcal{X}} \log r(x) \, p(\diff x).
\end{equation*}
\end{definition}
We have this easy property which follows directly from the joint convexity and lower semi continuity of the entropy.
\begin{prop}
If $\mathrm{H}(p|\mathrm{vol}) < + \infty$ then the function $r \mapsto \mathrm{DF}(r,p)$ is convex and lower semi continuous on $\mathcal{P}(\mathcal{X})$.
\end{prop}
In the proof of Theorem \ref{theo:sparse_data}, we will need a quantitative control of the effect of the heat flow on the data-fitting term $\mathrm{DF}$ as in the following proposition.
Here, unlike the usual heat flow on the entropy functional in the literature, the heat flow is applied to $r$, the reference measure of the relative entropy functional $\mathrm{H}(p|r)$.
\begin{prop}
\label{prop:data_fitting_heat_flow}
Take $p,r \in \mathcal{P}(\mathcal{X})$ and assume that the density of $p$ satisfies (in Sobolev sense) $\mathcal{I}(p) < + \infty$ where we recall that $\mathcal{I}$ is defined in \eqref{eq:def_Fisher}. Then, for every $s > 0$,
\begin{equation*}
\mathrm{DF}( \Phi_s r, p ) \leqslant \mathrm{DF}( r,p ) + \frac{1}{4}\mathcal{I}(p) s.
\end{equation*}
(It is remarkable that the second term of the right hand side is independent of $r$.)
\end{prop}
\begin{proof}
By a slight abuse of notation, we denote by $r \in L^1(\mathcal{X},\mathrm{vol})$ the density of $r$ with respect to $\mathrm{vol}$.
Let us write by $r(s,\cdot)$ the density of $\Phi_s r$ with respect to $\mathrm{vol}$. It satisfies the heat equation
\begin{equation*}
\frac{\partial r}{\partial s} = \Delta r.
\end{equation*}
In particular, we can compute:
\begin{align*}
\frac{\diff}{\diff s} \mathrm{DF}( \Phi_s r, p )& = \frac{\diff}{\diff s} \int_{\mathcal{X}} - \log r(s,x) p(x) \, \mathrm{vol}(\diff x) = -\int_{\mathcal{X}} \frac{\partial_s r(s,x)}{r(s,x)} p(x) \, \mathrm{vol}(\diff x) \\
& = - \int_{\mathcal{X}} \frac{\Delta r(s,x)}{r(s,x)} p(x) \, \mathrm{vol}(\diff x) = \int_{\mathcal{X}} \nabla r (s,x) \cdot \nabla \left( \frac{p(x)}{r(s,x)} \right) \, \mathrm{vol}(\diff x) \\
& = - \int_{\mathcal{X}} \frac{|\nabla r(s,x)|^2}{r(s,x)^2} p(x) \, \mathrm{vol}(\diff x) + \int_{\mathcal{X}} \frac{\nabla r(s,x)}{r(s,x)} \cdot\frac{ \nabla p(x)}{p(x)} p(x) \, \mathrm{vol}(\diff x)\\
& \leqslant \frac{1}{4} \int_{\mathcal{X}} \frac{|\nabla p(x)|^2}{p(x)} \mathrm{vol}(\diff x) = \frac{1}{4} \mathcal{I}(p).
\end{align*}
Integrating this equation with respect to $s$ yields the conclusion.
\end{proof}
\subsection{Proof of Theorem \ref{theo:sparse_data}}
We now have all the tools at our disposal to prove the convergence result.
\begin{lemma}
\label{lem:F_bd_below}
With the assumptions of Theorem \ref{theo:sparse_data}, the functionals $F_T$ and $F$ are bounded from below by a constant independent of $T$.
\end{lemma}
\begin{proof}
As the entropy $\mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma)$ is non negative, the problematic terms are the $\mathrm{DF}(\mathbf{R}_{t_i}, \widehat{\rho}^T_i)$ which can be negative. However, we always have
\begin{equation*}
\mathrm{DF}(r,p) \geqslant - \mathrm{H}(p|\mathrm{vol}).
\end{equation*}
On the other hand, using log-Sobolev inequalities (see, e.g. \cite[Equation (1.1)]{chen1997estimates}), there exists a constant $C$ which depends only on $\mathcal{X}$ such that for all $p$,
\begin{equation*}
\mathrm{H}(p|\mathrm{vol}) \leqslant C \mathcal{I}(p),
\end{equation*}
where $\mathcal{I}(p)$ is the Fisher information of $p$. The result follows as $\mathcal{I}(\widehat{\rho}^T_i)$ is assumed to be uniformly bounded in $T$ and $i$ and so is $\mathcal{I}(\overline{\rho}_t)$ in $t$.
\end{proof}
\noindent If the lower bound $K$ on the Ricci curvature of $\mathcal{X}$ is strictly positive, then we can take $C = 1/K$ in the proof above. In the general case, the compactness of $\mathcal{X}$ ensures the finiteness of $C$.
\begin{prop}
\label{prop:GammaLimsup}
Use the notation and assumptions of Theorem \ref{theo:sparse_data}.
Suppose $\mathbf{R} \in \mathcal{P}(\Omega)$ with $F(\mathbf{R}) < + \infty$ and $\mathcal{G}_0 \mathbf{R} = \mathbf{R}$. Then there exists a sequence $\tilde{\mathbf{R}}^{T}$ which converges to $\mathbf{R}$ as $T \to + \infty$ and such that
\begin{equation*}
\limsup_{T \to + \infty} F_T(\tilde{\mathbf{R}}^{T}) \leqslant F(\mathbf{R})
\end{equation*}
\end{prop}
\begin{proof}
Let $s > 0$. Combining Proposition \ref{prop:data_fitting_heat_flow} to handle the data-fitting term and Proposition \ref{prop:properties_regularization} to handle the law on the space of paths,
\begin{align*}
F(\mathcal{G}_s \mathbf{R}) & = \sigma^2 \mathrm{H}(\mathcal{G}_s \mathbf{R}|\mathbf{W}^\sigma) + \frac{1}{\lambda} \int_0^1 \mathrm{DF}(\Phi_s \mathbf{R}_t, \overline{\rho}_t) \, \diff t \\
& \leqslant \sigma^2 e^{-2Ks} \mathrm{H}(\mathbf{R}|\mathbf{W}^\sigma) + \frac{1}{\lambda} \int_0^1 \mathrm{DF}(\mathbf{R}_t, \overline{\rho}_t) \, \diff t + \frac{s}{4\lambda} \int_0^1 \mathcal{I}(\overline{\rho}_t) \, \diff t.
\end{align*}
Thus it holds
\begin{equation*}
\limsup_{s \to 0} \, F(\mathcal{G}_s \mathbf{R}) \leqslant F(\mathbf{R}).
\end{equation*}
On the other hand, as $- \log ([\mathcal{G}_s \mathbf{R}]_t)$ is a continuous function of $t$ and $x$ (this is Proposition \ref{prop:HF_reg}), we can use the weak convergence of the $\widehat{\rho}^{T}$ to $\overline{\rho}$ and write, for $s > 0$
\begin{equation*}
\lim_{T \to + \infty} \sum_{i=1}^T \omega^{T}_i \mathrm{DF} \left( \left[\mathcal{G}_s \mathbf{R}\right]_{t^{T}_i}, \widehat{\rho}^{T}_i \right) = \int_{0}^1 \mathrm{DF}( [\mathcal{G}_s \mathbf{R}]_t, \overline{\rho}_t ) \, \diff t.
\end{equation*}
This reads exactly as: for all $s > 0$, there holds $\lim_{T \to + \infty} F_T(\mathcal{G}_s \mathbf{R}) = F(\mathcal{G}_s \mathbf{R})$.
To conclude, it is enough to define $\tilde{\mathbf{R}}^{T} = \mathcal{G}_{s_T} \mathbf{R}$ for a sequence $(s_T)_{T \geqslant 1}$ which converges to $0$ slowly enough as $T \to + \infty$.
\end{proof}
\begin{prop}
\label{prop:GammaLiminf}
Use the notation and assumptions of Theorem \ref{theo:sparse_data}.
For each $T \geqslant 1$, let $\tilde{\mathbf{R}}^{T} \in \mathcal{P}(\Omega)$ and assume that it converges narrowly to some $\mathbf{R} \in \mathcal{P}(\Omega)$ as $T\to\infty$. Then
\begin{equation*}
F(\mathcal{G}_0 \mathbf{R}) \leqslant \liminf_{T \to + \infty} \, F_T(\tilde{\mathbf{R}}^{T}).
\end{equation*}
\end{prop}
\begin{proof}
The proof follows the same path as for Proposition \ref{prop:GammaLimsup}. Assume that $\liminf_{T \to + \infty} \, F_T(\tilde{\mathbf{R}}^{T}) < + \infty$ otherwise there is nothing to prove. In particular, (up to an extraction that we do not relabel), there holds $\sup_T \mathrm{H}(\tilde{\mathbf{R}}^{T}| \mathbf{W}^\sigma) < + \infty$.
Combining Proposition \ref{prop:data_fitting_heat_flow} to handle the data-fitting term and Proposition \ref{prop:properties_regularization} to handle the law on the space of paths, we have
\begin{align*}
F_T(\mathcal{G}_s \tilde{\mathbf{R}}^{T}) & = \sigma^2 \mathrm{H}(\mathcal{G}_s \tilde{\mathbf{R}}^{T}|\mathbf{W}^\sigma) + \frac{1}{\lambda} \sum_{i=1}^T \omega^{T}_i \mathrm{DF} \left( \Phi_s \tilde{\mathbf{R}}^{T}_{t^T_i}, \widehat{\rho}^{T}_i \right) \\
& \leqslant \sigma^2 e^{-2Ks} \mathrm{H}(\tilde{\mathbf{R}}^{T}|\mathbf{W}^\sigma) + \frac{1}{\lambda} \sum_{i=1}^T \omega^{T}_i \mathrm{DF} \left( \tilde{\mathbf{R}}^{T}_{t^{T}_i}, \widehat{\rho}^{T}_i \right) + \frac{s}{4\lambda} \sum_{i=1}^T \omega^{T}_i\mathcal{I}(\widehat{\rho}^{T}_i).
\end{align*}
This time we rewrite it as
\begin{equation*}
F_T(\tilde{\mathbf{R}}^{T}) \geqslant F_T(\mathcal{G}_s \tilde{\mathbf{R}}^{T}) - C(s),
\end{equation*}
where
\begin{align*}
C(s) = | e^{-2Ks}-1| \sigma^2 \sup_T \mathrm{H}(\tilde{\mathbf{R}}^{T}| \mathbf{W}^\sigma) + \frac{s}{4\lambda} \sup_T \sum_{i=1}^T \omega^{T}_i\mathcal{I}(\widehat{\rho}^{T}_i)
\end{align*}
is finite and independent of $T$ (the first supremum is finite as noted above, and the second one is finite by assumption (ii) of Theorem \ref{theo:sparse_data}), and $\lim_{s\to 0^+}C(s) =0$.
To consider the data-fitting term let $a^{T}_s (t, x)$ denote the family of functions $a^{T}_s(t,x) := - \log \left( \Phi_s \tilde{\mathbf{R}}^{T}_t (x) \right)$, parameterized by $T$. Notice that from the definition of $\mathrm{DF}$ we have
\begin{align*}
\sum_{i=1}^T \omega^{T}_i \mathrm{DF} \left( \Phi_s \tilde{\mathbf{R}}^{T}_{t^T_i}, \widehat{\rho}^{T}_i \right) & = \sum_{i=1}^T \omega^{T}_i \int_{\mathcal{X}} a^{T}_s \left( t^{T}_i, x \right) \, \widehat{\rho}^{T}_i(\diff x).
\end{align*}
For a given $s > 0$, the family of functions $a^{T}_s(t,x)$ indexed by $T$ is uniformly equicontinuous thanks to Proposition \ref{prop:HF_reg}. Up to extraction, as $T\to \infty$ it converges uniformly on $[0,1] \times \mathcal{X}$ to the function $a_s(t,x)= -\log\left( \Phi_s \mathbf{R}_t\right)$ which is equal to $- \log \left( [\mathcal{G}_s \mathbf{R}]_t \right)$. Combining this uniform convergence with the weak convergence of the $\widehat{\rho}^T_i$, we can pass to the limit of the data-fitting term:
\begin{align*}
\lim_{T \to + \infty} \, \sum_{i=1}^T \omega^{T}_i \mathrm{DF} \left( \Phi_s \tilde{\mathbf{R}}^{T}_{t^T_i}, \widehat{\rho}^{T}_i \right)
& = \lim_{T \to + \infty} \, \sum_{i=1}^T \omega^{T}_i \int_{\mathcal{X}} a^{T}_s \left( t^{T}_i, x \right) \, \widehat{\rho}^{T}_i(\diff x) \\
& = \int_{0}^1 \int_{\mathcal{X}} a_s(t,x) \, \overline{\rho}_t(\diff x) \diff t = \int_0^1 \mathrm{DF} \left( [\mathcal{G}_s \mathbf{R}]_t, \overline{\rho}_t \right) \diff t.
\end{align*}
Together with the lower semi continuity of the entropy, we have $F(\mathcal{G}_s \mathbf{R}) \leqslant \liminf_{T\to\infty} F_T(\mathcal{G}_s \tilde{\mathbf{R}}^T)$.
The results of the above two paragraphs allow us to write (for each $s>0$)
\begin{equation*}
\liminf_{T \to + \infty} \, F_T(\tilde{\mathbf{R}}^{T}) \geqslant F(\mathcal{G}_s \mathbf{R}) - C(s).
\end{equation*}
To conclude we send $s \to 0^+$, using the lower semi continuity of $F$ and the convergence of $\mathcal{G}_s \mathbf{R}$ to $\mathcal{G}_0 \mathbf{R}$ when $s \to 0^+$ (from Proposition \ref{prop:properties_regularization}).
\end{proof}
\begin{proof}[\bf Proof of Theorem \ref{theo:sparse_data}]
Let $\mathbf{R}$ be the minimizer of $F$ and $\mathbf{R}^{T}$ the minimizer of $F_T$. Note that by optimality, there must hold $\mathcal{G}_0 \mathbf{R} = \mathbf{R}$ (and also $\mathcal{G}_0 \mathbf{R}^{T} = \mathbf{R}^{T}$). Using Proposition \ref{prop:GammaLimsup}, we can find a sequence $\tilde{\mathbf{R}}^{T}$ which converges narrowly to $\mathbf{R}$ as $T \to + \infty$ and such that
\begin{equation*}
F(\mathbf{R}) \geqslant \limsup_{T \to + \infty} F_T(\tilde{\mathbf{R}}^{T}) \geqslant \limsup_{T \to + \infty} \, \min_{\mathcal{P}(\Omega)} F_T = \limsup_{T \to + \infty} \, F_T(\mathbf{R}^{T}).
\end{equation*}
In particular, the sequence $F_T(\mathbf{R}^{T})$ is bounded, which implies (by Lemma \ref{lem:F_bd_below}) that the sequence $\mathrm{H}(\mathbf{R}^{T}|\mathbf{W}^\sigma)$ is bounded too, then from the compactness of sublevel sets of the entropy $\mathrm{H}$ we have an accumulation point, say, $\hat{\mathbf{R}}$ of the sequence $(\mathbf{R}^T)$. %
Using the optimality of $\mathbf{R}$ and Proposition \ref{prop:GammaLiminf}, we get
\begin{equation*}
F(\mathbf{R}) \leqslant F(\mathcal{G}_0 \hat{\mathbf{R}}) \leqslant \liminf_{T \to + \infty} F_T(\mathbf{R}^{T}).
\end{equation*}
Thus we have equalities everywhere and we conclude
\begin{equation*}
F(\mathbf{R}) = \limsup_{T \to + \infty} F_T(\tilde{\mathbf{R}}^{T})
= \lim_{T \to + \infty} F_T(\mathbf{R}^{T}).
\end{equation*}
In particular, it implies that
\begin{equation*}
F_T(\tilde{\mathbf{R}}^{T}) - F_T(\mathbf{R}^{T}) = F_T(\tilde{\mathbf{R}}^{T}) - \min_{\mathcal{P}(\Omega)} F_T
\end{equation*}
converges to $0$ as $T \to + \infty$. Thanks to the 1-convexity of $\mathrm{H}(\cdot|\mathbf{W}^\sigma)$ (Lemma \ref{lemma:entropy_strictly_convex}) as well as the convexity of the data-fitting term, $\| \tilde{\mathbf{R}}^{T} - \mathbf{R}^{T} \|_{\mathrm{TV}}$ converges to $0$ as $T \to + \infty$. Note that TV-convergence is stronger than narrow convergence, so combined with the narrow convergence of $\tilde{\mathbf{R}}^{T}$ to $\mathbf{R}$, we conclude that $\mathbf{R}^{T}$ converges narrowly to $\mathbf{R}$ as $T \to + \infty$.
\end{proof}
\subsection{Proof of Theorem \ref{theo:GammaConvergence_stdt}}
\begin{proof}[\bf Proof of Theorem \ref{theo:GammaConvergence_stdt}]
First, using $\mathbf{R} = \mathcal{G}_\epstheo \P$ as a competitor in $G_{\lambda,\epstheo}$ and using the contraction estimate given by Proposition \ref{prop:properties_regularization}, we get
\begin{equation*}
\min_{\mathcal{P}(\Omega)} \, G_{\lambda,\epstheo} = G_{\lambda,\epstheo}(\mathbf{R}^{\lambda,\epstheo}) \leqslant \sigma^2 \mathrm{H}(\mathcal{G}_\epstheo \P |\mathbf{W}^\sigma) \leqslant \sigma^2 e^{-2K\epstheo} \mathrm{H}(\mathcal{G}_0 (\P)|\mathbf{W}^\sigma).
\end{equation*}
In particular, $G_{\lambda,\epstheo}(\mathbf{R}^{\lambda,\epstheo})$ is uniformly bounded in $\lambda$ and $\epstheo$. As a direct consequence, $\mathrm{H}(\mathbf{R}^{\lambda,\epstheo}|\mathbf{W}^\sigma)$ is uniformly bounded. Due to Proposition \ref{prop:compact-sublevel-H} this implies that the family $\mathbf{R}^{\lambda,\epstheo}$ belongs to a compact set in the narrow topology. Let $\tilde{\mathbf{R}}$ be any accumulation point in the limit $\lambda \to 0, \epstheo \to 0$. We only need to show that necessarily $\tilde{\mathbf{R}} = \mathcal{G}_0 \P$.
Note that
$$\sigma^2 \mathrm{H}(\mathbf{R}^{\lambda, \epstheo}|\mathbf{W}^\sigma) \leqslant G_{\lambda, \epstheo} (\mathbf{R}^{\lambda, \epstheo}) \leqslant \sigma^2 e^{-2K\epstheo} \mathrm{H}(\mathcal{G}_0 (\P)|\mathbf{W}^\sigma), $$ thus
by sending $\epstheo \to 0$ and using the lower semi continuity of the entropy, we get
\begin{equation*}
\mathrm{H}(\tilde{\mathbf{R}}|\mathbf{W}^\sigma) \leqslant \mathrm{H}( \mathcal{G}_0 \P | \mathbf{W}^\sigma).
\end{equation*}
Now, using Fatou's Lemma and the joint lower semi continuity of the entropy,
\begin{equation*}
\int_0^1 \mathrm{H}(\P_t | \tilde{\mathbf{R}}_t) \, \diff t \leqslant \liminf_{\lambda \to 0, \epstheo \to 0} \, \int_{0}^1 \mathrm{H}(\Phi_\epstheo \P_t | \mathbf{R}^{\lambda,\epstheo}_t) \, \diff t \leqslant \liminf_{\lambda \to 0, \epstheo \to 0} \, \left( \lambda \, \sup_{\lambda,\epstheo} G_{\lambda,\epstheo}(\mathbf{R}^{\lambda,\epstheo}) \right) = 0.
\end{equation*}
Thus we conclude that $\tilde{\mathbf{R}}_t = \P_t$ for almost every $t$; in fact, the equality holds for every $t$ due to the continuity of the marginals in $t$. Therefore by definition of $\mathcal{G}_0$ we deduce $\tilde{\mathbf{R}} = \mathcal{G}_0 \P$. This concludes the proof.
\end{proof}
\FloatBarrier
\section{Numerical Results}\label{sec:numerical_results}
\begin{paragraph}{Overview}
In this section, we investigate in detail the behavior and performance of our implementation of the computational method described in Section \ref{sec:methodology}, which we refer to as Global Waddington-OT (gWOT). Primarily, we will deal with the setting of simulated data in which we ensure that the assumptions described in Section \ref{sec:drift_diff_proc} are explicitly satisfied, first in the absence of branching and then with branching. From these numerical demonstrations, we find ample evidence that our regularization-based method is able to produce accurate estimates of laws on paths with significantly less error compared to the existing Waddington-OT approach. Finally, we present an example application to a subset of the scRNA-seq cellular reprogramming dataset published by Schiebinger et al. \cite{schiebinger2019}.
Following the convention used previously, for all results we show rescaled times so that the first and last time-points correspond to $t = 0, 1$ respectively. We denote by $\mathcal{N}(x, M)$ the Gaussian with center $x \in \mathbb{R}^d$ and covariance matrix $M$, and by $I_d$ the $d \times d$ identity matrix.
\end{paragraph}
\subsection{Simulated data without branching}\label{sec:tristable}
\begin{paragraph}{Simulation setup and parameters}
We test first the performance of gWOT in the absence of branching and consider a tri-stable diffusion-drift process in $\mathcal{X} = \mathbb{R}^4$, in which the evolution of each particle $X_t$ over time is driven by the gradient of the potential function
\begin{align}
\Psi(x) &= 4\|x - x_0\|^2 \|x - x_1 \|^2 \|x - x_2\|^2 ,
\end{align}
where the three potential wells are located at
\begin{align*}
x_0 &= 0.95 (\cos(\pi/6), \sin(\pi/6), 0, 0), \\
x_1 &= 1.05 (\cos(5\pi/6), \sin(5\pi/6), 0, 0), \\
x_2 &= (\cos(-\pi/2), \sin(-\pi/2), 0, 0).
\end{align*}
Since these potential wells are positioned at differing distances away from the origin, the resulting potential landscape is asymmetric about the origin and so particles initialized about the origin will have a greater propensity to settle in closer wells -- namely $x_0$. To illustrate this, we show the potential $\Psi$ as a function of the first two dimensions of the space $\mathcal{X}$ in Figure \ref{fig:tristable_potential}. Note that although the asymmetry in this potential landscape is subtle and may be difficult to discern visually, it introduces appreciable asymmetry to the resulting probability law on trajectories.
\begin{figure}[h]
\centering
\includegraphics[width = 0.45\linewidth]{tristable_potential.pdf}
\caption{Potential function $\Psi$ for tristable process in $\mathbb{R}^4$, shown in the first 2 dimensions, i.e. $z = \Psi(x, y, 0, 0)$. }
\label{fig:tristable_potential}
\end{figure}
At the initial time $t = 0$, particles are distributed isotropically about the origin following the law $X_0 \sim 0.15\mathcal{N}(0, I_4)$ and evolve following the diffusion-drift process \eqref{eq:diffusion_drift_sde} with diffusivity $\sigma^2 = 0.25$. To simulate this process in practice, we specify a temporal step size $\tau \ll 1$ and iteratively update particle positions $X_t$ following the Euler-Maruyama scheme \cite{higham2001}
\begin{align}
X_{t + \tau} = X_t - \tau \nabla \Psi(X_t) + \sigma \sqrt{\tau} Z, \quad Z \sim \mathcal{N}(0, I_d). \label{eq:euler_maruyama}
\end{align}
We specified $T = 50$ time-points $\{ t_i : i = 1, \ldots, 50 \}$ uniformly spaced in the interval $t \in [0, 0.4]$ and chose $\tau$ so that a total of $\approx\!\!10^3$ steps corresponded to the overall interval $t \in [0, 0.4]$. Independently at each time-point, snapshots of $N=20$ particles were sampled from independent realizations of the process to form the input data $\{ \widehat{\rho}_{t_i} : i = 1, \ldots, 50 \}$. At each time $t_i$, $\widehat{\rho}_{t_i}$ is thus an empirical measure supported on $N$ points in $\mathcal{X}$. Given this input, we applied gWOT by solving the dual problem \eqref{eq:dual} for a range of values of the regularization strength parameter $\lambda \in \texttt{logspace}(-4, -1, 10)$. All other parameters were taken to be constant: we used $\varepsilon_i^\mathrm{DF} = 0.025, m_i \equiv 1, g_i \equiv 1, \lambda_i = 1, w_i = 1/T$, and as discussed in Section \ref{sec:methodology} we took the initial distribution for the reference process $\pi_0$ to be uniform. The reader is directed to Section \ref{sec:preprocessing_and_params} for a detailed discussion about the choice of parameters.
\end{paragraph}
\begin{paragraph}{Results: estimated marginals}
In order to evaluate the quality of outputs produced by gWOT for varying choices of the regularization strength $\lambda$, we must be able to compare the reconstruction output to a ground truth. One straightforward route is to compare each reconstructed marginal $\mathbf{R}_{t_i}$ to the corresponding true marginal $\rho_{t_i}$ of the ground truth process using the 2-Wasserstein ($W_2$) metric (for the reader who is new to optimal transport, this is described in detail in Appendix \ref{sec:background_OT}). That is, we consider the quantity $d_{W_2}(\mathbf{R}_{t_i}, \rho_{t_i})$ for each time-point $t_i$ as a measure of the error in the estimated marginal $\mathbf{R}_{t_i}$. This choice is reasonable since the optimal law on paths $\mathbf{R}$ is uniquely characterized by its temporal marginals $\{\mathbf{R}_{t_i}\}_{i = 1}^{T}$ as was discussed at length in Section \ref{sec:discretization_time_space}. We reason therefore that improved estimates of the marginals (as measured by the $W_2$ metric) should correspond to improved estimates of the law on paths and vice versa. Furthermore, the $W_2$ distance between distributions supported on the discrete space $\overline{\mathcal{X}}$ can be computed exactly with relative ease \cite{peyre2019}. However, obtaining exactly the temporal marginals $\{\rho_{t_i}\}_{i = 1}^T$ of the ground truth process is in general computationally infeasible, and so we instead generate a Lagrangian approximation to this ground truth by simulating the evolution of 5000 particles according to the same generating SDE \eqref{eq:diffusion_drift_sde} and sampling the marginal empirical distributions. As a summary of overall performance over all time-points we take the mean $W_2$ error
\begin{align*}
\mathbb{E}[d_{W_2}(\mathbf{R}_{t_i}, \rho_{t_i})] = \frac{1}{T} \sum_{i = 1}^T d_{W_2}(\mathbf{R}_{t_i}, \rho_{t_i}),
\end{align*}
where for simplicity by $\rho_{t_i}$ we refer to the Lagrangian approximations to the ground truth marginals. This average marginal error was computed for each value of $\lambda$ as an average over 10 identical repeated simulations, and we found that it was minimized for $\lambda = 2.154 \times 10^{-3}$ (we show results for varying $\lambda$ in Figure \ref{fig:tristable_lamda_dep}(a)). This value of $\lambda$ was used for all our downstream analyses. In Figure \ref{fig:tristable_scatter} we show observed samples at selected time-points $t = 0.00, 0.29, 0.59, 0.90$ overlaid on corresponding ground truth approximations, as well as the reconstructed marginal distributions $\mathbf{R}_{t_i}$ obtained by solving \eqref{eq:dual} for the optimal $\lambda$.
\begin{figure}[h]
\centering
\includegraphics[width = 0.75\linewidth]{tristable_marginals_scatter_heat.pdf}
\caption{(Top) Sampled observations (red) overlaid on ground truth (grey) in the case $N = 20, T = 50$, at rescaled time coordinates $t = 0.00, 0.29, 0.59, 0.90$. (Bottom) Estimated marginals $\mathbf{R}_{t_i}$ at corresponding time-points found by gWOT for the optimal regularization parameter $\lambda_\mathrm{opt} = 2.154 \times 10^{-3}$. }
\label{fig:tristable_scatter}
\end{figure}
\end{paragraph}
\begin{paragraph}{Results: estimating laws on paths}
As we discussed previously in Section \ref{sec:intro}, the mathematical objects of direct relevance to trajectory inference are the sample paths taken by particles, and so it is natural to regard both the underlying (ground truth) process and inference outputs as probability laws on the space of paths. We note that the ground truth and the inference outputs reside in different spaces, namely $\mathcal{P}(\Omega)$ and $\mathcal{P}(\overline{\mathcal{X}}^T)$ respectively. These are very large spaces -- even in the discrete case the size of the space scales exponentially in the number of time-points. It is therefore infeasible to deal directly with laws on paths, but since we are dealing with Markov processes, collections of sample paths can be sampled efficiently by consecutively sampling from the transition kernels. In Figure \ref{fig:tristable_sample_paths} we display sample paths obtained from the ground truth, as well as paths sampled from the estimated laws output by gWOT and Waddington-OT respectively. Visually, it is easy to observe that the low sampling density causes the performance of the Waddington-OT method to degrade since the marginals are treated as fixed and paths are therefore forced to pass through only observed particle locations at each time-point. This leads to suboptimal paths that thrash across the support. On the other hand, gWOT optimizes over marginals as well as paths, and therefore alleviates this effect by ``filling in'' missing data at each time-point which might otherwise result in spurious paths.
\begin{figure}[h]
\centering\includegraphics[width = 0.75\linewidth]{tristable_sample_paths.pdf}
\caption{Comparison of samples of 100 paths drawn from the ground truth simulation (green), gWOT output (blue) and Waddington-OT output (red) for $N=20$ observed particles at $T = 50$ time-points. The upper row of plots shows 100 paths displayed as ensembles in the first two dimensions of the space $\mathcal{X} = \mathbb{R}^4$. In the lower row of plots, we display the paths as functions of time. The vertical coordinate is a projection $x \cdot u$ of $\mathcal{X} = \mathbb{R}^4$ onto a chosen subspace, here chosen to be the one spanned by $u = (\cos(\pi/12), \sin(\pi/12), 0, 0)$.}
\label{fig:tristable_sample_paths}
\end{figure}
\begin{figure}[h]
\centering\includegraphics[width = 0.75\linewidth]{tristable_sample_paths_largeN.pdf}
\caption{Sample paths as in Figure \ref{fig:tristable_sample_paths}, but with $N = 250$. }
\label{fig:tristable_sample_paths_largeN}
\end{figure}
To go beyond visual observations and achieve a quantitative comparison of probability laws on paths, a natural metric of choice is the $W_2$ metric on the space $\mathcal{P}(\Omega)$ where the ground metric is chosen to be the $L^2$ norm on the space $\Omega = C([0, 1], \mathcal{X})$, i.e.
\begin{align*}
d(f, g)^2 = \int_0^1 \| f(t) - g(t) \|_2^2 \diff{t}.
\end{align*}
Unfortunately, again due to the size of the space of path-valued probability laws, exact computation of Wasserstein distances in this space quickly becomes computationally intractable. As an approximation, we compute instead an empirical $W_2$ distance between collections of paths sampled from underlying laws on paths. To be precise, in the discrete setting with $T$ evenly spaced time-points on $[0, 1]$ and for two collections of paths $\{ f_{ik} : k = 1, \ldots, T\}_{i = 1}^n$ and $\{ g_{ik} : k = 1, \ldots, T \}_{i = 1}^n$, we compute the $W_2$ distance between two empirical measures with a cost matrix
\begin{align*}
C_{ij} &= d(f_i, g_j)^2 = \frac{1}{T} \sum_{k = 1}^T \| f_{ik} - g_{jk} \|^2, \quad i, j \in \{1, \ldots, n\}.
\end{align*}
Throughout this paper, we will compute all empirical $W_2$ distances on paths as being between sample collections of $10^3$ paths and summarize over 10 independent samplings.
Importantly, since we are dealing with finite samples of paths the expected $W_2$ distance $\mathbb{E}\left[d_{W_2}(\hat{f}, \hat{g})\right]$ between any two distinct size-$10^3$ samples $\hat{f}, \hat{g}$ of paths drawn from the ground truth will be nonzero. To serve as a baseline for comparison, we compute 10 values of $d_{W_2}(\hat{f}, \hat{g})$ for random $\hat{f}, \hat{g}$ sampled from the ground truth. We summarize these empirical distances for $N = 20, T = 50$ in Figure \ref{fig:tristable_sample_paths_w2err}(a) in which we note that gWOT achieves performance close to the baseline, whereas Waddington-OT does markedly worse.
\begin{figure}[h]
\centering
\begin{subfigure}{0.25\linewidth}
\centering\includegraphics[width = \linewidth]{tristable_sample_paths_w2err.pdf}
\caption{$N = 20, T = 50$}
\end{subfigure}
\begin{subfigure}{0.25\linewidth}
\centering\includegraphics[width = \linewidth]{tristable_sample_paths_w2err_largeN.pdf}
\caption{$N = 250, T = 50$}
\end{subfigure}
\caption{$W_2$ estimates on sample paths for $(N, T) = (20, 50), (250, 50)$ respectively. We computed 10 repeats for samples of 1000 paths. }
\label{fig:tristable_sample_paths_w2err}
\end{figure}
Since gWOT is designed for the setting of few measurements per time-point with significant missing data, in the regime of large $N$ we expect that gWOT and Waddington-OT should perform similarly. To investigate this, we applied gWOT to a time-series sampled as described previously but with $N = 250$ sampled particles at each time-point. We display the sample paths in Figure \ref{fig:tristable_sample_paths_largeN} and we note that in comparison to the case of $N = 20$ in Figure \ref{fig:tristable_sample_paths}, in the case $N = 250$ the sample paths computed by Waddington-OT appear visually to be significantly improved. This is confirmed when we compute the $W_2$ distance estimates on sample paths, shown in Figure \ref{fig:tristable_sample_paths_w2err}(b), where we see that the difference in performance between gWOT and Waddington-OT is now significantly reduced.
\end{paragraph}
\begin{paragraph}{Estimation of the drift}
We remarked earlier in Section \ref{sec:reconstruction_of_drift} that estimates of the drift field $\bvec{v}_t$ may be extracted from the law on paths $\mathbf{R}$ estimated by gWOT. In the current example, the drift field does not vary with time and so we estimate the drift by averaging over all pairs of consecutive time-points:
\begin{align*}
\hat{\bvec{v}}(x) &= \frac{1}{T-1} \sum_{i = 1}^{T-1} \mathbb{E}_{\mathbf{R}_{t_i, t_{i+1}}} \left[ \left. \frac{X_{t_{i+1}} - X_{t_i}}{\Delta t_i} \right| X_{t_i} = x \right].
\end{align*}
In Figure \ref{fig:tristable_velocity} we show the respective drifts estimated from couplings computed by gWOT and Waddington-OT alongside the ground truth drift $\bvec{v}(x) = -\nabla \Psi(x)$, as well as the mean cosine similarities $$\mathbb{E}_x\left[\frac{1}{2}(1 - \cos\angle(\bvec{v}(x), \hat{\bvec{v}}(x)))\right]$$ of the estimated fields to the ground truth. We observe that gWOT estimates a drift field that is much closer to the ground truth, in keeping with our previous comparisons of the laws on paths.
\begin{figure}
\centering\includegraphics[width = 0.75\linewidth]{tristable_velocity_est.pdf}
\caption{Comparison of drift fields estimated by gWOT (blue) and Waddington-OT (red) to ground truth. We show the mean cosine similarity (scaled between 0 and 1) for both cases. }
\label{fig:tristable_velocity}
\end{figure}
\end{paragraph}
\begin{paragraph}{Performance for varying $N$ and $T$}
We next investigate the behavior of gWOT more generally for varying regimes of $(N, T)$ in addition to varying $\lambda$ in the setting of the same simulation. We allow $N$ and $T$ to vary over the ranges 5--100 and 10--100 respectively, and reconstruction performance was summarized in terms of the expected $W_2$ error on marginals as described earlier. As in the case of paths, we computed a baseline $W_2$ error on marginals to be the expected $W_2$ distance between ground truth marginals across all time-points and over 10 repeated samplings of 5000 particles.
For each fixed value of $(N, T)$ we selected the optimal value $\lambda$ that minimized this error. In Figure \ref{fig:tristable_N_T}(a), we show the error as a function of $N$ for several fixed values of $T$. From this we see that both the sample marginals and gWOT marginal estimates improve with increasing $N$, but with the gWOT marginal estimates consistently achieving a significant reduction in the error relative to the raw samples. Also, increasing the number of time-points $T$ with $N$ fixed further improves the gWOT estimates at each individual marginal. In contrast, this has no effect on the error for samples as expected, since at each time-point the number of observed particles $N$ remains constant. In Figure \ref{fig:tristable_N_T}(b) we examine the behavior for fixed $N$ and varying $T$ and find that, as before, increasing the number of time-points $T$ leads to on average a reduction in the $W_2$ error for any single marginal. This confirms that information is being shared ``globally'' across time-points to improve estimates, hence the name of the method. Finally, we note that for $N$ and $T$ both large, gWOT achieves an error that is comparable to the baseline error.
In Figure \ref{fig:tristable_lamda_dep}(b) we show the optimal value of $\lambda$ found to minimize the mean $W_2$ error on marginals as a function of $(N, T)$. From this we observe that the optimal $\lambda$ has an inverse relationship with $N$ and $T$ which is as expected -- that is, with more data from observations the need for regularization diminishes. Although the approach used in practice for gWOT is not mathematically identical to the form \eqref{eq:opt_theory} used to prove the theoretical convergence result, our findings are evidence that gWOT is able to accurately reconstruct probabilistic trajectories especially in the regime where few particles are captured at many time-points (small $N$ and large $T$), and that gWOT improves in accuracy as the amount of data increases.
\begin{figure}[h]
\centering
\begin{subfigure}{0.25\linewidth}
\centering\includegraphics[width = \linewidth]{tristable_N_20_T_50_lamda_dep.pdf}
\end{subfigure}
\begin{subfigure}{0.3\linewidth}
\centering\includegraphics[width = \linewidth]{tristable_lamda_opt.pdf}
\end{subfigure}
\caption{(a) Average marginal $W_2$ error $T^{-1} \sum_{i = 1}^T d_{W_2}(\mathbf{R}_{t_i}, \rho_{t_i})$ of reconstruction $\mathbf{R}$ with respect to the ground truth marginals, as a function of the regularization parameter $\lambda$. Note the presence of a clear minimum at $\lambda_\mathrm{opt} = 2.154\times 10^{-3}$. (b) Optimal value of $\lambda$ as a function of $(N, T)$.}
\label{fig:tristable_lamda_dep}
\end{figure}
\begin{figure}[h]
\centering
\begin{subfigure}{0.45\linewidth}
\centering\includegraphics[width = \linewidth]{tristable_err_N.pdf}
\caption{}
\end{subfigure}
\begin{subfigure}{0.45\linewidth}
\centering\includegraphics[width = \linewidth]{tristable_err_T.pdf}
\caption{}
\end{subfigure}
\caption{(a) Expected marginal $W_2$ error as a function of $N$, for fixed $T$. Note that whilst both samples (red) and gWOT (blue) improve with increasing $N$, gWOT achieves a consistent improvement compared to the samples, especially for small $N$. Note that for $N, T$ both large, gWOT achieves close to baseline error. (b) Expected marginal $W_2$ error as a function of $T$, for fixed $N$. The decreasing trend for gWOT is evidence for cooperativity across time-points. }
\label{fig:tristable_N_T}
\end{figure}
\end{paragraph}
\begin{paragraph}{Tradeoff behavior between $N$ and $T$ }
Finally, we examine performance in the setting where the total number of measured particles $NT$ is fixed, and we vary the number of time-points at which to make observations. To investigate this in simulation, we generated data from simulations where the total number of sampled particles was fixed to be $NT = 1000$ (up to rounding error). This ``budget'' of observable particles was divided evenly into snapshots at $T$ time-points, where $T$ was varied from 10 to 100. At one extreme, few time-points are sampled but with many observations at each time (high spatial resolution; low temporal resolution), and at the other extreme many time-points are sampled, but very few measurements at each time (low spatial resolution; high temporal resolution). As previously, gWOT was applied with different values of the parameter $\lambda$ and for each $(N, T)$ we picked the value of $\lambda$ which minimized the average marginal $W_2$ error over 10 identical simulations. We summarize the performance of gWOT relative to the raw samples in Figure \ref{fig:tristable_fixed_budget}. As expected, the error for samples increases with the number of time-points since fewer measurements are made for each marginal. On the other hand, there is very little variation in the error for the gWOT method, indicating that relatively little is lost by sacrificing marginal sampling for more time-points.
\begin{figure}
\centering\includegraphics[width = 0.3\linewidth]{tristable_fixedbudget.pdf}
\caption{Expected marginal $W_2$ error as a function of $T$ with the total number of measured particles fixed to be 1000 (note that this holds only up to rounding error).}
\label{fig:tristable_fixed_budget}
\end{figure}
\end{paragraph}
\FloatBarrier
\subsection{Comparison to kernel smoothing}\label{sec:kernel}
\begin{paragraph}{Kernel smoothing approach}
Kernel smoothing is an extremely common procedure used in statistics, and in principle could be applied to the setting of time-series measurements which we consider in this paper. In what we will refer to as the ``kernel method'', we share information across time-points by Euclidean averaging of the sampled marginals $\{\widehat{\rho}_{t_i}\}_{i = 1}^T$, weighted by a kernel in the time domain. For input marginals $\{ \widehat{\rho}_{t_i} \}_{i = 1}^{T}$ and an input bandwidth $h$, we take the resulting \emph{kernel-averaged} marginal to be
\begin{align}
\tilde{\rho}_{t_i} &\propto \sum_{j = 1}^{T} k(t_i - t_j; h) \widehat{\rho}_{t_j},
\end{align}
where we choose the kernel $k$ to be Gaussian, $k(s; h) = \exp\left( -s^2/h^2\right)$. We reason that gWOT is a more natural and flexible approach than the kernel method by virtue of its formulation as an optimization problem over probability laws on paths. Intuitively, the kernel method relies on the assumption that the underlying process is stationary over a short timescale selected through the bandwidth $h$. On the other hand, gWOT is based on the assumption that the underlying process follows loosely a geodesic in the space of probability measures, and therefore may be approximated by piecewise composition of Schr\"odinger bridges.
\end{paragraph}
\begin{paragraph}{Comparison with gWOT}
As a toy example illustrating the distinction between the two methods, we consider in $\mathbb{R}^2$ the linear potential
\begin{equation}
\Psi(x, y) = -1.5(x + y).
\end{equation}
Particles are initially distributed following $X_0 \sim 0.5\mathcal{N}(0, I_2) + (1, 1)^\top$ and we take $\sigma^2 = 0.1$. We capture 5 time-points with 250, 1, 1, 1 and 250 particles respectively. We reconstruct marginals using $\lambda = 0.05$ and $\varepsilon_\mathrm{DF} = 0.025$ and $\pi_0$ chosen to be uniform. Default values were used for all other parameters as in Section \ref{sec:tristable}. For the kernel method, we chose the bandwidth to be $h = 0.25$, which gave the best results as judged by eye. As is clear from Figure \ref{fig:2blobs}, gWOT produces estimated marginals of the underlying process at times $t_2, t_3, t_4$ that recapitulate the underlying drift. This illustrates the fact that gWOT seeks to optimize over discrete probability laws on paths, and in doing so encourages paths to follow geodesics with respect to the Wasserstein distance. In contrast, at times $t_2, t_3$ and $t_4$ the kernel method produces weighted Euclidean averages which turn out to be poor estimates of the true process.
\begin{figure}[H]
\centering
\includegraphics[width = 0.75\linewidth]{twoblobs_scatter_marginals.pdf}
\caption{(Top) Observed samples provided as an input to both gWOT and kernel method; (Middle) Marginal estimates output by gWOT; (Bottom) Marginal estimates produced by kernel method }
\label{fig:2blobs}
\end{figure}
It is also informative to examine the sample paths in this scenario, shown in Figure \ref{fig:2blobs_paths}. Here, we see that the sample paths produced by gWOT are very similar to the ground truth paths. On the other hand, the sample paths produced by the kernel method and Waddington-OT appear visually to be very different.
\begin{figure}[H]
\centering\includegraphics[width = 0.75\linewidth]{twoblobs_sample_paths.pdf}
\caption{Sample paths drawn from the ground truth (green), gWOT estimates (blue), kernel estimates (purple) and Waddington-OT (red).}
\label{fig:2blobs_paths}
\end{figure}
\end{paragraph}
\FloatBarrier
\subsection{Simulated data with branching} \label{sec:branching}
\begin{paragraph}{Simulation setup and parameters} We now turn to consider processes with branching. As mentioned earlier, dealing with only normalized (probability) distributions introduces a fundamental issue of identifiability of branching and transport and necessitates the relaxation to general positive measures discussed in Section \ref{sec:growth}. As in Section \ref{sec:tristable}, we will take $\mathcal{X} = \mathbb{R}^4$, and consider the following bistable potential
\begin{align}
\Psi(x) &= \| x - x_0 \|^2 \| x - x_1 \|^2,
\end{align}
where potential wells are located at $x_0 = 1.15(1, 1, 0, 0)$ and $x_1 = (-1, -1, 0, 0)$. Particles are initially distributed according to $X_0 \sim 0.1 \mathcal{N}(0, I_4)$ and evolve following a diffusion-drift process \eqref{eq:diffusion_drift_sde} driven by $\Psi$ with diffusivity $\sigma^2 = 0.25$, subject to branching and death at spatially dependent exponential rates
\begin{align}
\beta(x, t) &= 5\left(\frac{\tanh(2\inner{x, e_1}) + 1}{2}\right), \\
\delta(x, t) &= 0,
\end{align}
where we write $e_1$ for the basis vector $(1, 0, 0, 0)$. We show a schematic of the potential $\Psi$ and birth rate function $\beta$ in the first dimension of $\mathcal{X}$ in Figure \ref{fig:growth_examples}(a). To simulate this process in practice we again employ the Euler-Maruyama method \eqref{eq:euler_maruyama} as previously, except at each time step of length $\tau$ particles first undergo a displacement as per \eqref{eq:euler_maruyama} followed by division with probability $\beta(X_{t + \tau}, t + \tau) \tau$ or annihilation with probability $\delta(X_{t + \tau}, t + \tau) \tau$. Note that in our setting since $\delta = 0$, particles are only subject to division.
The components of transport and branching in this problem result in two competing effects. First, particles are initialized isotropically about the origin and diffuse towards either of two wells $x_0, x_1$ with the well $x_0$ being further away. Thus in the absence of branching, more particles are expected to drift towards the well $x_1$. On the other hand, the spatial region near the well $x_0$ is subject to a much higher branching rate ($\beta(x_0, t) \approx 4.95$) than the well $x_1$ ($\beta(x_1, t) \approx 0.08$). The consequence of this is that overall, more particles will be observed near the well $x_0$. We illustrate this in Figure \ref{fig:growth_examples}(b) where we show the ground truth evolution of the processes with and without branching.
\begin{figure}
\begin{subfigure}{0.49\linewidth}
\includegraphics[width = \linewidth]{growth_potential_birthrate.pdf}
\caption{}
\end{subfigure}
\begin{subfigure}{0.49\linewidth}
\includegraphics[width = \linewidth]{growth_profile.pdf}
\caption{}
\end{subfigure}
\caption{(a) Potential $\Psi$ and branching rate $\beta$ as a function of the first dimension in $\mathcal{X}$. (b) Comparison of process without branching and with branching effects.}
\label{fig:growth_examples}
\end{figure}
As an initial investigation of the ability of gWOT to account for branching, we sampled $T = 50$ time-points $\{ \widehat{\rho}_{t_i} \}_{i = 1}^{T}$ at evenly spaced intervals in $0 \leq t \leq 0.75$, each with $N = 20$ particles sampled from the process with branching using the aforementioned discretization. As input branching rate estimates to gWOT, we computed here the true branching rates given estimated birth and death rates $\beta_\mathrm{est}, \delta_\mathrm{est}$ in the form of a matrix $g_{ij}$ with entries
\begin{align*}
g_{ij} = \exp(\Delta t_i (\beta_\mathrm{est}(x_{j}) - \delta_\mathrm{est}(x_{j}))) = 1 + \Delta t_i (\beta_\mathrm{est}(x_{j}) - \delta_\mathrm{est}(x_{j})) + \mathcal{O}(\Delta t_i^2)
\end{align*}
which describe the branching factor at each spatial location $x_j \in \overline{\mathcal{X}}$ at time instant $t_i$. We let $m_i = 1$, $\kappa = 5$ and chose $\lambda = 2.154 \times 10^{-3}$, the optimal value found earlier for the simulation with $N = 20, T = 50$ in Section \ref{sec:tristable}. We note that although the simulations are different, we find that roughly this value of $\lambda$ works well in a wide range of scenarios. Default values were used for all other parameters, i.e. the same as those used in Section \ref{sec:tristable}.
As a priori estimates for the branching rates, we use
\begin{align*}
\beta_\mathrm{est}(x) &= \beta_0 \left( \frac{\tanh(2 \inner{x, e_1}) + 1}{2} \right), \\
\delta_\mathrm{est}(x) &= 0,
\end{align*}
where the value $\beta_0$ essentially controls how quickly particles near the branch at $x_0$ grow, relative to particles near the branch at $x_1$. We will consider a scenario where we know the true branching rate exactly, i.e. we take $\beta_0 = 5$, and we will compare to estimates output by gWOT without a priori estimates for branching rates, i.e. we take $\beta_0 = 0$ and therefore $g_{ij} = 1$. In addition, we consider enforcing both an exact branching constraint ($\kappa = +\infty$) and soft branching constraint ($\kappa = 5$). To evaluate the outputs, we choose to use again the $W_2$ distance on paths. This will be particularly useful since the proportion of particles located near the well $x_0$ will increase with time due to branching, so a failure to appropriately account for branching should result in spurious transfer of mass from one branch to the other.
\begin{figure}[h]
\centering\includegraphics[width = 0.75\linewidth]{growth_sample_paths.pdf}
\caption{Sample paths drawn from ground truth without branching (green), gWOT output with no a priori branching rate ($\beta_0 = 0$, purple), and gWOT output with correct branching rates ($\beta_0 = 5$). For the reconstructions, we annotate the fraction of paths that terminate closer to the left or right wells, based on the first coordinate at the final time for 1000 sampled paths.}
\label{fig:growth_sample_paths}
\end{figure}
\end{paragraph}
\begin{paragraph}{Results: importance of accounting for branching }
Recall that our objective for inference in the presence of branching is to recover information about the underlying displacement law -- that is, given prior knowledge on the branching rate we seek to estimate the law on paths that results from only the diffusion-drift component \eqref{eq:diffusion_drift_sde_grad}, with branching switched off. Therefore, we take the ground truth process to be the process we sample from originally, but with all branching switched off. In Figure \ref{fig:growth_sample_paths}, we display collections of sample paths drawn from this ground truth, compared to sample paths drawn from the gWOT output with ($\beta_0 = 5$) and without ($\beta_0 = 0$) a priori information on the branching rate. A key distinction here is the proportion of paths that end up near the well at $x_1$ compared to the well at $x_0$ -- in the case $\beta_0 = 0, \kappa = +\infty$ we note the presence of a collection of artifactual paths that transfer additional mass to the faster growing branch near $x_0$ in order to explain the increase in mass in that branch due to branching. In contrast, these paths are not present in the gWOT output when the correct branching rate ($\beta_0 = 5$) is specified.
To quantitatively compare these laws on paths to the ground truth, we computed estimates of the $W_2$ distance on paths for each of the cases $\beta_0 = 0, 5$, $\kappa = 5, +\infty$. Figure \ref{fig:growth_sample_paths_w2} summarizes these results for empirical $W_2$ distances between samples of $10^3$ paths, repeated 10 times. From this it is clear that accurate estimates of the branching rate are essential to obtaining an accurate reconstruction, with $\beta_0 = 0$ resulting in significantly worse performance compared to $\beta_0 = 5$, which achieves near-baseline performance. Interestingly, we note that with $\beta_0 = 0$, performance is improved by allowing for deviation from the specified branching behavior by using a soft branching constraint.
\begin{figure}[h]
\centering\includegraphics[width = 0.25\linewidth]{growth_sample_paths_w2.pdf}
\caption{$W_2$ estimates on sample paths for gWOT with ($\beta_0 = 5$) and without ($\beta_0 = 0$) a priori branching rates}
\label{fig:growth_sample_paths_w2}
\end{figure}
\end{paragraph}
\begin{paragraph}{Misspecification of branching}
We now turn to further investigation of the effect of misspecification of the branching rate (namely, underestimation or overestimation of $\beta_0$) and choice of the branching constraint penalization parameter $\kappa$ on the quality of the estimated law on paths, as quantified by the estimated $W_2$ distance on paths. For the same generative process as earlier, we consider applying gWOT with $0 \leq \beta_0 \leq 10$ and $\kappa$ varying from 1 to 25. For each pair of values $(\beta_0, \kappa)$, we compute the empirical $W_2$ distance to the ground truth. We summarize these results over values of $(\beta_0, \kappa)$ in Figure \ref{fig:growth_g0_kappa_dependence}(a). As is reasonable to expect, the choice of $\beta_0$ has the largest effect on the quality of the reconstruction, with the best results when $\beta_0 = 5$ corresponding to a precise knowledge of the true branching rate. We show also the performance for varying $\beta_0$ in Figure \ref{fig:growth_g0_kappa_dependence}(b) by displaying for each choice of $\beta_0$ the empirical $W_2$ on paths for the corresponding optimal choice of $\kappa$. From this, we observe that when the branching rate estimate is specified correctly ($\beta_0 = 5$), gWOT with both exact and soft branching constraints perform similarly, but when $\beta_0$ is misspecified we observe that the soft constraint always results in better performance than the exact constraint. Finally, in Figure \ref{fig:growth_kappa_dep} we show the empirical $W_2$ error as a function of $\kappa$, including the limiting case $\kappa = +\infty$, for various fixed $\beta_0$. From this we observe that picking larger values of $\kappa$ to enforce the branching constraint more strictly results in performance closer to the case of the hard branching constraint.
\begin{figure}
\centering
\begin{subfigure}{0.375\linewidth}
\centering\includegraphics[width = \linewidth]{growth_paths_w2_kappa_beta.pdf}
\caption{}
\end{subfigure}
\begin{subfigure}{0.375\linewidth}
\centering\includegraphics[width = \linewidth]{growth_g0_dependence.pdf}
\caption{}
\end{subfigure}
\caption{(a) Empirical $W_2$ on paths for gWOT with different values of $\beta_0$ and $\kappa$ (b) Summary of gWOT performance for varying values of estimated branching rate $\beta_0$.}
\label{fig:growth_g0_kappa_dependence}
\end{figure}
\begin{figure}
\centering\includegraphics[width = 0.75\linewidth]{growth_kappa_dependence.pdf}
\caption{Summary of gWOT performance for varying values of branching penalization $\kappa$ for soft branching constraint. For reference, we show also the performance for the exact branching constraint ($\kappa = +\infty$). }
\label{fig:growth_kappa_dep}
\end{figure}
\end{paragraph}
\bigskip
\FloatBarrier
\subsection{Reprogramming scRNA-seq time series} \label{sec:reprogramming}
\begin{paragraph}{Overview}
As a proof-of-principle application of gWOT to real-world datasets, we consider the stem-cell reprogramming time series dataset published by Schiebinger et al. \cite{schiebinger2019}, comprised of single-cell transcriptome profiles for a series of time-points sampled at 12 hour intervals from a growing population of cells over an 18-day reprogramming experiment. In Section \ref{sec:tristable} we remarked that Waddington-OT is accurate when each time-point consists of a large number of observations and is a good approximation of the population. Such a scenario is not particularly interesting as the resulting performance of gWOT and Waddington-OT would be very similar. Instead, we consider subsampling each time-point in the full dataset to 100 cells per time-point in order to make a comparison between the methods in the regime of limited sampling at each time-point. We consider for this example the subset of 14 time-points between days 6 and 11.5.
As an input to gWOT, we use a 10-dimensional PCA projection of the cellular gene expression profiles. Growth of the cellular population plays a major role in the stem-cell reprogramming process, and so we employ the branching rates estimated in \cite{schiebinger2019} for each cell from cell-cycle gene signatures. For the sake of clarity, we defer the details of our preprocessing steps and choice of model parameters in Appendix \ref{sec:reprogramming_details}. We repeated all computations over 25 independent subsamplings of the full dataset. We show the output marginals at several selected time-points for one of these subsamplings in Figure \ref{fig:reprog_marginals}, in both the force-layout embedding (FLE) dimensions computed in \cite{schiebinger2019} as well as the original input PCA coordinates.
\begin{figure}[h]
\centering
\begin{subfigure}{0.75\linewidth}
\centering\includegraphics[width = \linewidth]{reprogramming_scatter_fle.pdf}
\caption{}
\end{subfigure} \\
\begin{subfigure}{0.75\linewidth}
\centering\includegraphics[width = \linewidth]{reprogramming_scatter_pca.pdf}
\caption{}
\end{subfigure}
\caption{Inferred and sampled marginals at selected time-points for subsampled reprogramming data, (a) in the FLE coordinates from \cite{schiebinger2019} and (b) in the first two PCA coordinates. }
\label{fig:reprog_marginals}
\end{figure}
\end{paragraph}
\begin{paragraph}{Results: marginal reconstruction}
Since we are running gWOT on subsamplings of each time-point, we expect that gWOT should be able to produce improved estimates of the input marginals as part of the coupling estimation process. At each time-point, we may treat the full dataset as a proxy for the ``ground truth'', and use it as a reference for evaluating model performance. Thus, as a first assessment of the performance we compute the 2-Wasserstein distance between the full (non-subsampled) time-point and reconstructed marginal at each time. We reason that successful marginal reconstruction should reduce the noise introduced from subsampling. This is summarized as a ratio of 2-Wasserstein distances $d_{W_2, \text{reconstructed}}/d_{W_2, \text{sample}}$ in Figure \ref{fig:reprogramming_ratios}(a). From this we observe a visible but moderate improvement for all time-points we considered. We make particular note that this dataset was originally generated with the application of Waddington-OT in mind, and thus the relatively large (approx. 12 hour) temporal gap between time-points means that the amount of useful information that can be ``shared'' across times is quite limited.
\end{paragraph}
\begin{paragraph}{Results: hold-one-out validation}
We then used hold-one-out validation (as done in \cite{schiebinger2019}) to investigate the predictive value of the estimated couplings output by gWOT, taking Waddington-OT as a baseline. Excluding the first and last time-points, we held out successive time-points and applied gWOT to the remaining time-points following the procedure detailed in Appendix \ref{sec:reprogramming_details}. Using the obtained couplings, we approximated the held-out time-point by 5000 sampled points using the geodesic interpolation scheme as described in \cite[Supplementary materials]{schiebinger2019}. We reason that improved estimates of held-out time-points should be indicative of improved coupling estimates. As with the marginals, we compute the 2-Wasserstein distance from the estimate to the full (non-subsampled) time-point for each held-out time. We display the ratio $d_{W_2, \text{gWOT}}/d_{W_2, \text{WOT}}$ in Figure \ref{fig:reprogramming_ratios}(b), from which we observe that, with exception of a single time-point at day 8, our method performs roughly as well or better than Waddington-OT.
\end{paragraph}
\begin{figure}[h]
\centering
\begin{subfigure}{0.75\linewidth}
\includegraphics[width = \linewidth]{reprogramming_reconstruct_summary.pdf}
\caption{}
\end{subfigure} \\
\begin{subfigure}{0.75\linewidth}
\includegraphics[width = \linewidth]{reprogramming_holdout_summary.pdf}
\caption{}
\end{subfigure}
\caption{(a) Summary of marginal reconstruction performance for subsampled reprogramming data in terms of $W_2$ distance between sampled/reconstructed marginals to the full dataset for each time. (b) Summary of hold-one-out interpolation performance for subsampled reprogramming data in terms of $W_2$ distance between interpolated marginals and the full held-out time-point. We show summarized results over 25 independent repeats.}
\label{fig:reprogramming_ratios}
\end{figure}
\FloatBarrier
\subsection{Some remarks on data preprocessing and choice of parameter values} \label{sec:preprocessing_and_params}
In general, the optimal choice of parameters for a given application will depend on the specific data at hand. However, we will discuss a few guiding principles which may apply generally in practical settings.
\begin{paragraph}
{Preprocessing of the input}
In the simulated diffusion-drift examples discussed earlier in this section, simulation parameters such as the diffusivity were known exactly and therefore no pre-processing was necessary. However, for real-world datasets such as the scRNA-seq example, an initial PCA step is generally advisable \cite{schiebinger2019}. Furthermore, appropriate normalization of the optimal transport cost matrix $C_{ij} = \frac{1}{2}\| x_i - x_j \|^2$, while not consequential from a mathematical standpoint, may be helpful to ensure numerical stability of computations and also allows values of parameters and losses to occupy the same order of magnitude and therefore be roughly comparable across datasets. A common rule of thumb in the optimal transport literature \cite{schiebinger2019} as we also describe in Appendix \ref{sec:reprogramming_details} is to scale cost matrices by their mean or median so as to have entries that are order one.
\end{paragraph}
\begin{paragraph}
{Choice of the regularization strength and diffusivity}
The diffusivity $\sigma^2$ and the regularization strength $\lambda$ are the central parameters for the formulation of gWOT described in Section \ref{sec:methodology}. In most biological applications, the diffusivity $\sigma^2$ is unknown. Therefore, $\sigma^2$ may need to be heuristically chosen judging from the length scale of the data, and the time scale over which the process occurs. As a rule of thumb, $\sigma^2$ should correspond to the mean square displacement that can be expected of a diffusive particle in unit time. Alternatively, $\sigma^2$ may be chosen by empirically examining the resultant pairwise couplings in order to select an appropriate balance between diffusion and drift effects, as was done in \cite{schiebinger2019}.
Since the regularization counteracts effects introduced by having access to limited samples, the optimal $\lambda$ should be inversely related to the number of observed time-points and observed particles at each time-point, as discussed previously. As we found in Sections \ref{sec:tristable}, \ref{sec:branching} and \ref{sec:reprogramming} and illustrated in Figure \ref{fig:tristable_lamda_dep}(b), when cost matrices are normalized to order one, a reasonable range for initial guesses of $\lambda$ is on the scale of $10^{-2}-10^{-3}$. It may also be informative to consider the regularization loss $\mathrm{Reg}(\cdot)$ for various values of $\lambda$ to quantify the tradeoff between regularization and data-fitting. We do this for the example of Section \ref{sec:tristable} where $N = 20, T = 50$ in Figure \ref{fig:tristable_elbow}, where one may reasonably identify an ``elbow'' from the plot, corresponding to an optimal tradeoff between the regularization and data-fitting losses. In the end, some visualization or other downstream analysis with external knowledge of the application domain may be necessary to select a ``best'' value of $\lambda$.
\end{paragraph}
\begin{figure}[h]
\centering\includegraphics[width = 0.25\linewidth]{tristable_N_20_T_50_elbow.pdf}
\caption{Value of $\mathrm{Reg}(\mathbf{R})$ at the optimal point $\mathbf{R}$ for varying values of $\lambda$ in the example of Section \ref{sec:tristable} when $N = 20, T = 50$.}
\label{fig:tristable_elbow}
\end{figure}
\begin{paragraph}
{Choice of other parameters}
\begin{itemize}
\item Data-fitting regularization $\varepsilon_i$: This parameter arises in the smoothed entropy-regularized approximation to optimal transport in the data-fitting functional. Therefore, it should be chosen sufficiently small so as to have minimal blurring effect on the reconstruction output. For problems where the cost is order one, we have found that values from $0.005$--$0.05$ typically work well.
\item Time-point weights $w_i$: The weights $\{ w_i \}_{i = 1}^T$ specify the relative contribution of different time-points to the data-fitting functional. Although this may be tuned by the user, we recommend to weight each time-point $t_i$ proportional to the number of observations $N_i$ made, as was done in Section \ref{sec:kernel}. That is, we take
\begin{align*}
w_i = \frac{N_i}{\sum_{i = 1}^T N_i}.
\end{align*}
\item Soft branching constraint penalty $\kappa$: We discuss at length in Section \ref{sec:growth} the effect this parameter has in the case where branching is present. From the form written in Section \ref{sec:growth}, it is clear that $\kappa$ scales with the transport cost terms in the regularizing functional \eqref{eq:regfunc_growth} and therefore scales with the cost matrices. In our setting where cost matrices have order one, we find that values from 1--10 tend to work well.
\item Cross-entropy coefficient $\lambda_i$: For each time point $t_i$, the coefficient $\lambda_i$ controls the tradeoff between the transport and cross-entropy terms in the data-fitting functional \eqref{eq:dffunc_growth}. When the transport cost has order one, we have found that simply setting $\lambda_i = 1$ works well, corresponding to a 1:1 tradeoff.
\end{itemize}
\end{paragraph}
\subsection{Augmenting the support}\label{sec:aug_supp}
In order to obtain a finite dimensional approximation to~\eqref{eq:opt_theory}, we have been optimizing over measures supported on the discrete set $\overline{\mathcal{X}}$ constructed as the union of all sampled points, i.e. $\overline{\mathcal{X}} = \cup_{i = 1}^T \mathrm{supp}(\widehat{\rho}_{t_i})$.
However, restricting the support in this way can impair performance when we have few samples in a particular temporal window. For example, as in Section \ref{sec:kernel}, suppose that the true process $\rho_t$ is a geodesic in the Wasserstein space, and we obtain a high-fidelity estimate of $\rho_t$ (from a large number of samples) at times $t \in \{0,1\}$, but few samples at time $t=\frac 1 2$.
If the supports of $\rho_0$ and $\rho_1$ are both sufficiently different from $\rho_{\frac 1 2}$, then we would not be able to reconstruct an accurate estimate of $\rho_{\frac 1 2}$ supported on points from $\overline{\mathcal{X}}$.
To remedy this, we propose to add points to the support $\overline{\mathcal{X}}$ with the following scheme:
\begin{itemize}
\item Select first a noise level $s^2$: this is different from the typical $\sigma^2 \Delta t_i$ and indeed should be larger, as we seek to add points in regions of $\mathcal{X}$ which are not already represented well in $\overline{\mathcal{X}}$.
\item For a pair of time-points $(t_i, t_{i+2})$ compute $\gamma$, the entropy-regularized optimal transport coupling between the estimated marginals $\mathbf{R}_{t_i}, \mathbf{R}_{t_{i+2}}$ with $\varepsilon = s^2$.
\item To add $k$ points to the support, sample $k$ pairs $(X^{(i)}, Y^{(i)}) \sim \gamma$ and for each pair we add a point $Z_{1/2}^{(i)}$ sampled from the Brownian bridge conditioned at $Z_0^{(i)} = X^{(i)}, Z_1^{(i)} = Y^{(i)}$ at the midpoint:
\begin{align}
Z_{1/2}^{(i)} \sim \mathcal{N}\left(\frac{1}{2}(X^{(i)} + Y^{(i)}), \frac{s^2}{4} I_d\right)
\end{align}
\end{itemize}
We then form the augmented support
\begin{align*}
\overline{\mathcal{X}}' = \overline{\mathcal{X}} \cup \{ Z_{1/2}^{(i)} \}_{i = 1}^N.
\end{align*}
Using this augmented support $\overline{\mathcal{X}}'$, an improved estimate of the marginals may be obtained by solving again with gWOT. As an example, we consider again the simulation from Figure \ref{fig:datafitting_counterexample2}, in which we only have one sample per time-point. We display in Figure \ref{fig:aug_support} a scenario where the low number of observed samples results in a noticeable gap in the reconstructed marginals. We employ the method we describe to add points to the support, and solve gWOT again using the augmented support, thereby `filling in' the gap to obtain an improved estimate of the underlying process.
\begin{figure}[h]
\centering
\includegraphics[width = 0.75\linewidth]{augment_support.pdf}
\caption{Augmenting the support may increase the quality of reconstructed marginals. With very few samples per time-point (left), the reconstructed marginals display artifacts (middle). By augmenting the support with the scheme described in Section~\ref{sec:aug_supp}, we add points to $\overline{\mathcal{X}}$ that were not present before, improving the quality of the reconstructed marginals (right).}
\label{fig:aug_support}
\end{figure}
\section{Discussion}
\label{sec:discussion}
In this paper we have developed the beginning of a mathematical theory of trajectory inference for single cell datasets.
We have stated the trajectory inference problem in terms of reconstructing the law of a stochastic process from its temporal marginals, and have shown that the existing method Waddington-OT \cite{schiebinger2019} falls within this framework. Because this problem is not well posed without additional assumptions on the process, we have restricted to the case of a potential driven process in which cells follow a stochastic differential equation with the drift being the gradient of a potential, which may vary in time. Under this assumption, we showed that the ground truth can be characterized as the solution of a convex variational problem. This leads to a convex optimization-based approach to recover trajectories from empirical estimates of temporal marginals. As the number of distinct temporal snapshots grows, this approach is guaranteed to recover the correct trajectories, even if each individual time-point contains only a few sampled cells.
We devised an efficient algorithm to solve the problem in practice, and tested our approach on both synthetic and real data. We refer to this method as gWOT, for ``global Waddington-OT'', because it shares information across time-points in a global optimization problem.
Cellular proliferation and death is challenging to incorporate directly in the framework of reconstructing the law of a stochastic process. We overcome this issue in gWOT by alternating ``branching'' and ``transport'' phases which is reminiscent of splitting schemes of numerical analysis. We demonstrate that our method is able to produce improved estimates of the ground truth process in the setting of simulated potential-driven diffusion-drift processes with and without branching.
\subsection{Prospects for future work}
We hope this theoretical framework lays a rigorous foundation for further development of theory and methods and also helps guide experimental design.
We envision several directions for future work:
\paragraph{Theoretical directions}
On the theoretical front, it remains open to build a satisfying theory in the case of branching. A building block in this direction is the work in progress of the first author with Aymeric Baradat \cite{AymericHugo} about entropy minimization for laws of branching processes. Second, one would also hope to establish a quantitative rate of convergence, building on our asymptotic consistency results.
Intuitively the rate of convergence should depend on some notion of the ``curvature'' of the developmental curve shown in Figure~\ref{fig:curve_perspective}. From sharp bounds, one might deduce optimal strategies for experimental design.
For more on this, see below.
Third, our perspective on developmental curves might be extended to consider families of curves.
For example, one could consider the curve of disease progression (or wound healing) in individuals of age $a$.
By collecting data over a variety of ages, one could recover a separate curve for each age. However, one might use additional prior information that curves from similar ages behave similarly to better reconstruct the entire family of curves.
Recent related work has developed methodology to recover higher dimensional manifolds, for modeling single cell datasets in cancer patients~\cite{chen2020uncovering} or in COVID-19 patients~\cite{Kuchroo2020}.
\paragraph{Methods and algorithms}
Future algorithmic work might close the gap between our theoretical results, where we analyze an infinite dimensional convex problem, and our practical implementation, which involves a heuristic discretization over space.
Ideally, one would search for alternative numerical methods for entropy minimization of laws of stochastic processes. A promising way to approach this might be to follow the conditional gradient approach of~\cite{bredies2020generalized}. Second, our methodology could be extended to incorporate additional information such as lineage tracing, as in \cite{forrow2020}.
This could increase the accuracy of reconstructed trajectories, especially for difficult settings like ``convergent trajectories''~\cite{packer2019lineage}. Third, we could develop uncertainty quantification for our method. To do this, we would need to adopt a (non-parametric) Bayesian perspective. The main difficulty is then to find a prior on laws on the space of paths (or on potential functions) which are quite large spaces.
\paragraph{Experimental design}
Finally, our theoretical framework motivates the collection of high-density time-courses, with a large number of time-points and relatively few sampled cells per time-point.
Intuitively, one can view each time-point as a data-point along the curve (Figure~\ref{fig:curve_perspective}); the number of cells sampled determines the noise-level of the time-point. This raises several natural questions: {\em For a fixed budget of $n$ total cells, how should one choose the number of time-points? And how should these time-points be selected?}
Intuitively, finer time-resolution should be collected over periods of sharper ``curvature'' (i.e. periods of rapidly changing development). Note that this is different from periods of rapid change, which could still be described by a geodesic, without significant curvature. In order to better answer these questions, one would need to establish a quantitative rate of convergence, as mentioned above.
Our methodology enables the analysis of these high-density time-course datasets.
\section*{Acknowledgments}
\addcontentsline{toc}{section}{Acknowledgments}
This work was supported in part by a UBC Affiliated Fellowship to S.Z., an Exploration Grant to G.S. and Y.H.K. from the New Frontiers in Research Fund (NFRF), a Career Award at the Scientific Interface from the Burroughs Wellcome Fund to G.S., and NSERC Discovery Grants to Y.H.K. and G.S. Part of this work was done while H.L. was supported by the Pacific Institute for the Mathematical Sciences (PIMS) through a PIMS postdoctoral fellowship.
\noindent The authors wish to thank Aymeric Baradat and Jonathan Niles-Weed for stimulating discussions.
\section*{Code availability}\label{sec:code_avail}
\addcontentsline{toc}{section}{Code availability}
Global Waddington-OT is implemented in the open-source software package gWOT available at \url{https://github.com/zsteve/gWOT}.
\newpage
|
1,314,259,995,009 | arxiv | \section{Exciting chiral dynamics in bound states}
A hallmark of helical motion of bound electrons is the appearance of an induced dipole orthogonal to the polarization plane of the exciting circular light. We first show that an ultrashort pulse creates
such a dipole in a randomly oriented molecular ensemble.
Let the electric field of the pulse, rotating in the $x$-$y$ plane, coherently excite two states (Fig. 1b) of a chiral molecule.
As shown in the Supplementary Information (SI), the orientation-averaged induced dipole acquires the desired component along
the light propagation direction $z$:
\begin{eqnarray}
\label{PXCD}
{d_z^{PXCD}}\propto\sigma[\vec{d}_{01}\times \vec{d}_{02}]\vec{d}_{12}\sin(\Delta E_{21}t),
\end{eqnarray}
Here $\vec{d}_{01}$, $\vec{d}_{02}$ and $\vec{d}_{12}$ are the dipole transition vectors connecting
the ground $|0\rangle$ and the two excited states $|1\rangle ,|2\rangle$ (Fig. 1b), $\Delta E_{21}$ is
the energy spacing between the excited states. For more than two states, Eq.~(\ref{PXCD}) will contain the sum over all pairs of excited states $n,m$, leading to oscillations
at all relevant
frequencies $\Delta E_{nm}$. As a function of time the induced dipole vector maps out a helix (Fig. 1b) and the z-component of the helical current is
\begin{eqnarray}
\label{PXCD_cur}
{j_z^{PXCD}}\propto \sigma[\vec{d}_{01}\times \vec{d}_{02}]\vec{d}_{12}\Delta E_{21}\cos(\Delta E_{21}t).
\end{eqnarray}
Both $d_z^{PXCD}$ and $j_z^{PXCD}$ are quintessential chiral observables (see e.g. \cite{barron1986true,tang2010optical}).
Indeed, both are proportional to the light helicity $\sigma=\pm1$ and to the triple product of three vectors $[\vec{d}_{01}\times \vec{d}_{02}]\vec{d}_{12}$. This product presents a fundamental measure of chirality: it changes sign upon reflection and thus has an opposite sign for left and right enantiomers. For randomly oriented non-chiral molecules $d_z^{PXCD}=j_z^{PXCD}=0$.
Eqs.(\ref{PXCD},\ref{PXCD_cur}) lead to the following conclusions. First, the coherent excitation of electronic states leads to a charge displacement in the light propagation direction. Hence, a macroscopic dipole $d_z^{PXCD}$ and the corresponding chiral density are created in the excited states, with a chiral current oscillating out of phase for the two enantiomers. Second,
PXCD requires no magnetic or quadrupole effects. Hence, it is orders of magnitude stronger than standard photoabsorption CD.
While photoabsorption CD exploits the helical pitch of
the laser field in \textbf{space}, PXCD takes advantage of the
sub-cycle rotation of the light field
in \textbf{time} and is inherently ultrafast. Indeed, PXCD arises only if
the excitation dipoles $\vec{d}_{01}$, $\vec{d}_{02}$ are non-collinear:
for the angle $\phi$ between the two transition dipoles, the PXCD (Eqs. (\ref{PXCD},\ref{PXCD_cur})) is proportional to
$\sigma\sin(\phi)$. Since $\sigma=\pm 1$, $\sigma\sin(\phi)=\sin(\sigma\phi)=\sin(\sigma\omega\tau)$, where $\omega$ is light frequency and $\tau=\phi/\omega$ is the required time for the light field to rotate by the angle $\phi$.
PXCD vanishes if the coherence between excited states $|1\rangle$ and $|2\rangle$ is lost and reflects dynamical symmetry breaking in an isotropic medium.
The oscillations of the PXCD signal
Eqs.(\ref{PXCD},\ref{PXCD_cur}) appear to suggest that probing it requires the combination of ultrafast time resolution and chiral sensitivity.
We now show that time-resolving PXCD does not, in fact, require a chiral probe. The coherence underlying PXCD allows a chiral object to ``interact with itself'', albeit in a different quantum state, thus mimicking interaction with ``another chiral object'' and removing any need for other chiral interactions during the probe step. One such non-chiral probe, termed PhotoeXcitation-induced photo-Electron Circular Dichroism (PXECD), is introduced below.
\section{Probing chiral dynamics in bound states}
One way to probe the excited chiral density is to promote the chiral wave-packet to the electron continuum using a \textbf{linearly} polarized pulse (Fig 1c). As shown in the SI, the standard photoionization observable, the photoelectron current averaged over molecular orientations, is:
\begin{eqnarray}
\label{PXECD}
J_z^{PXECD}(k)\propto
\sigma[\vec{d}_{01}\times \vec{d}_{02}]\vec{D}_{12}^{r}(k)\sin(\Delta E_{21}\tau)-\sigma[\vec{d}_{01}\times \vec{d}_{02}]\vec{D}_{12}^{i}(k)\cos(\Delta E_{21}\tau),
\end{eqnarray}
with $J_x^{PXECD}(k)=J_y^{PXECD}(k)=0$.
Here $\tau$ is the pump-probe delay, $\vec{D}_{12}(k)=\vec{D}_{12}^{r}(k)+i\vec{D}_{12}^{i}(k)$ is the Raman-type photoionization vector (see the SI) which connects the excited bound states via the common continuum and plays the role of $\vec d_{12}$ of Eq.(\ref{PXCD},\ref{PXCD_cur}) and $k$ is the photoelectron momentum.
First, the electron current Eq. (\ref{PXECD}) is proportional to the helicity $\sigma$ of the pump pulse. Second, as transitions to the continuum are described by complex dipole vectors, it contains two triple products. Just like the triple product
$[\vec{d}_{01}\times \vec{d}_{02}]\vec{d}_{12}$ earlier, both $[\vec{d}_{01}\times \vec{d}_{02}]\vec{D}_{12}^{r}$ and $[\vec{d}_{01}\times \vec{d}_{02}]\vec{D}_{12}^{i}$ will change sign upon reflection. Thus, the electron current reverses its direction if the handedness of the pump pulse or of the enantiomer is swapped, showing that PXECD is a genuine chiral effect.
The chiral nature of the response arises only if the participating bound states are coherently excited.
Once the coherence is lost, the chiral signal will also disappear.
Importantly, the state of the continuum (Fig 1c) does not need to be chiral, as it only provides a link between the two chiral bound states. $J_z^{PXECD}(k)$ remains chiral even for a plane wave continuum (see the SI), in this case $\vec{D}_{12}(k)$ only has an imaginary component:
\begin{eqnarray}
\label{PXECD_PW}
J_{z,PW}^{PXECD}(k)\propto
-\sigma[\vec{d}_{01}\times \vec{d}_{02}]\vec{D}_{12}^{i,PW}(k)\cos(\Delta E_{21}\tau).
\end{eqnarray}
The total photoelectron current $J_{tot}^{PXCD} = \int J_{z,PW}^{PXECD}(k)dk$ measures the helical current excited in bound states $j_z^{PXCD}$ (Eq. \ref{PXCD_cur}) distorted by the partial alignment of the molecular ensemble induced by the pump (see the SI).
One might think that partial alignment of the excited molecular ensemble could already be fully responsible for enabling non-chiral probes of chiral dynamics. This is not true in our case. Indeed, the effect of alignment persists for a single excited electronic state and for the two excited electronic states with collinear dipoles, but in both cases it leads to zero PXECD current.
Finally, removing the effect of partial alignment from Eq.~(\ref{PXECD_PW}) shows that the PXECD current remains chiral for every $k$, while $J_{tot}^{PXCD}$ becomes directly proportional to the chiral component of the helical current in bound states: $J_{tot}^{PXCD}\propto j_z^{PXCD}$ (see the SI).
Probing the created chiral excitation using photo-electron imaging with linearly polarized light constitutes yet another new phenomenon, PhotoeXcitation-induced photoElectron Circular Dichroism (PXECD). PXECD is reminiscent of the Photoelectron Circular Dichroism (PECD) \cite{ritchie76,powis00, Bowering01,nahon15,garcia13,nahon16,comby2016relaxation}, which arises when a circularly polarized light is used to photoionize a chiral molecule. However, there is a fundamental difference.
PECD can only exist if the molecular potential felt by the emitted electron is chiral \cite{ritchie76} (the effect becoming negligible for photoelectron energies above 10 eV), while the initial orbital may or may not be chiral at all \cite{ulrich2008giant}. This is also clear from the diagram of PECD in Fig. 1(d). The continuum state cannot merely serve as a non-chiral link, as in this case it will only mediate the coupling of the chiral object, the molecule in the ground state, ``to itself'' rather than to another chiral object.
In contrast to PECD, the PXECD requires neither chiral light, nor chiral scattering of the photo-electron. Since PXECD does not require the technically challenging use of
ultrashort circularly polarized XUV pulses \cite{wang12_femto,Spezzani11_coher,allaria12_highly,fleischer2014spin,ferrea.15}, it opens unique perspectives for ultrafast chiral-sensitive measurements using readily available linearly polarized UV and XUV light from table-top high harmonic generation sources, with no restrictions on photoelectron energies.
We shall now confirm both numerically and experimentally that our scheme provides a sensitive time-resolved probe of chiral molecular dynamics in bound states.
\section{Theoretical analysis in fenchone}
To quantify the PXECD effect we performed quantum mechanical calculations on fenchone molecules (see the SI). First, we simulated the PXCD phenomenon and calculated the excitation of the s- and p-manifold of Rydberg states in fenchone by a circular pump pulse. The resulting electron density of the Rydberg wave-packet is asymmetric in the $z$-direction in the momentum space. The asymmetry reverses if the helicity of the pump pulse or the handedness of the molecule is reversed.
The strength of the PXCD can be quantified by the magnitude of the chiral component of the excited electron density. It is obtained by subtracting the momentum space density $D$ obtained with right (R) and left (L) polarized light: $PXCD=2(D(L)-D(R))/(D(L)+D(R))$.
Even after averaging over molecular orientations, the calculated PXCD reaches very high values (35$\%$, Fig. 2(a)). The asymmetry of the charge distribution corresponds to a macroscopic dipole moment $d_z^{\rm PXCD}$ which reaches 3 Debye (Fig. 2(b)) and oscillates at frequencies determined by the energy differences between the states forming the electronic wave-packet.
The calculated pump-probe PXECD signal reveals these oscillations (Fig. 2c). While few-femtosecond pulses would be needed to resolve them,
the PXECD signal can also be detected with much longer pulses. Fig. 2(d) shows that both PXCD and PXECD survive temporal averaging over 100 fs duration of a probe pulse.
\section{Observation of PXECD in fenchone}
In our experiment, a circularly polarized femtosecond pump pulse at 201 nm (6.17 eV photon energy, 80 meV at 1/e bandwidth) photoexcites enantiopure fenchone molecules from a supersonic gas jet in the interaction zone of a velocity map imaging spectrometer. The molecules are excited to their first (\textit{s-} and \textit{p-}) Rydberg bands through single-photon absorption (Fig. 3 (a), see the SI). A time-delayed, linearly polarized probe pulse at 405 nm (3.1 eV photon energy, 35 meV FWHM bandwidth) induces one-photon ionization of the excited molecules. The cross-correlation of the pump and the probe pulses is 170 fs. The photoelectrons are accelerated and projected by an electrostatic lens onto a set of dual microchannel plates and imaged by a phosphor screen and a CCD camera.
The photoelectron images are recorded alternatively using left (LCP) and right (RCP) circularly polarized pump pulses.
The difference (LCP-RCP) and sum (LCP+RCP) of these two images are reconstructed using a least-square fitting algorithm (see the SI).
We define the PXECD signal as
$PXECD=\frac{2(LCP-RCP)}{(LCP+RCP)}$ and the photoelectron spectrum (PES) as $PES=(LCP+RCP)/2$. Both are shown in Fig. 3(b) for a 200 fs pump-probe delay. As expected, a significant PXECD signal is observed, reaching 1$\%$ \cite{footnote3}
in good agreement with our calculations (Fig. 2(d)).
The photoelectron spectrum contains a single broad component, corresponding to ionization from the outermost orbital (vertical ionization potential $\sim$8.72 eV). The position of this component does not shift with the pump-probe delay (Fig. 4 (b)) and decays in 3.3 ps, reflecting simple vibronic relaxation of the Rydberg population onto lower states which cannot be photoionized by the 3.1 eV probe photons. The temporal evolution of the PXECD image shows much richer spectroscopic features, which can be analyzed by decomposing it into odd Legendre polynomials (Fig. 4(a)). We note that a sum of first- and third-order Legendre polynomials, with coefficients $\alpha$ and $\alpha'$, is enough to get the PXECD images. Both coefficients maximize around $\sim$50 meV below the maximum of the PES.
The PXECD signal (Fig. 4(b)) can be decomposed into two components: below and above the maximum of the PES. The low-energy component of $\alpha$ undergoes a rather smooth decay. On the contrary, its high-energy component decays very quickly and even changes sign around 1 ps. For $\alpha'$ the
behaviour is opposite, \textit{i.e.} the high-energy component shows much slower dynamics than the low-energy part.
Such time- and electron energy- dependent behaviour is characteristic of internal vibrational torsional motion and may indicate the change of the chiral structure of the molecule induced by such motion. Indeed, the electronic excitation of the molecules is expected to be accompanied by a significant vibrational excitation, since the equilibrium geometries of the 3s and 3p Rydberg states are quite different from that of the ground state. The molecules will tend to relax towards the equilibrium geometry of the Rydberg states, and oscillate around it. Figure 5 illustrates the influence of this change of molecular geometry on the calculated PXECD signal.
Even small bond length changes ($\leq 7 \%$) lead to significant modification of the PXECD signal.
This demonstrates the remarkable sensitivity of PXECD to molecular vibrations, which follow the electronic excitation.
At 4 ps (not shown), the PXECD completely vanishes while the Rydberg population is still significant. This result unambiguously reflects the loss of wave-packet coherence which halts chiral dynamics in our experiment.
\section{Vibrational PXCD: experiments in camphor}
Is it possible to create PXCD from purely vibrational excitation of a chiral molecule?
Theoretically, the two excited states in Eqs.(1,2) needed for PXCD do not have to be different electronic states. Vibrational states within the same electronic state can also fulfil the PXCD condition as long as their dipoles are not collinear, see Eqs. (1,2).
As shown in the SI, this requires the breakdown of the Franck-Condon approximation, which is caused by a strong dependence of the electronic wave-function on the position of the nuclei. In turn, such dependence leads to the appearance of electronic currents stimulated by the nuclear motion, which is triggered by the pump pulse. Thus, vibrational PXCD is intertwined with the underlying chiral motion of electrons. Note that this strong dependence of the electronic wave-functions on the nuclear positions naturally arises in the vicinity of conical intersections between electronic potential surfaces. Thus, we expect that PXECD could be used to excite and reveal coherent chiral dynamics at conical intersections.
To gain further insight into the role of electronic versus vibrational dynamics in PXECD, we performed measurements in (1R)-(+)-camphor, a very similar structural isomer of fenchone. The \textit{s-} and \textit{p-} Rydberg bands of camphor are upshifted by several tens of meV compared to fenchone, preventing direct excitation of the \textit{p-} states and thus of an electronic chiral wave-packet. Nevertheless, the experiment still reveals a strong PXECD signal, indicating that a chiral vibronic wave-packet has been created in the \textit{s-} Rydberg band of camphor. The $\alpha'$ coefficients in camphor and fenchone are of opposite sign as seen in multiphoton \cite{Lux2015_photo} and one-photon PECD \cite{nahon15}.
In our experiment, this could be a consequence of PXECD sensitivity to isomerism (see Figure 5 to gauge the sensitivity to nuclear configuration), but it could also be a signature of the different nature of the excited chiral electronic currents in fenchone and camphor.
Changing the excitation wavelength from 202 nm to 200 nm does not affect the monoexponential decay of the PES.
In contrast, a strong change is observed in the PXECD: the $\alpha'$ magnitude is almost twice as large and it is shifted in energy towards the red wing of the photoelectron spectrum.
The drastic change observed in the PXECD signal in camphor once the pump photon energy is increased by only 60 meV illustrates the extreme sensitivity of this measurement to the excited vibrational dynamics.
\section{Conclusions and outlook}
We have demonstrated two new phenomena.
First, we have shown the efficient excitation of a macroscopic bound chiral electron density in the excited states of randomly oriented chiral molecules without the help of magnetic interactions (the PXCD phenomenon).
In the dipole approximation the chiral pitch of circularly polarized light vanishes. This means that the creation of the macroscopic chiral density in the isotropic ensemble of chiral molecules is based not on the helical structure of light, but on its planar rotation.
Second, we have shown that the resulting chiral dynamics can be probed without the help of further chiral interactions and thus in an
efficient and versatile way.
The detection relies on photoelectron circular dichroism arising from the ionization of excited molecules by linearly polarized light pulses (the PXECD phenomenon), but is not limited to this scheme. The application of a linearly polarized XUV probe in PXECD would enable genuine probing of ultrafast chiral bound dynamics, since PXECD does not require chiral interaction in the continuum, which becomes negligible for sufficiently high-energy electrons.
The ensemble-averaged chiral charge density arising in PXCD implies asymmetry in charge distribution along the light propagation direction. Depending on the medium density, this could lead to a very large coherently oscillating macroscopic dipole. The phase of this oscillation is opposite for two enantiomers, leading to macroscopic enantio-sensitive effects. The existence of the enantio-sensitive macroscopic dipole opens the way to the separation of enantiomers in isotropic racemic mixtures in the gas phase.
The PXCD phenomenon opens the way to direct visualization of chiral electronic density using time-resolved X-ray diffraction imaging, both in the gas and condensed phase. Intense ultrafast sources of X-ray radiation, such as Free Electron Lasers, combined with measurements, sensitive to valence-shell dynamics in the gas phase \cite{bredtmann2014x} should lead to few-fs time resolution of chiral charge dynamics.
Finally, PXCD could be used to drive molecular reactions in chiral systems in a stereo-specific way, by imprinting a chiral torque via the helicity of the exciting circularly polarized pulse. The ultrafast charge dynamics triggered by coherent electronic excitation is reminiscent of ultrafast charge migration triggered by photo-ionization \cite{Lunnemann,Breidbach,Remacle,Kuleff,Lepine,Leone,Kuleff2} recently observed in ref. \cite{Calegari} and speculated to underlie
charge-directed reactivity in cations \cite{Weinkauf}.
Chiral electron stereo-dynamics in neutral molecules may open similar opportunities for controlling charge and energy flow in molecules at the level of electrons, offering new perspectives for such intriguing problems as asymmetric synthesis, a major challenge in stereochemistry.
\begin{methods}
An Even-Lavie valve is used as a pulsed enantiopure fenchone source with helium as carrier gas to avoid cluster formation. (1R)-($-$)- and (1S)-(+)-fenchone correspond to (1R,4S) and (1S,4R) fenchone respectively. The 170 fs cross-correlation time as well as the 0 fs delay are determined on the lightest fragment C$_4$H$_5$O$^+$ produced by dissociative ionization with both linearly polarized pump and probe. The high voltage of the repeller electrode was $-3$ kV for the experiment done in fenchone and only $-2$ kV for the experiment done in camphor, which increases the energy resolution. Note that the latter, along with the energy calibration, has been determined by photoionizing krypton. Typically the energy resolution is 80 meV at 0.7 eV kinetic energy. The presented results are obtained by scanning the pump-probe delays typically 30 times. At each delay, the helicity is switched every 45000 laser shots ($=45$ seconds) to record several images.
\end{methods}
|
1,314,259,995,010 | arxiv | \section{Introduction}
Following the first weak lensing measurements \citep{BRE00,Wittman00,KWL00,Waerbeke00},
the field of weak lensing has witnessed a tremendous progress in all fronts (see \citet{MuPhysRep08} for a review).
Currently, in terms of cosmological observations, weak lensing plays a role complementary to both Cosmic Microwave
Background (CMB) studies and studies involving large scale structure (LSS) surveys.
The ability of weak gravitational lensing to reveal cosmological information, particularly the dark energy equation
of state is considerably enhanced by the inclusion of tomographic information.
The importance of weak lensing has spurred tremendous progress on the technical front in terms of specification
and control of systematics. There are many ongoing and future weak lensing surveys such as the
CFHT{\footnote{http://www.cfht.hawaii.edu/Sciences/CFHLS/}}
legacy survey, the Pan-STARRS{\footnote{http://pan-starrs.ifa.hawaii.edu/}}
and the Dark Energy survey{\footnote{https://www.darkenergysurvey.org/}},
and further in the future, the Large Synoptic Survey Telescope{\footnote{http://www.lsst.org/lsst\_home.shtml}},
Joint Dark Energy Mission or JDEM{\footnote{http://jdem.gsfc.nasa.gov/}} that will map the dark matter and dark energy distribution
of the entire sky in unprecedented details. In particular, owing to the large fraction of the sky coverage
and tighter control on systematics as well as dense sampling of source galaxy populations it will be
soon possible to study gravity induced non-Gaussianity with extreme accuracy.
The gravity induced non-Gaussianity is typically probed using real space correlation
functions as well as in the harmonic domain using their harmonic counterparts i.e.
the multispectra (see e.g. \cite{Pen03}). These correlation functions provide
a set of tools to go beyond the usual power spectrum analysis. The higher-order correlation functions
are important not only to break the parameter degeneracy inherent in power spectrum analysis
(e.g. between the amplitude of the matter power spectrum $\sigma_8$ and the matter density parameter $\Omega_{\rm M}$)
but also to understand error-estimates of lower-order correlations functions.
Starting with the study of the three-point correlation function \citep{Vil96,JainSeljak97}
higher order statistics of weak lensing shear, convergence or flexions are now well understood
from a theoretical point of view.
The power spectrum of density perturbations remains the most commonly
used statistic in many cosmological studies. Weak lensing surveys probe the non-linear
regime and are sensitive to non-Gaussianity which can not be
probed using only the two-point correlation function or its harmonic analog the power spectrum.
The statistics of shear or convergence probe the statistics of underlying
mass distribution in an unbiased way \citep{JSW00,MuJai01,Mu00,MuJai00,Valageas00,
VaMuBa05,TakadaWhite03,TakadaJain04}, sensitive to
nonlinear evolution due to gravitational clustering. Various analytical schemes
from perturbative calculations to halo models have been employed to model
the weak lensing statistics \citep{Fry84,Schaeffer84, BerSch92,SzaSza93, SzaSza97, MuBaMeSch99,
MuCoMe99a, MuCoMe99b, MuMeCo99, MuCo00, MuCo02, MuCo03, CooSeth02}.
In addition to studying the statistics in projection they have also been studied in 3D using photometric
redshifts. This approach can further tighten the constraints on e.g. the neutrino mass
as well as the dark energy equation of state \citep{Heav03,HRH00, HKT06, HKV07, Castro05, Kit08}.
Tomographic techniques have also been employed as an intermediate strategy between projected
surveys and 3D mapping \citep{Hu99,TakadaJain04,TakadaJain03,Semboloni08}.
In this paper we extend previous results \citep{JSW00,MuJai01,Mu00,MuJai00,Valageas00}
on projected surveys by analysing the entire one-point PDF and
the two-point PDF with tomographic information. The PDF contains information about
the correlation hierarchy to an arbitrary order; the correlation hierarchy of
the convergence field is directly related to that of the underlying mass distribution.
We employ a generating function formalism that relies on {\em hierarchical ansatz} on
smaller angular smoothing scales and on perturbative results on larger scales.
We define a reduced convergence for each bin and show that the different bins
sample the same underlying PDF and bias functions (to be defined later) for the density contrast.
The entire joint two-point PDFs for different pairs of redshift bins and
individual PDF for each bins can be constructed from the PDF and the bias associated
with individual bins because the joint PDF is factorisable in terms of the individual
PDFs, bias and cross-correlations among various bins and different angular scales.
We will show that individual redshift-resolved tomographic maps can be used
to map out the PDF of the underlying mass distribution for a wide range of
variance. This underlying PDF of the density contrast can be used to
recover the tomographic PDF with the use of just two individual variables $\kappa_{min}$
and the reduced variance for each bin; both of these variables are uniquely determined by the geometry and
matter content of the Universe. The results are applicable not only to the PDFs
as determined under hierarchical ansatz but also for other well motivated approximations
for PDF such as the lognormal distribution.
Recent cosmological observations favour an accelerating Universe. This implies existence of
energy of unknown nature (dark energy) which has negative pressure \citep{Amen10,Wang10}. Current data
continues to be consistent with dark energy being a non-zero cosmological constant.
Though many other alternative dark energy candidates have been considered which are
consistent with the data as well, e.g. quintessence, k-essence, spintessence.
Different dark energy models can be classified according to the equation of state
of the dark energy component $w_{\rm X}$. For quintessence models $dw_{\rm X}/dz>0$ while
for k-essence models $dw_{\rm X}/dz<0$. There are many complementary probes for dark energy:
the distance-redshift relation of cosmological standard candles;
Cosmic Microwave Background Anisotropy; volume redshift relations using galaxy counts;
the evolution of galaxy clustering; weak lensing, etc. The different methods to probe
dark energy are complementary to each other and can provide important consistency checks.
Weak lensing surveys are particularly suitable for dark energy studies. All major
weak lensing surveys have dark energy as one of their prime science drivers. We
will use the techniques developed in this paper to study two different dark energy
models and compare the predictions against those of the standard $\Lambda$CDM model.
The methods presented here are complementary to the usual Fisher matrix based approach
that rely on two-point correlation functions or the power spectrum as it includes
non-Gaussian information to an arbitrary order.
This paper is organised as follows. In \textsection2 we introduce our notation and present some standard results.
In \textsection3 we link the lower order statistics of weak lensing convergence to that of the underlying density
distribution. In \textsection4 we briefly review the hierarchical ansatz in the context of
generating function formalism. In \textsection6 we discuss the lognormal model in the context of weak lensing
statistics. In \textsection7 we derive the PDF and bias for various tomographic bins. The results
are quite generic and can be used for arbitrary source redshift distribution. Finally the \textsection8
is left for discussion of our results. In an appendix we outline how in the context of tomographic binning
the evolution topological estimators such as Minkowski Functionals can be studied using the lognormal distribution.
\section{Notation}
The statistics of the weak lensing convergence $\kappa_{}({\hat\Omega})$
represents that of the projected density contrast $\delta({\bf x})$ along the line of sight.
In our analysis we will consider a small patch
of the sky where we can use the plane parallel approximation or small
angle approximation to replace the spherical harmonics by Fourier modes.
The 3-dimensional density contrast $\delta$
along the line of sight when projected onto the
sky with the weight function $\omega_{\rm S}(r,r_s)$ gives
the weak lensing convergence in a direction ${\hat\Omega}$ which we have denoted by $\kappa_{}({{\hat\Omega}})$:
\begin{equation}
{\rm Single\;\; Source\;Plane:~~}\kappa_{\rm }({{\hat\Omega}}) = {\int_0^{r_s}} {dr}\;
\omega_{\rm S}(r)\;\delta(r,{{\hat\Omega}}); \quad
\omega_{\rm S}(r,r_s) = {3\over 2} {H_0^2\over c^2}\
\Omega_{\rm M} a^{-1} \ {d_{\rm A}(r) d_{\rm A}(r_s - r)\over d_{\rm A}(r_s)} ; \quad
\kappa_{\rm S}^{\rm min}(r_s) = -\int_0^{r_s}\omega_{\rm S}(r,r_s) dr.
\end{equation}
Here $d_A(r)$ is the angular diameter distance at a comoving
distance $r$. The subscript $_{\rm S}$ in $\omega_{\rm S}(r,r_s)$ refers to a single source plane.
We have also introduced a parameter $\kappa^{\rm min}$ which will be
useful in parametrizing the PDF and represents the minimum value of the convergence $\kappa$;
$H_0$ is the Hubble parameter and $a$ represents the scale factor. The comoving radial distance
is denoted by $r$. For a distribution of sources represented by $p_s(z)$ we can write the
projected convergence $\kappa({\hat\Omega})$ as follows:
\begin{equation}
\omega_{\rm S}(r,r_s) = {3 \over 2}{H_0^2 \over c^2} {\Omega_{\rm M}}a^{-1}(r) {1 \over \bar n_g} d_{\rm A}(r)\int_r^{r_{\rm H}} dr_s\;p_s(z) {dz \over dr_s}
{d_{\rm A}(r_s-r)\over d_{{\rm A}}(r_s)}; \quad\quad p_s(z) = \bar n_g {z^2 \over 2 z_0^3} \exp(-z/z_0).
\end{equation}
\noindent
In a tomographic analysis the source population is divided into several redshift bins and each of which is treated
separately. The contribution from the individual bins are taken into account when computing the cumulants or
the cumulants correlators. It is also possible to compute the cross-covariance of these redshift bins.
The convergence $\kappa_{(i)}({\hat\Omega})$ from $i$-th tomographic bin can be expressed as:
\begin{eqnarray}
&& {\rm Tomography:~~} \kappa_{(i)}({\hat\Omega}) = \int_0^{r_{\rm H}} dr\; w_{(i)}(r)\delta[r,{\hat\Omega}]; \quad\quad
w_{(i)}(r) = {3 \over 2}{H_0^2 \over c^2} \Omega_{\rm M}{1 \over \bar n_i} a^{-1}(r)\;d_{\rm A}(r)\; \int^{r_{i+1}}_{max\{r,r_i\}}\; dr_s \; p_s(z) {dz \over d r_s}
{d_A(r_s-r) \over d_{\rm A}(r_s)}
\label{eq:omegai}
\end{eqnarray}
\noindent
The ``bin average'' of the source population is denoted by $\bar n_i$ and is defined accordingly as $\bar n_i = \int_{r_i}^{r_{i+1}} dr_s p_s(z) {dz/dr_s}$.
We will consider different bin sizes and source distributions. To incorporate the photometric redshift error we can write
\begin{equation}
{\rm Photometric\; Redshift\; Errors:~~} w_{(i)}(r) = r\int_{r_i}^{r_{i+1}} dr' \left [ \sum_{h} p_h(z'|z_h)\right ] {\rm F}_{\rm K}(r',r).
\label{eq:omegai_photo}
\end{equation}
Here ${\rm F}_{\rm K} = [S_{\rm K}(r-r')/S_{\rm K}(r)S_{\rm K}(r')]$ with $S_k(r) = \sinh(r), r, \sin(r)$
for open, flat and closed geometries. The probability distribution $p_h$ signifies posterior
probability distribution of redshift given a photometric redshift of $z_h$.
In our calculation we will need to define a new variable $\kappa^{\rm min}$ (or $\kappa^{\rm min}_{(i)}$ for tomographic bins) which will be useful later:
\begin{equation}
\kappa_{\rm S}^{\rm min}(r_s) = -\int_0^{r_s}\;dr\; w_{S}(r,r_S); \quad \kappa_{(i)}^{\rm min} = -\int_0^{r_{\rm H}}\;dr\; w_{(i)}(r);
\end{equation}
In evaluation of $\kappa_{(i)}^{min}$ we use the following approximate form for the window $w_i(r)$:
\begin{equation}
w_{(i)}(r) \approx \Delta r_s {3 \over 2}{H_0^2 \over c^2}\Omega_{\rm M} {1 \over {\bar n_i}} d_{\rm A}(r) p_s(z(r_i)) \left [{dz \over dr_s} \right ]_{r=r_i}
{d_{{\rm A}}(r_i-r) \over d_{{\rm A}}(r_i)} \approx w(r,r_{(i)})
\end{equation}
Using these results it is easy to see that $\kappa^{min}_{(i)} = \kappa^{min}_{\rm S}(r_i)$.
We will adopt two example survey configurations to make definitive calculations. For DES we will take $z_{0}=0.3$ and for LSST we will take $z_0=0.4$.
The range of source distribution that we consider for each survey is $z_s=0.2-1.6$. The bin-size we take is $\Delta z_s = 0.2$.
The constant $\bar n_g$ is set by imposing the normalized condition $\int_0^{\infty}\; dz\; p_s(z)=1$.
For our purpose we have $\bar n_g = 1.2 \times 10^7 \bar n_g'$ ($n_g$ specifies the galaxy number density per square arc-minutes). We will vary $n_g$ from few galaxies per arcmin$^2$ to tens of galaxies per arcmin$^2$.
The noise power spectrum $C_l^{\rm N}$ in terms of the intrinsic ellipticity $\gamma_i^2=0.1$ is expressed as $C_l^{\rm N} = \gamma_i^2/\bar n_g$.
Next we consider the lower order cumulants for individual bins as well as projected catalogs. These results will be
eventually be useful for the construction of the entire PDF and bias.
The particular cosmology that we will adopt for numerical study are
specified by the following parameter values: $\Omega_\Lambda = 0.741, h=0.72, \Omega_b = 0.044,
\Omega_{\rm CDM} = 0.215, \Omega_{\rm M} = \Omega_b+\Omega_{\rm CDM}, n_s
= 0.964, w_0 = -1, w_a = 0, \sigma_8 = 0.803, \Omega_\nu = 0$.
In addition to the ordinary $\Lambda$CDM model we will also use two dark energy models in our study.
The angular diameter distance for a dark energy dominated cosmology with dark energy equation of state $w_{\rm X}$
can be written as:
\begin{equation}
d_{\rm A}(z) = cH_0^{-1}\int_0^z\; dz'\; [ \Omega_{\rm M}(1+z')^3 + \Omega_{\rm K}(1+z')^2 + \Omega_{\rm X} f(z)]^{-1/2}
\end{equation}
Here $\Omega_{\rm X}$ denotes the dark energy component and $\Omega_{\rm K} = 1-\Omega_{\rm M} -\Omega_{\rm X}$. The function $f(z)$ parametrizes
the time-dependence of the dark energy density and $f(z=0)=1$. For dark energy with
constant equation of state $w_{\rm X} = p_{\rm X}/\rho_{\rm X}$ we have $f(z) = (1+z)^{3(1+w_{\rm X})}$. The $\Lambda$CDM is
a limiting case when $w_X=-1$ or $f(z)=1$. In general for an arbitrary dark energy equation of state
can be represented as \citep{WaGa01} $w_{\rm X}(z) = {1 /3}(1+z)f'(z)/f(z)-1$. The popular parametrization is given
by $w_{\rm X}(z)=w_0 + w_1 z/(1+z)$. We will consider two different models: (i) constant equation of state $w_0=-0.95$; and
(ii) with evolving equation of state $w_{\rm X}(z)=-1 + {z/(1+ z)}$.
For the computation of the power spectrum we use the scaling ansatz of \citep{PD94}. The ansatz
consists of postulating a non-local mapping $4\pi k^3P(k) = f_{nl}[4\pi\;k_l^3 P_l(k_l)]$
of linear power spectrum $P_l(k_l)$ at a wavenumber $k_l$ to nonlinear power spectrum $P(k)$
to another wave number $k$. The wave numbers $k$ and $k_l$ are related by an implicit relation $k_l=(1+4\pi k^3 P(k))^{-1/3}k$.
The functional form for $f_{nl}$ is determined from numerical simulations (see \textsection\ref{sec:logn} for more related discussions).
The evolution of the linear power spectrum in a dark energy dominated model can be characterised using a function $g(z)$,
i.e. $P_l(k,z)= [g(z)/(1+z)]^2 P_l(k,z=0)$, where $g(z)$ can be expressed as:
\begin{equation}
g(z) = {5 \over 2}\Omega_{\rm M}(1+z)E(z)\int_z^{\infty} dz' {1+z' \over [E(z')]^3}; \quad\quad
E(z) = \sqrt{ \Omega_{\rm M}(1+z)^3 + \Omega_{\rm K}(1+z)^2 + \Omega_{\rm X} f(z)}.
\end{equation}
We will use these expressions to compute the variance of smoothed convergence field $\kappa(\theta_0)$ as a function of source redshift
and smoothing radius $\theta_0$. We will use top-hat smoothing window $W_{\rm TH}(l\theta_0)$ for our study.
In Figure (\ref{fig:kmin}) we plot the parameter $\kappa_{\rm min}$ as a function of redshift for different cosmologies (left panel). We also show the number distribution of source galaxies (right panel).
\section{Lower Order Statistics of Tomographic Convergence Maps}
\label{lower}
Using these definitions we can compute the
projected two-point correlation
function in terms of the dark matter power spectrum
$P_{\delta}(k,r)$ (Peebles 1980, Kaiser 1992):
\begin{equation}
\langle \kappa_{(i)}({\hat\Omega}_1) \kappa_{(j)}({\hat\Omega}_2) \rangle_c = {\int_0^{r_s}} d {r}
{\omega_{(i)}(r){\omega_{(j)}(r)} \over d^2_A(r)} \int {d^2 {\bf l} \over (2
\pi)^2}~\exp ( i{\bf \theta}_{12} \cdot {\bf l} )~ {\rm P}_{\delta} { \left [ {l\over d_A(r)}, r \right ]}W^2_{\rm TH}(l\theta_0).
\end{equation}
\noindent
Here $\theta_{12}$ is the angular separation projected onto the surface of the sky
($\cos|\theta_{12}|=\hat\Omega_1\cdot\hat\Omega_2$) and we have also introduced ${\bf l} = d_A(r){\bf
k}_{\perp}$ to denote the scaled projected wave vector; $\omega_i(r)$ are the weak lensing projection weights for the
$i$-th photometric bin defined in Eq.(\ref{eq:omegai}); to include photometric redshift errors we simply need to use
$\omega_i(r)$ defined in Eq.(\ref{eq:omegai_photo}).
Using Limber's approximation \citep{Limb54} the variance of $\kappa_{(i)}(\theta_0)$
smoothed using a top-hat window $W_{\rm TH}(\theta_0)$ with a radius $\theta_0$ can be written as:
\begin{equation}
\langle \kappa_{(i)}^2 \rangle = {\int_0^{r_s}} d {r}
{\omega_{(i)}^2(r) \over d_A^2(r)} \int {d^2 {\bf l} \over (2
\pi)^2}~ {\rm P}_{\delta} { \Big ( {l\over d_{\rm A}(r)}, r \Big )} W_{\rm TH}^2(l\theta_0).
\label{kappa_variance}
\end{equation}
The variance is plotted for different redshift bins in Figure (\ref{fig:var}).
Similarly the higher order moments of the smoothed convergence
field relate $\langle \kappa^p_{} ({\theta_0}) \rangle$ to the
3-dimensional multi-spectra of the
underlying dark matter distribution $B_p$ (Hui 1999, Munshi \& Coles 2000):
\begin{eqnarray}
&& \langle \kappa_{(i)}^3 \rangle_c = {\int_0^{r_s}} dr
{\omega_{(i)}^3(r) \over d_{\rm A}^6(r)} \int {d^2 {\bf l}_1 \over (2\pi)^2}
W_{\rm TH}(l_1\theta_0) \int {d^2{\bf
l_2}\over (2\pi)^2} W_{\rm TH}(l_2\theta_0) \int {d^2 {\bf l}_3 \over
(2\pi)^2} W_{\rm TH}(l_3\theta_0) ~ {\rm B}_{\delta} \Big ( {l_1\over d_A(r)},
{l_2\over d_A(r)}, {l_3\over d_{\rm A}(r)}, r \Big )_{\sum {\bf l}_i = 0} \\
&& \langle \kappa_{(i)}^4 \rangle_c = {\int_0^{r_s}} d {r}
{\omega^4_{(i)}(r) \over d_{\rm A}^8(r)} \int {d^2 {\bf l}_1 \over (2\pi)^2}
{\rm W}_{\rm TH}(l_1\theta_0) \int {d^2{\bf
l_2}\over (2\pi)^2} {\rm W}_{\rm TH}(l_2\theta_0) \int {d^2 {\bf l}_3 \over
(2\pi)^2} {\rm W}_{\rm TH}(l_3\theta_0)\int {d^2 {\bf l}_4 \over (2 \pi)^2} {\rm W}_{\rm TH}(l_4\theta_0)
{\nonumber} \\ &&\quad\quad\quad\quad\quad\quad ~ \times{\rm T}_{\delta} \Big ( {l_1\over d_A(r)},
{l_2\over d_A(r)}, {l_3\over d_{\rm A}(r)}, {l_4\over d_{\rm A}(r)}, r \Big )_{\sum {\bf l}_i = 0}.
\end{eqnarray}
\noindent
The subscripts $\sum {\bf l}_i = 0$ represent the delta function $\delta_{\rm D}(\sum l_i)$.
We will use these results to show that it is possible
to compute the complete probability distribution function
of $\kappa_{(i)}$ from the underlying dark matter probability
distribution function. Details of the analytical results presented
here can be found in \citep{MuCo00}.
A similar analysis for the higher order cumulant correlators of the
smoothed convergence field relating $\langle \kappa_{(i)}^p ({\hat\Omega}_1) \kappa_{(j)}^q ({\hat\Omega}_2) \rangle_c$ with
multi-spectra of underlying dark matter distribution $B_{p+q}$ can be expressed as \citep{SzaSza97,MuCo00,MuCo02}:
\begin{eqnarray}
&& \langle \kappa_{(i)}^2({\hat\Omega}_1) \kappa_{(j)}({\hat\Omega}_2)
\rangle_c =
\int_0^{r_s} { \omega_{(i)}^2 (r)\omega_{(j)}(r) \over d_{\rm A}^6(r) } dr \int
\frac{d^2{\bf l}_1}{(2\pi)^2} \int \frac{d^2{\bf l}_2}{(2\pi)^2} \int \frac{d^2{\bf l}_3}{(2\pi)^2} {\rm W}_{\rm TH}(
l_1\theta_0) {\rm W}_{\rm TH}(l_2\theta_0) {\rm W}_{\rm TH}(l_3\theta_0) {\nonumber} \\ && \quad\quad\quad\quad \quad\quad\quad\quad \times\exp(i
{\bf \theta}_{12} \cdot {\bf l}_3){\rm B}_{\delta} \Big ( {l_1\over d_{\rm A}(r)},
{l_2\over d_{\rm A}(r)}, {l_3\over d_{\rm A}(r)}, r \Big )_{\sum {\bf l}_i = 0}.
\end{eqnarray}
We will use and extend these results in this paper to show that it is possible
to compute the whole bias function
$b(>\kappa_{})$, i.e. the bias associated with those spots in the
convergence map where $\kappa_{}$ is above a certain threshold (which acts as a generating function for these
cumulant correlators) from the statistics of underlying over-dense dark objects \citep{MuCoMe99a,MuCoMe99b}.
\section{Hierarchical {\em Ansatze}}
The spatial length scales corresponding to
small angles are in the highly non-linear regime of gravitational clustering. Assuming a ``tree'' model
for the matter correlation
hierarchy in the highly non-linear regime, one can write the
general form of the $N$th order correlation function $\xi^{(p)}_{\delta}$ as
(Peebles 1980, Bernardeau \& Schaeffer 1992, Szapudi \& Szalay 1993):
\begin{eqnarray}
&& \xi^{(3)}_{\delta}( {\bf r}_1,{\bf r}_2, {\bf r}_3)= Q_3(\xi_{\delta}^{(2)}({\bf r}_1,{\bf r}_2)\xi_{\delta}^{(2)}({\bf r}_1,{\bf r}_3)+ {\rm cyc. perm.} );\\
&& \xi^{(4)}_{\delta}( {\bf r}_1,\cdots, {\bf r}_4)=
R_a(\xi_{\delta}^{(2)}({\bf r}_1,{\bf r}_2)\xi_{\delta}^{(2)}({\bf r}_1,{\bf r}_3)\xi_{\delta}^{(2)}({\bf r}_1,{\bf r}_4)
+ {\rm cyc. perm.}) + R_b(\xi_{\delta}^{(2)}({\bf r}_1,{\bf r}_2)\xi_{\delta}^{(2)}({\bf r}_2,{\bf r}_3)\xi_{\delta}^{(2)}({\bf r}_3,{\bf r}_4)
+ {\rm cyc. perm.}).
\end{eqnarray}
In general, correlation functions of arbitrary order are constructed by taking a sum over all possible {\em topologies} with
respective amplitude parameters $Q_{N,\alpha}$, which in general will be different:
\begin{equation}
\xi^{({\rm p})}_{\delta}( {\bf r}_1, \dots {\bf r}_{\rm p} ) = \sum_{\alpha, \rm p-trees}
Q_{p,\alpha} \sum_{\rm labellings} \prod_{\rm edges}^{(\rm p-1)}
\xi^{(2)}_{\delta}({\bf r}_i, {\bf r}_j) .
\end{equation}
To simplify the notation we will use $\xi^{(2)}_{\delta}(r_1,r_2) \equiv \xi_{12}$ and $\bar\xi_2$ for its
volume average over a volume $v$. It is interesting to note that a similar hierarchy
develops in the quasi-linear regime in the limit of vanishing variance
(Bernardeau 1992); however the hierarchical amplitudes $Q_{p, \alpha}$
become shape dependent functions in the quasilinear regime. In the
highly nonlinear
regime there are some indications that these functions become
independent of shape, as suggested by studies of the
lowest order parameter $Q_3 = Q$ using high resolution numerical
simulations (Scoccimarro et al. 1998). In Fourier space such an
ansatz means that the hierarchy of multi-spectra
can be written as sums of products of the matter power-spectrum:
\begin{eqnarray}
&&{\rm B}^{(3)}_{\delta}({\bf k}_1, {\bf k}_2, {\bf k}_3)_{\sum k_i = 0} = Q_3 ( P_{\delta}({\bf
k}_1)P_{\delta}({\bf k}_2) + P_{\delta}({\bf k}_2)P_{\delta}({\bf k}_3)
+ P_{\delta}({\bf k}_3)P_{\delta}({\bf k}_1) ) ;\\
&&{\rm B}^{(4)}_{\delta}({\bf k}_1, {\bf k}_2, {\bf k}_3, {\bf k}_4)_{\sum k_i = 0} = R_a
\ P_{\delta}({\bf k}_1)P_{\delta}({\bf k}_1 +
{\bf k}_2) P_{\delta}({\bf k}_1 + {\bf k}_2 + {\bf k}_3) + {\rm cyc. perm.} + R_b(\ P_{\delta}(
{\bf k}_1)P_{\delta}({\bf k}_2)P_{\delta}({\bf k}_3) +
{\rm cyc. perm.})
\end{eqnarray}
In general for the $p$-th order poly-spectra $B^{(p)}_{\delta}( {\bf k_1}, \dots {\bf k_p} )$ we can write:
\begin{equation}
{\rm B}^{({\rm p})}_{\delta}( {\bf k}_1, \dots {\bf k}_{\rm p} ) = \sum_{\alpha, \rm p-trees}
Q_{{\rm p},\alpha} \sum_{\rm labellings} \prod_{\rm edges}^{(p-1)}
P^{}_{\delta}({\bf k}_i, {\bf k}_j) .
\end{equation}
Different hierarchical models differ in the way they predict the
amplitudes of different tree topologies. Bernardeau \&
Schaeffer (1992) considered the case where amplitudes in general are
factorisable, at each order one has a new ``star'' amplitude
and higher order ``snake'' and ``hybrid'' amplitudes can
be constructed from lower order ``star'' amplitudes (see Munshi,
Melott \& Coles 1999a,b,c for a detailed description). In models proposed by
Szapudi \& Szalay (1993) it was assumed that all hierarchical amplitudes of any
given order are degenerate. Galaxy surveys
have been used to study these {\em ansatze}. Our goal here is to
show that weak-lensing surveys can also provide valuable information
in this direction, in addition to constraining the matter power-spectra and
background geometry of the universe. We will use the model proposed by
Bernardeau \& Schaeffer (1992) and its generalization to the
quasi-linear regime by Bernardeau (1992, 1994) to construct the PDF
of the weak lensing field $\kappa_{(i)}$. We express
the one-point cumulants as:
\begin{equation}
\langle \kappa_{(i)}^3\rangle_c = (3Q_3){\cal C}^{(i)}_3[\kappa^2_{\theta_0}]
= S_3^{(i)} \langle \kappa_{(i)}^2 \rangle_c^2 \label {hui} \quad\quad
\langle \kappa_{(i)}^4\rangle_c = (12R_a + 4
R_b){\cal C}^{(i)}_4[\kappa^3_{\theta_0}] = S_4^{(i)} \langle \kappa_{(i)}^2\rangle_c^3,
\end{equation}
where we have introduced the following notation:
\begin{eqnarray}
&& {\cal C}^{ij}_{p+q}[[{\cal J}_{\theta_0}(r)]^{p+q-2} [{\cal J}_{\theta_{12}}(r)] ] =
\int_0^{r_s} { \omega_{(i)}^{p}(r)\omega_{(j)}^q(r) \over
d_A^{2(p+q-1)}(r)}[{\cal J}_{\theta_0}(r)]^{p+q-2}[{\cal J}_{\theta_{12}}(r)] dr; \\
&& [{\cal J}_{\theta_0}(r)] \equiv \int
\frac{d^2\bf l}{(2\pi)^2} P_{\delta} \left( {l \over d_{\rm A}(r)} \right)
{\rm W}_{\rm TH}^2(l\theta_0).\quad\quad
[{\cal J}_{\theta_{12}}(r)] \equiv \int
\frac{d^2\bf l}{(2\pi)^2} P_{\delta} \left( {l \over d_{\rm A}(r)} \right)
{\rm W}_{\rm TH}^2(l\theta_0) \exp ( i\,{\bf l} \cdot {\bf \theta_{12}}).
\end{eqnarray}
The normalised cumulants for convergence in the i-th bin are denoted by (skewness) $S_3^{(i)}$ and (kurtosis) $S_4^{(i)}$
and are plotted in Figure (\ref{fig:sn}).
Eq.~(\ref{hui}) was derived by Hui (1998) in the context of weak lensing surveys. He
showed that his result agrees well with the ray tracing
simulations of Jain, Seljak and White (1998). Later studies
extended this result to the entire family of two-point statistics such as cumulant correlators
(Munshi \& Coles 1999, Munshi \& Jain 1999).
\begin{eqnarray}
\langle \kappa_{(i)}^2({\hat\Omega}_1) \kappa_{(j)}({\hat\Omega}_2) \rangle_c & = &
2Q_3 {\cal C}_3^{(ij)} [{\cal J}_{\theta_0}(r) {\cal J}_{\theta_{12}}(r)] =
C_{21}^{\eta}{\cal C}_3^{(ij)} [{\cal J}_{\theta_0}(r) {\cal J}_{\theta_{12}}(r)] \equiv C_{21}^{(ij)} \langle
\kappa_{(i)}^2 \rangle_c \langle \kappa_{(i)}({\hat\Omega}_1) \kappa_{(j)}({\hat\Omega}_2) \rangle_c, \\
\langle \kappa_{(i)}^3({\hat\Omega}_1) \kappa_{(j)}( {\hat\Omega}_2) \rangle_c & = &
(3R_a + 6 R_b){\cal C}_4^{(ij)} [{\cal J}_{\theta_0}(r) {\cal J}_{\theta_{12}}(r)] =
C_{31}^{\eta}{\cal C}_4^{(ij)} [{\cal J}_{\theta_0}(r)^2 {\cal J}_{\theta_{12}}(r)]
\equiv C_{(31)}^{(ij)} \langle
\kappa_{(i)}^2 \rangle_c^2 \langle \kappa_{(i)}({\hat\Omega}_1) \kappa_{j}({\hat\Omega}_2) \rangle_c
,\\
\langle \kappa_{(i)}^2({\hat\Omega}_1) \kappa_{(j)}^2({\hat\Omega}_2) \rangle_c & =
& 4 R_b{\cal C}_4^{(ij)} [{\cal J}_{\theta_0}(r)^2 {\cal J}_{\theta_{12}}(r)]
= C_{(22)}^{\eta}{\cal C}_4^{(ij)} [{\cal J}_{\theta_0}(r)^2 {\cal J}_{\theta_{12}}(r)]
\equiv C_{22}^{(ij)} \langle
\kappa_{(i)}^2 \rangle_c \langle \kappa_{(j)}^2 \rangle_c\langle \kappa_{(i)}({\hat\Omega}_1) \kappa_{(j)}({\hat\Omega}_2) \rangle_c ,\\
\langle \kappa_{(i)}^4({\hat\Omega}_1) \kappa_{(j)}({\hat\Omega}_2)\rangle_c & = &
(24S_a + 36S_b + 4 S_c){\cal C}_5^{(ij)} [{\cal J}_{\theta_0}(r)^3
{\cal J}_{\theta_{12}}(r)] =
C_{41}^{\eta} {\cal C}_5^{(ij)} [{\cal J}_{\theta_0}(r)^3 {\cal J}_{\theta_{12}}(r)]
\equiv C_{41}^{(ij)} \langle
\kappa_{(i)}^2 \rangle_c^3 \langle \kappa_{(i)}({\hat\Omega}_1) \kappa_{(j)}({\hat\Omega}_2)
\rangle_c
,\\ \langle \kappa_{(i)}^3({\hat\Omega}_1) \kappa_{(j)}^2({\hat\Omega}_2) \rangle_c & = &
(12S_a + 6 S_b){\cal C}_5^{(ij)}[{\cal J}_{\theta_0}(r)^3 {\cal J}_{\theta_{12}}(r)] =
C_{32}^{\eta}{\cal C}_5[{\cal J}_{\theta_0}(r)^3 {\cal J}_{\theta_{12}}(r)]
\equiv C_{32}^{(ij)} \langle
\kappa_{(i)}^2 \rangle_c^2 \langle
\kappa_{(j)}^2 \rangle_c \langle \kappa_{(i)}({\hat\Omega}_1) \kappa_{(j)}({\hat\Omega}_2)
\rangle_c.
\end{eqnarray}
where $C_{pq}^{\eta}$ denotes the cumulant correlators for the
underlying mass distribution. These results essentially employ the small angle approximation or Limber's approximation \citep{Limb54}
that are routinely used in computation of higher order cumulants in many cosmological contexts.
Other approximations such as the Born approximation that we use have been verified by testing
against simulations.
In a related, but slightly different context, these lower order statistics can also be helpful in probing the pressure bias
as a function of scale for the study of thermal Sunyaev-Zel'dovich (tSZ) effect or its
cross-correlation
against tomographic weak lensing maps \citep{Mu11b}. The thermal Sunyaev-Zel'dovich effect probes the line-of-sight
integral of electronic pressure fluctuations. Cross-correlating (frequency-cleaned) $y$ maps
from ongoing CMB experiments such as Planck \footnote{http://www.rssd.esa.int/index.php?project=SP}\citep{PC06} against
the weak lensing tomographic maps can provide a redshift resolved picture of reionization
history of the Universe. The signal-to-noise will however decrease with increasing order
of these statistics. Indeed the study of PDF or bias that we undertake next will essentially
combine information from all orders.
In Figure (\ref{fig:var}) and Figure (\ref{fig:sn}) we have plotted the variance and the lower order $S_p$ parameters
respectively as a function of smoothing scale $\theta_0$.
\begin{figure}
\begin{center}
{\epsfxsize=10 cm \epsfysize=5 cm
{\epsfbox[27 440 584 709]{kmin.eps}}}
\end{center}
\caption{The parameter $\kappa_{\rm min}$ is plotted as a function of redshift $z_s$ in the left panel for various
background cosmologies. The right panel shows the source density distribution (not normalised) for the two different surveys. The
lines along the x-axis denote the positions of the tomographic bins considered in our analysis. Notice that
the parameter $\kappa_{\rm min}$ does not depend on smoothing angular scales and depends only on the depth of the survey
as well as on redshift distribution of source population. We consider two different dark energy models along with
$\Lambda$CDM cosmology. The curve qCDM corresponds to $w_0=-0.95$ and the model w0w1 corresponds to an evolving dark
energy model $w(z)=-1 + z/(1+z)$.}
\label{fig:kmin}
\end{figure}
\section{The Generating Function}
\label{gen}
In a scaling analysis of the count probability distribution function (CPDF) the
void probability distribution function (VPF) plays a fundamental
role. It can be related to the generating function of the cumulants
or $S_p$ parameters, $\phi(y)$ (White 1979, Balian \& Schaeffer 1989) :
\begin{equation}
P_v(0) = \exp ( -\bar N \sigma(N_c) ) = \exp \Big ( - { \phi (N_c) \over
\bar \xi_2} \Big ).
\label{eq:scale}
\end{equation}
\noindent
where $P_v(0)$ is the probability of having no ``particles'' in a cell
of volume $v$, $\bar N$ is the average occupancy of these ``cells'', and
$N_c = \bar N {\bar \xi}_2$.
All statistical quantities correspond to underlying density contrast $\delta$. The VPF $P_v(0)$ is a special case of the count probability
distribution function (CPDF) $P_v(N)$. The VPF $P_v(0)$ contains information about the entire CPDF $P_v(N)$.
The VPF is meaningful only for a discrete
distribution of particles and can't be defined for smooth density
fields such as $\delta$ or $\kappa(\theta_0)$. However the scaling
functions $\sigma(y)$ and $\phi(y)$, defined above in Eq.(\ref{eq:scale}),
$\sigma(y) = -{\phi(y)/ y}$, are very useful even for
continuous distributions where they can be used as a generating
function of one-point cumulants or $S_p$ parameters: $\phi(y) = \sum_{p=1}^{\infty} { S^{\delta}_p/p! } y^p$.
The function $\phi(y)$ satisfies the constraint $S^{\delta}_1 = S^{\delta}_2 = 1$
necessary for proper normalization of PDF. The other generating function
which plays a very important role in such analysis is the generating
function for the vertex amplitudes $\nu_n$ associated with nodes appearing in the
tree representation of higher order correlation hierarchy ($Q_3 =
\nu_2$, $R_a = \nu_2^2$ and $R_b = \nu_3$). In practice it is possible
to work with a perturbative expansion of the vertex generating function
${\cal G}(\tau)$. In terms of the vertices this is defined as:
${\cal G}(\tau) = \sum_{n=0}^{\infty} (-1)^{n} {\nu_n \over n!}\, \tau^n$.
However in the highly nonlinear regime a closed form is used.
A more specific model for ${\cal G}(\tau)$, which
is useful to make more specific predictions (Bernardeau \& Schaeffer
1992) is given by ${\cal G}(\tau) = \Big ( 1 + {\tau / k_a} \Big )^{-k_a}$.
We will relate $k_a$ with other parameters of scaling models.
While the definition of the VPF does not involve any specific form of
hierarchical {\em ansatz}, it is important to realize that writing the tree
amplitudes in terms of the weights associated with nodes is only
possible when one assumes a factorisable model of the tree hierarchy
(Bernardeau \& Schaeffer 1992) and other possibilities which do not
violate the tree models are indeed possible too (Bernardeau \&
Schaeffer 1999). The generating functions for tree nodes can be
related to the VPF by solving a pair of implicit equations
(Balian \& Schaeffer 1989),
\begin{equation}
\phi(y) = y {\cal G}(\tau) - { 1 \over 2} y {\tau} { d \over d
\tau} {\cal G}(\tau); \quad\quad \tau = -y { d \over d\tau} {\cal G}(\tau).
\end{equation}
The above description has been limited to the level of constructing
one-point PDF. A more detailed analysis is needed to include the effect of
correlation between two or more correlated volume elements, which will
provide information about bias and cumulant correlators. The bias $b(\delta)$
can be introduced through the following expression for the joint or two-point PDF:
\begin{equation}
p(\delta_1,\delta_2)d\delta_1d\delta_2=p(\delta_1)p(\delta_2)(1+ b(\delta_1)\xi_{12}b(\delta_2))d\delta_1d\delta_2
\end{equation}
The function $\tau(y)$ - sometimes denoted by $\beta(y)$ in the literature -
plays the role of a generating function for the
factorized cumulant correlators $C^{\eta}_{p1}$ ($C^{\eta}_{pq} = C^{\eta}_{p1}C^{\eta}_{q1}$) \citep{BerSch92,B92,B94}:
$\tau(y) = \sum_{p=1}^{\infty} y^p {C^{\eta}_{p1}/p!}$.
We will next consider two different regimes; the quasilinear regime valid at large angular scales
and the highly nonlinear regime valid at smaller angular scales.
\begin{figure}
\begin{center}
{\epsfxsize=6 cm \epsfysize=6 cm
{\epsfbox[27 426 316 709]{variance.eps}}}
\end{center}
\caption{The plots shows the variance in convergence $\langle \kappa^2(\theta_0)\rangle_c$ as a function of
smoothing angular scales $\theta_0$. A top-hat window has been assumed. The curves from top to bottom
correspond to various tomographic bins. The redshift bins correspond to $\Delta z_s =0.2$ and cover a range $z_s=0.2$--$1.4$. The curve qCDM corresponds to $w_0=-0.95$.
The higher curves correspond to the deeper redshift bins. See text for more details.}
\label{fig:var}
\end{figure}
\subsection {The Highly Non-linear Regime}
\label{subsec:non}
The PDF $p(\delta)$ and bias $b(\delta)$ can be related to their
generating functions VPF $\phi(y)$ and $\tau(y)$ respectively
by following equations (Balian \& Schaeffer 1989, Bernardeau \&
Schaeffer 1992, Bernardeau \& Schaeffer 1999),
\begin{equation}
p(\delta) = \int_{-i\infty}^{i\infty} { dy \over 2 \pi i} \exp \Big [ {(
1 + \delta )y - \phi(y) \over \bar \xi_2} \Big ]; \quad
b(\delta) p(\delta) = \int_{-i\infty}^{i\infty} { dy \over 2 \pi i} \tau(y) \exp \Big [ {(
1 + \delta )y - \phi(y) \over \bar \xi_2} \Big ] \label{ber}.
\end{equation}
The function $\phi(y)$ ($\tau(y)$) plays an important role in any calculation involving hierarchical
ansatz because it completely determines
the behavior of the PDF $p(\delta)$ (bias $b(\delta)$ ) for all values of $\delta$. The
different asymptotic expressions of $\phi(y)$ govern the behavior
of $p(\delta)$ for different intervals of $\delta$. For large $y$ we
can express $\phi(y)$ as: $\phi(y) = a y^{ 1 - \omega}$.
No theoretical analysis has been done so far to link the newly introduced parameter $\omega$ and the initial power spectral index $n$. In the highly nonlinear regime, numerical simulations are generally
used to fix $\omega$ for a specific initial condition
(Colombi et al. 1992, 1994, 1995; \cite{MuBaMeSch99}). Typically for a power law initial power spectrum
with spectral index $n=-2$ one obtains $\omega=0.3$. The VPF
$\phi(y)$ and its two-point analog $\tau(y)$
both exhibit a singularity at a small but negative value $y_s$ of $y$,
\begin{equation}
\phi(y) = \phi_s - a_s \Gamma(\omega_s) ( y - y_s)^{-\omega_s}; \quad\quad
\tau(y) = \tau_s - b_s ( y - y_s )^{-\omega_s - 1}.
\end{equation}
For the factorisable model of the hierarchical clustering the
parameter $\omega_s$
takes the value $-3/2$ and $a_s$ and $b_s$ can be expressed in terms
of the nature of the generating function ${\cal G}(\tau)$ and its
derivatives near the singularity $\tau_s$
(Bernardeau \& Schaeffer 1992):
\begin{equation}
a_s = {1 \over \Gamma(-1/2)}{\cal G}'(\tau_s) {\cal G}''(\tau_s) \left [
{ 2 {\cal G}'(\tau_s) {\cal G}''(\tau_s) \over {\cal G}'''(\tau_s)}
\right ]^{3/2}; \quad\quad
b_s = \left [
{ 2 {\cal G}'(\tau_s) {\cal G}''(\tau_s) \over {\cal G}'''(\tau_s)}
\right ]^{1/2}.
\end{equation}
\begin{figure}
\begin{center}
{\epsfxsize=6 cm \epsfysize=6 cm
{\epsfbox[27 426 316 709]{sn.eps}}}
\end{center}
\caption{The skewness parameter $S^{\kappa}_3$ and the kurtosis parameter $S^{\kappa}_4$ is plotted for different redshift bins.
Three different cosmologies are displayed as before.}
\label{fig:sn}
\end{figure}
As mentioned before the parameter $k_a$ which we have introduced in
the definition of
${\cal G}(\tau)$ can be related to the parameters $a$ and $\omega$ appearing
in the asymptotic expressions of $\phi(y)$ (Balian \& Schaeffer 1989,
Bernardeau \& Schaeffer 1992),
\begin{equation}
\omega = {k_a / ( k_a + 2)}; \quad\quad
a = {k_a + 2 \over 2}\, k_a^{ k_a / (k_a + 2)}. \label{ka}
\end{equation}
Similarly the parameter $y_s$ which describes the behavior
of the function $\phi(y)$ near its singularity can be
related to the behavior of
${\cal G(\tau)}$ near $\tau_s$ which is the solution of the equation
(Balian \& Schaeffer 1989, Bernardeau \& Schaeffer 1992),
$\tau_s = {{\cal G}'(\tau_s)/ {\cal G}''(\tau_s) }$,
finally we can relate $k_a$ to $y_s$ by following expression (see eq. (\ref{ka})):
$y_s = - { \tau_s / {\cal G}'(\tau_s)}$, or we can write:
\begin{equation}
-{ 1 \over y_s} = x_{\star} = {1 \over k_a } { (k_a + 2)^{k_a + 2} \over (k_a + 1)^{k_a+1}}.
\end{equation}
The newly introduced variable $x_\star$ will be useful to define the
large $\delta$ tail of the PDF $p(\delta)$ and the bias $b(\delta)$.
The asymptotes of $\phi(y)$
are linked with the behavior of $p(\delta)$ for various regimes of
$\delta$. For very large values of the variance $\bar\xi_2$
it is possible to define a scaling function $p(\delta) = { h(x)/ \bar\xi_2^2 }$ which will encode
the scaling behavior of the PDF, where $x$ plays the role of the scaling
variable and is defined as $x={(1 + \delta)}/\bar \xi_2$. We list below
different ranges of $\delta$ and specify the behavior of $p(\delta)$
and $b(\delta)$ in these regimes (Balian \& Schaeffer 1989).
\begin{equation}
{\bar \xi_2 }^{ - \omega/( 1 - \omega)} \gg 1 + \delta \gg \bar \xi_2;
~~~~~~
p(\delta) = { a \over \bar \xi_2^2} { 1- \omega \over \Gamma(\omega)}
\Big ( { 1 + \delta \over \bar \xi_2 } \Big )^{\omega - 2}; ~~~~~
b(\delta) = \left ( {\omega \over 2a } \right )^{1/2} { \Gamma
(\omega) \over \Gamma [ { 1\over 2} ( 1 + \omega ) ] } \left( { 1 +
\delta \over \bar \xi_2} \right)^{(1 - \omega)/2}
\end{equation}
\begin{equation}
1+ \delta \gg {\bar \xi}_2; ~~~~
p(\delta) = { a_s \over \bar \xi_2^2 } \Big ( { 1 + \delta \over \bar
\xi_2} \Big ) \exp \Big ( - { 1 + \delta \over x_{\star} \bar \xi_2}
\Big ); ~~~~~ b(\delta) = -{ 1 \over {\cal G}'(\tau_s)} {(1 + \delta)
\over { {\bar \xi}_2}}
\label{eq:bias}
\end{equation}
\begin{figure}
\begin{center}
{\epsfxsize=10. cm \epsfysize=5. cm
{\epsfbox[27 426 590 709]{error.eps}}}
\end{center}
\caption{The left panel shows the $1\sigma$ error in estimation of the variance $\langle\kappa^2\rangle_{\rm c}$ and
the right panel shows the error of estimation in the skewness parameter $S_3$ as a function of tomographic redshift. The angular scale
is fixed at $\theta_s=5'$. Various curves correspond to a choice of
the intrinsic ellipticity distribution of galaxies $\sigma_{\epsilon}$
and number of galaxies $n_{g}$ (=number of galaxies/arcmin$^2$) are as depicted. The solid lines are the difference between the $\Lambda$CDM model and the
qCDM model. The scatter is computed using the formalism developed in \citep{VaMuBa05}.}
\label{fig:sn_err}
\end{figure}
The integral constraints satisfied by scaling function $h(x)$ are
$ S^{\eta}_1 = \int_0^{\infty} x h(x) dx = 1$ and
$ S^{\eta}_2 = \int_0^{\infty} x^2 h
(x) dx = 1$. These take care of
normalization of the function $p(\delta)$. Similarly the
normalization constraint over $b(\delta)$ or equivalently $b(x)$ can be expressed as
$C^{\eta}_{11} = \int_0^{\infty} x b(x)h(x)dx = 1$, which translates into
$\int_{-1}^{\infty} d\delta b(\delta)p(\delta) = 0$ and
$\int_{-1}^{\infty} d\delta\, \delta\, b(\delta)p(\delta) = 1$.
Several numerical
studies have produced the behavior of $h(x)$ and $b(x)$
for different initial conditions (e.g. Colombi et al. 1992,1994,1995; Munshi et
al. 1999, Valageas et al. 1999). For very small values of $\delta$ the behavior of
$p(\delta)$ is determined by the asymptotic behavior of $\phi(y)$
for large values of $y$, and it is possible to define another scaling function
$g(z)$ which is completely determined by
$\omega$, the scaling parameter can be expressed as $z = (1+
\delta)a^{-1/(1-\omega)}{\bar \xi}_2^{\omega /(1 - \omega)}$.
However numerically it is much easier to determine $\omega$
from the study of $\sigma(y)$ compared to the study of $g(z)$
(e.g. Bouchet \& Hernquist 1992).
\begin{equation}
1 + \delta \ll \bar \xi_2;~~~~
p(\delta) = a^{ -1/(1 - \omega)} [{\bar \xi}_2]^{ \omega/(1 -
\omega) } \sqrt { ( 1 - \omega )^{ 1/\omega } \over 2 \pi \omega z^{(1
+ \omega)/ \omega } } \exp \Big [ - \omega \Big ( {z \over 1 - \omega}
\Big )^{- {{1 - \omega} \over \omega}} \Big ]; ~~~~~~~b(\delta) = -
\left ( {2 \omega \over \bar{ \xi}_2} \right )^{1/2} \left ({ 1 -
\omega \over z} \right )^{(1 - \omega)/2 \omega}
\end{equation}
To summarize, the entire behaviour of the PDF
$p(\delta)$ and bias $b(\delta)$ are
encoded in two different scaling functions, $h(x)$ and $g(z)$.
These scaling functions are relevant for small and large $\delta$ behavior
of the function $p(\delta)$ and $b(\delta)$. Typically the PDF
$p(\delta)$ shows a cutoff at
both large and small values of $\delta$ and it exhibits a
power-law in the middle. The power law behavior is prominent in highly non-linear
regime. With the decrease in $\bar \xi_2$ the range of $\delta$
for which $p(\delta)$ shows such a power law behavior decreases
finally to vanish for the case of very small variance i.e. in the
quasi-linear regime. Similarly the bias is a very small and slowly
varying function for moderately over dense objects but increases
rapidly for over-dense objects. These deductions are in qualitative agreement
with results from the halo model based approaches.
\subsection{The Quasi-linear Regime}
\label{subsec:quasi}
The generating function formalism was used by \cite{B92,B94} in the quasilinear
regime. Unlike highly nonlinear regime the quasilinear regime can be dealt with
using perturbative analysis. This particular analysis assumes the variance
of the smoothed density contrast is smaller than unity. The generating function
formalism was used to construct the PDF and bias using the tree-level
perturbation theory to arbitrary order. In this regime the scaling parameters
$\omega$ and $k_a$ can be expressed in terms of the initial power spectral index $n$.
In general the numerical values of the parameters $k_a$ or $\omega$ characterizing VPF
or CPDF are different from their highly non-linear values.
The PDF and bias now can be expressed in terms of ${\cal G}_{\delta}(\tau)$
(Bernardeau 1992; Bernardeau 1994):
\begin{eqnarray}
&&p(\delta)d \delta = { 1 \over -{\cal G}_{\delta}'(\tau) } \Big [ { 1 - \tau {\cal G}_{\delta}''(\tau)
/{\cal G}_{\delta}'(\tau) \over 2 \pi {\bar \xi}_{2} } \Big ]^{1/2} \exp \Big ( -{ \tau^2
\over 2 {\bar \xi}_{2}} \Big ) d \tau; ~~~~~ b(\delta) = - \left (
{k_a \over \bar \xi_2} \right ) \left [ ( 1 + {\cal G}_{\delta}(\tau)
)^{1/k_a} - 1 \right ] , \\
&&{\cal G}_{\delta}(\tau) = {\cal G}(\tau) - 1 = \delta.
\end{eqnarray}
The above expression is valid for $\delta < \delta_c$ where the $\delta_c$
is the value of $\delta$ which cancels the numerator of the pre-factor
of the exponential function appearing in the above expression. For
$\delta > \delta_c$ the PDF develops an exponential tail which is
related to the presence of singularity in $\phi(y)$ in a very similar
way as in the case of its highly non-linear counterpart (Bernardeau
1992; Bernardeau 1994).
\begin{equation}
p(\delta) d \delta = { 3 a_s \sqrt {{\bar \xi}_2} \over 4 {\sqrt \pi} }
\delta^{-5/2} \exp \Big [ -|y_s|{ \delta \over {\bar \xi}_{2}} + {|\phi_s|
\over {\bar \xi}_{2}} \Big ] d \delta; ~~~~~b(\delta) = -{ 1 \over
{\cal G}'(\tau_s)} {(1 + \delta)
\over { {\bar \xi}_2}}
\end{equation}
These expressions were used by \cite{MuJai01, MuJai00} and \cite{Valageas00} for the construction of
weak lensing PDF in projection. The bias was studied in \citep{Mu00} for projected surveys.
The tests against numerical simulations show remarkable agreement for a range of angular scales.
Later studies refined these results as well as incorporated various different smoothing windows
\citep{MuVaBa04,VaMuBa05}. We extend these results derived for surveys in
projection to tomographic surveys in this paper.
It is worth mentioning that,
there have been various attempts to extend the perturbative results to the highly nonlinear
regime (see e.g. \citep{CBBH97,VaMu04})
\begin{figure}
\begin{center}
{\epsfxsize=7. cm \epsfysize=7. cm
{\epsfbox[27 426 316 709]{eta.eps}}}
\end{center}
\caption{The PDF $p(\eta)$ of the {\em reduced} convergence $\eta$ as a function of $\eta=1+\delta$. The plots
with decreasing peak height correspond to lower redshift bins. Two different approximations
are being compared. The solid line correspond to the lognormal approximation and the
dashed line correspond to the perturbative calculations. The results are shown for a smoothing
angular scale $\theta_s=5'$.}
\label{fig:eta_pdf}
\end{figure}
\section{The Lognormal Distribution}
\label{sec:logn}
An alternative to the hierarchical ansatz for the description of the matter PDF is
the {\em lognormal} distribution, which
has long been known as a successful
empirical prescription for the characterization of the dark matter distribution as well as the
observed galaxy distribution \citep{Ham85,CJ91,Bouchet93,Kf94}. Detailed discussion for comparison
of lognormal distribution and the
perturbative calculations can be found in \citep{BK95}. The lognormal distribution
was further generalized to the {\it skewed}-lognormal distribution \citep{Col94}. In general a
variable might be modelled as lognormal if it can be thought of as the multiplicative product of
many independent random variables.
Although inherently local in nature, the lognormal distribution can provide a good fit to both
one-point PDF and its generalisation to compute its two-point analog and hence the bias \citep{TTHF02}.
The one- and two-point lognormal PDF can be expressed as \citep{KTS01}:
\begin{eqnarray}
&& p_{\rm ln}(\delta)d\delta = {1 \over \sqrt {2\pi}\,\Sigma} \exp \left [ -{\Delta^2 \over 2\Sigma^2}\right ]{d\delta \over 1+\delta}; \quad\quad
\Sigma^2=\ln(1+\sigma^2); \quad\quad \Delta = \ln[(1+\delta)\sqrt{1+\sigma^2}];\label{eq:logn1a} \\
&& p_{\rm ln}(\delta_1,\delta_2)d\delta_1 d\delta_2 = {1 \over 2\pi \sqrt {\Sigma^2 - X_{12}^2}}\exp \left [ -{\Sigma(\Delta_1^2 + \Delta_2^2) -2X_{12}\Delta_1\Delta_2 \over 2(\Sigma^2 - X_{12}^2)}\right ] {d\delta_1\over 1+\delta_1} {d\delta_2\over 1+\delta_2}; \label{eq:logn2}\\
&& \Delta_i = \ln[(1+\delta_i)\sqrt{1+\bar\xi_2^2}]; \quad X_{12}=\ln(1+\xi_{12})
\label{eq:logn2a}
\end{eqnarray}
\noindent
In the limiting case of large separation $X_{12}\rightarrow 0$ we can write down the two point PDF
\begin{equation}
p_{\rm ln}(\delta_1,\delta_2)= p_{\rm ln}(\delta_1)p_{\rm ln}(\delta_2)[1+ b_{\rm ln}(\delta_1)\xi_{12}b_{\rm ln}(\delta_2)]; \quad\quad b_{\rm ln}(\delta_i)= \Delta_i/\Sigma_{i}.
\end{equation}
\noindent
It is however easier to estimate the cumulative or integrated bias associated with objects beyond a certain density threshold $\delta_0$.
This is defined as $ b_{\rm ln}(\delta>\delta_0)=\int_{\delta_0}^{\infty} p_{\rm ln}(\delta) b_{\rm ln}(\delta) d\delta / \int_{\delta_0}^{\infty} p_{\rm ln}(\delta) d\delta$.
In the low variance limit ${\bar\xi}_2 \rightarrow 0$ the usual Gaussian result is
restored $b(\delta)= \delta/{\bar\xi}_2$. The parameters $\Delta,\Delta_i, X_{12}, \Sigma$ that
we have introduced above can be expressed in terms of the two-point (non-linear) correlation function
$\xi_{12} = \langle \delta_1\delta_2 \rangle$ and the nonlinear variance $\sigma^2 = \langle \delta^2 \rangle$ of the smoothed density field.
To understand the construction of the lognormal distribution, we introduce a Gaussian PDF in a variable $x$;
$p(x) = (2 \pi \Sigma^2)^{-1/2} \exp[-(x-\mu)^2/2\Sigma^2] $. With a change of variable $x = \ln(t)$ we can
write down the PDF of $t$, which is a lognormal distribution $p(t) = (2\pi\Sigma^2)^{-1/2} \exp [-(\ln(t) -\mu)^2/2\Sigma^2]/t$.
The extra factor of $(1/t)$ stems from the fact: $dt/t = dx$. Note that $t$ is positive definite and
is often associated with $\rho/\rho_0 = 1 +\delta$, which means $\langle t \rangle =1$.
The moment generating function for the lognormal in terms of the mean $\mu$ and the variance $\Sigma$ has the
following form: $\langle t^n \rangle = \exp(n\mu + n^2 \Sigma^2/2)$.
This however leads to the fact
that if the underlying distribution of $x$ or the density is Gaussian we will have to impose the
condition: $\mu=-\Sigma^2/2$. Here in our notation above $\Sigma$ is the variance of the underlying
Gaussian field. The variance of $t$, defined as $\langle t^2 \rangle - \langle t \rangle^2$, equals $\exp(\Sigma^2) -1 = \sigma^2$. So we can write
$\Sigma^2 = \ln(1+\sigma^2)$. This is the result that was used above. The generalization to
two-point or bi-variate PDF can be achieved following the same arguments and can be found in \citep{KTS01}.
In contrast to the lognormal model, in the widely-used non-local ansatz for the evolution of the variance that
was introduced by \cite{HKLM}, the evolved nonlinear two-point correlation function is
linked to that of the initial or linear two-point correlation at a different scale:
$\bar\xi_2^{nl}(R_{\rm nl})=f_{nl}[\bar\xi_2^{lin}(R_{lin})]$. The two different length scales are related by the following
expression: $R_{\rm nl}^3= (1+\bar\xi^{nl}_2(R_{\rm nl}))R_{lin}^3$. Such an ansatz is derived using the pair conservation equation.
The non-linear (Eulerian) length scale $R_{\rm nl}$ is linked to the linear (Lagrangian)
length scale $R_{\rm lin}$ from where the
structure has collapsed. Numerical simulations are typically used for the determination of
the fitting function $f_{\rm nl}$ \citep{PD94}. However, simpler asymptotic power-law
forms exist in different regimes of gravitational clustering \citep{MuPa97}.
The validity and limitations of the one-point and two-point PDFs have been
studied extensively in the literature against N-body simulations.
In \cite{B92,B94} it was shown that the PDF computed from the perturbation theory
in a weakly nonlinear regime approaches the lognormal distribution function only when
the primordial power spectrum is locally of the form $P(k) \propto k^{n_{e}}$ with the
effective local spectra slope of the power spectrum $n_{e} \sim -1$. It was also
shown that in the weakly nonlinear regime the lognormal distribution is equivalent
to the hierarchical model with a generating function ${\cal G}(\tau) = \exp(-\tau)$.
This leads to the following skewness and kurtosis parameters:
\begin{equation}
S^{\eta}_3 = 3 + \sigma^2; \quad\quad S^{\eta}_4 = 16 + 15\sigma^2 + 6 \sigma^4 + \sigma^6.
\end{equation}
In general the limit $\sigma^2 \rightarrow 0$ leads to $S_p^{\eta} = p^{p-2}$.
On this basis \cite{BK95} argues that the agreement of lognormal PDF with
numerical simulations should be interpreted as purely accidental and the
success of the lognormal model is simply related to the fact that for all
scales relevant to cosmology the CDM power spectrum can be approximated with
a power law with effective slope $n_{e} \approx -1$. However, in subsequent
studies using numerical simulations it was shown by various authors
that the lognormal distribution very accurately describes the cosmological
distribution functions even in the nonlinear regime $\sigma \le 4$ for a relatively
high values of density contrast $\delta < 100$ (see e.g. \cite{KTS01}).
There is no complete analytical description of gravitational clustering in the highly nonlinear
regime. However several dynamical approximations were proposed in the past to mimic
certain features of gravitational clustering beyond the weakly nonlinear regime.
The {\it Frozen Flow} Approximation (FFA) proposed by \citep{MLMS92} is one such
approximation. Using perturbative techniques it was shown by \citep{MSS94} that the
FFA develops exactly the same generating function as the lognormal approximation
in the quasilinear regime.
The error estimates for various lower order $S_p$ (right panel) and the variance (left-panel)
are shown in Figure (\ref{fig:sn_err}). A complete analytical formalism for calculation of error are
given in \cite{VaMuBa05}. It requires the knowledge of higher order $S_p$ parameters.
In our calculation of error the higher order $S_p$ parameters are modelled according to
lognormal distribution. Different levels of noise, as characterized by the parameters that
describe intrinsic ellipticity distribution $\epsilon$ and number density of galaxies/arcmin$^2$ or
$n_g$ are considered.
\section{ The PDF and bias of smoothed redshift-resolved convergence maps}
\label{sec:pdf_bias}
For computing the probability distribution function of the smoothed
convergence field for individual tomographic maps $\kappa^{(i)}(\theta_0)$, we will begin by constructing
its associated cumulant generating function for individual tomographic bins $\Phi^{(i)}_{1+\kappa(\theta_0)}(y)$.
The construction is based on modelling of the volume-averaged higher order correlation function $\langle
\kappa_{(i)}^p(\theta_0) \rangle_c$ in terms of the matter correlation hierarchy:
\begin{equation}
\Phi^{(i)}_{1 + {\kappa(\theta_0)}}(y) = y + \sum_{p=2}^ {\infty} {{\langle
\kappa_{(i)}^p(\theta_0) \rangle_c} \over \langle \kappa_{(i)}^2 (\theta_0 )
\rangle_c^{p-1}} y^p.
\end{equation}
\begin{figure}
\begin{center}
{\epsfxsize=6. cm \epsfysize=6. cm {\epsfbox[27 426 315 709]{eta_bias.eps}}}
\end{center}
\caption{The cumulative bias $b(>\eta)$ of the {\em reduced} convergence $\eta=1+\delta$ is plotted as a function of $\eta$
for various redshift bins. The smoothing angular scale is $\theta_0=5'$. As before two different approximations are
considered. The lognormal approximation (solid lines) and the perturbative calculations (dashed lines) reproduce
nearly identical results. The curves that saturates at a higher values of cumulative bias for higher values of $\eta$ correspond
to larger smoothing angular scales. }
\label{fig:eta_bias}
\end{figure}
Now using the expressions for the higher moments of the convergence $\kappa(\theta_0)$
in terms of the matter power spectrum, Eq.\ref{kappa_variance}
and Eq.\ref{hui} gives:
\begin{equation}
\Phi^{(i)}_{1 + {\kappa({\theta_0})}}(y) \equiv \sum_{p=1} {S^{(i)\kappa}_p \over p!}y^p = y + \int_0^{r_s} \sum_{p=2}^{\infty}
{ 1 \over p!} S^{\eta}_p{\omega_{(i)}^p(r) \over d_A^{2(p-1)}(r)}
\Big [ {{\cal J}_{\theta_0}(r) \over\bar\xi_2^{(i)\kappa}(\theta_0)} \Big ]^{ (p-1)} y^p; \quad\quad \bar\xi_2^{(i)\kappa} \equiv \langle \kappa^2(\theta_0)\rangle_c .
\end{equation}
We can now use the definition of $\phi(y)$ for the matter cumulants to
express $\Phi_{1 + \kappa(\theta_0)}(y)$, in terms of $\phi(y)$:
\begin{equation}
\Phi^{(i)}_{1+\kappa_{\theta_0}}(y) = \int_0^{r_s} dr
\Big[ { d_A^2(r) \bar\xi_2^{(i)\kappa}(\theta_0) \over {\cal J}_{\theta_0}(r)} \Big
] \phi \Big [{\omega_{(i)} (r) \over d_A^2 (r)} {{\cal J}_{\theta_0}(r) \over \bar\xi_2^{(i)\kappa}(\theta_0)}y \Big ] -y \kappa_{(i)}^{\rm min}; \quad
\kappa^{\rm min}_{(i)} = - \int_0^{r_s}\; dr\;\omega_{(i)}(r) .
\end{equation}
The extra term comes from the $p=1$ term in the expansion
of $\Phi_{1+\kappa_{\rm }(\theta_0)}$.
Note that we have used the fully non-linear generating function $\phi$
for the cumulants, though we will use it to construct a generating
function in the quasi-linear regime. The analysis becomes much easier if we define a new reduced convergence
field:
\begin{equation}
\eta_{(i)}({\theta_0}) = { \kappa^{\rm min}_{(i)}-\kappa_{(i)}({\theta_0}) \over
\kappa^{\rm min}_{(i)}} = 1 + {\kappa_{(i)}({\theta_0})\over |\kappa^{\rm min}_{(i)}| }.
\end{equation}
Here the minimum value of $\kappa_{(i)}(\theta_0)$ i.e. $\kappa^{\rm min}_{(i)}$ occurs
when the line-of-sight goes through regions that are completely empty of
matter (i.e. $\delta = -1$ all along the line of sight in a redshift window that defines a specific bin i.e. Eq.(\ref{eq:omegai})).
While $\kappa_{(i)}({\theta_0})$ depends on the smoothing
angle, its minimum value $\kappa^{\rm min}_{(i)}$
depends only on the source redshift and background geometry of the
universe and is independent of the smoothing radius. With the reduced
convergence $\eta$, the cumulant generating function is given by,
\begin{equation}
\Phi^{(i)}_{\eta} (y) = { 1 \over [{\kappa^{\rm min}_{(i)}}]} \int_0^{r_s} dr
\Big [{ d_A^2(r) \over {\kappa^{\rm min}_{(i)}}}{ \bar\xi_2^{(i)\kappa}(\theta_0) \over {\cal J}_{\theta_0}(r) }\Big ] \phi \Big [
{{\kappa^{\rm min}_{(i)}} \over d_A^2(r)} {{\cal J}_{\theta_0}(r) \over \bar\xi_2^{(i)\kappa}(\theta_0)}y \Big ].
\end{equation}
The thus constructed cumulant generating function
$\Phi_{\eta}(y)$ satisfies the normalization constraints $S^{\eta}_1 = S^{\eta}_2 = 1$.
The scaling function associated with $P(\eta)$ can now be
easily related with the matter scaling function $h(x)$
introduced earlier:
\begin{equation}
h^{(i)}_{\eta} (x) = - \int_{-i\infty}^{i\infty} { dy \over 2 \pi i} \exp (x
y) \Phi^{(i)}_{\eta} (y); \quad\quad
h_{\eta}^{(i)} (x) = { 1 \over [{\kappa^{\rm min}_{(i)}}]}
\int_0^{r_s} dr
\Big [ {\bar\xi_2^{(i)\kappa}(\theta_0) \over {\cal J}_{\theta_0}(r)
{\kappa^{\rm min}_{(i)}} } \Big ]^2
h \Big ({\bar\xi_2^{(i)\kappa}(\theta_0) x \over \omega_{(i)}(r)
{\cal J}_{\theta_0}(r) {\kappa^{\rm min}_{(i)}} } \Big ).
\end{equation}
\noindent
While the expressions derived above are exact, and are derived for the most
general case, using only the small angle approximation, they can be
simplified considerably using further approximations. In the following we will
assume that the contribution to the $r$ integrals can be
replaced by an average value coming from the maximum of $\omega_{(i)}(r)$,
i.e. $r_c$ ($0<r_c<r_s$). So we replace $\int f(r) dr$
by $1/2 f(r_c)\Delta_{r}$ where $\Delta_{r}$ is the
interval of integration, and $f(r)$ is the function of comoving radial distance $r$ under
consideration. Similarly we replace the $\omega(r)$ dependence in
the ${\bf k}$ integrals by $\omega_{(i)}(r_c)$.
\begin{equation}
|\kappa_{(i)}^{\rm min}| \approx {1\over 2} r_s \omega_{(i)}(r_c),
\quad\quad \bar\xi_2^{(i)\kappa}(\theta_0) \approx {1\over 2} r_s {\omega_{(i)}(r_c)\over d_A(r_c)}{\omega_{(i)}(r_c)\over d_A(r_c)} \Big [ \int {d^2 {\bf l} \over
(2\pi)^2} {\rm P_{\delta}(k)} W_{\rm TH}^2(l\theta_0) \Big ].
\label{eq:approx}
\end{equation}
Under these approximations we can write: $\Phi^{(i)}_{\eta}(y) = \phi^{(i)}(y)$ and $h^{(i)}_{\eta}(x) = h(x)$.
Thus we find that the statistics of the underlying field $\eta=1+\delta$ and the
statistics of
the reduced convergence $\eta$ are exactly the same under this
approximation. Though we derived the results from considering a
specific form of hierarchical ansatz, the final result is remarkably
general. It simply means that independent of detailed modelling
the reduced convergence will always follow the statistics of underlying
mass density distribution.
Finally, we can express the relations connecting the probability distribution
function for the smoothed convergence statistics
$\kappa_{\rm }({\theta_0})$, the reduced
convergence $\eta({\theta_0})$, i.e. for individual bins we can write:
\begin{equation}
p_{(i)}(\kappa) ={p(\eta^{(i)})/|\kappa^{\rm min}_{(i)}|}.
\label{eq:pdf}
\end{equation}
\begin{figure}
\begin{center}
{\epsfxsize=12. cm \epsfysize=6. cm {\epsfbox[27 426 585 709]{kappa_pdf_bias.eps}}}
\end{center}
\caption{The left panel shows the PDF of the redshift resolved convergence and the right panel shows the associated cumulative
bias. The smoothing angular scales considered is $\theta_0 = 5'$. Only three tomographic bins are chosen for display to avoid cluttering.
Two different approximations are used; the lognormal distribution and the perturbative calculations. The approximations
give near identical results. Three different redshift bins are displayed $z_s=0.698,1.095,1.493$ (top,middle and bottom curve
respectively).}
\label{fig:kappa_pdf_bias}
\end{figure}
This is one of the most important results in this paper.
A few comments are in order. While it is possible to integrate the exact expressions of the scaling functions,
there is some uncertainty involved in the actual determination of these functions and
associated parameters, such as $\omega, k_a, x_{\star}$, from N-body simulations, and it is
unclear how much there is to gain by doing exact calculations that involve an approximate picture of
the underlying mass distribution; see e.g. Munshi et al. (1999), Valageas et al. (1999) and
Colombi et al. (1996) for a detailed description of the effect of the finite volume correction involved in their
estimation. Throughout our analysis we have used a top-hat filter for smoothing
the convergence field, but the results can easily be extended to
compensated or Gaussian filters \citep{BV00}. Using Eq.(\ref{eq:pdf}) one can
derive an approximate expression for the lower order moments which can, in turn, be used
to derive order of magnitude relations $S^{\kappa}_{\rm p} = S^{\eta}_{\rm p}/[k^{\rm min}_{(i)}]^{{\rm p}-2}$ for $\rm p>2$.
The parameters $S^{\eta}_{\rm p}$ for the underlying density contrast $\delta$ are specified
by the choice of a specific hierarchical model. The computation of error bars for these lower order
moments can be done using formalism developed in \cite{MuCo03}.
In Figure (\ref{fig:eta_pdf}) we show the PDF of the reduced convergence $\eta$ for smoothing angular scale $\theta_0=5'$ for the lognormal and hierarchical approximation discussed above,
for individual redshift bins as well as for a projected survey.
\subsection{The bias associated with convergence maps}
To compute the bias associated with the peaks in the smoothed convergence $\kappa$ field we
have to first develop an analytic expression for the generating field
$\beta_{\kappa}(y_1, y_2)$ for the convergence field $\kappa^{\rm }(\theta_0)$. We will
avoid displaying the smoothing angular scale $\theta_0$ for brevity. Throughout the statistics are
for smoothed convergence fields. For that we will use the usual definition for the two-point
cumulant correlator $C_{pq}$ for the convergence field (for a complete
treatment see Munshi \& Coles, 1999b):
\begin{equation}
C^{(ij)}_{pq} = {\langle \kappa_{(i)}({\hat\Omega}_1)^p \kappa_{(j)}({\hat\Omega}_2)^q \rangle_c / [{\bar\xi_2^{(i)}}]^{p-1}[{\bar\xi_2^{(j)}}]^{q-1}
\xi^{(ij)}_{12} }.
\end{equation}
We will show that, like its density field counterpart the
two-point generating function for the convergence field $\kappa_{\rm }$
can be
expressed (under certain simplifying assumptions) as a product
of two one-point generating functions $\beta^{\rm }(y)$
which can then be directly related to the bias associated with
``hot-spots'' in the convergence field.
\begin{equation}
\beta^{(ij)}_{\eta}(y_1,y_2) = \sum_{p,q}^{\infty} {C^{\eta(ij)}_{pq} \over p! q!} y_1^p y_2^q =
\sum_{p}^{\infty} {C^{\eta(i1)}_{p1} \over p!} y_1^p \sum_{q}^{\infty} {C^{\eta(j1)}_{q1}
\over q!} y_2^q = \beta^{(i)}_{\eta}(y_1) \beta^{(j)}_{\eta}(y_2).
\end{equation}
\noindent
It is clear that the factorization of generating function
depends on the factorization property of the cumulant correlators i.e.
$C^{\eta}_{pq} = C^{\eta}_{p1} C^{\eta}_{q1}$. Note that such a factorization is
possible when the correlation of two patches in the directions
${\hat\Omega}_1$ and ${\hat\Omega}_2$ ${[\xi^{(ij)\kappa}_{12}]}$ is smaller compared to the variance
${[\bar \xi^{(ij)}]}$ for the smoothed patches.
We will now use the integral expression for cumulant correlators
(Munshi \& Coles 1999a) to
express the generating function which in turn uses the hierarchical
{\em ansatz} and the far field approximation as explained above
\begin{equation}
\beta^{(ij)}_{\kappa}(y_1, y_2) = \sum_{p,q}^{\infty} {C^{\eta(ij)}_{pq} \over p! q! } { 1 \over
[\bar\xi_{2}^{(i)}]^{p-1}}{ 1 \over
[\bar\xi_{2}^{(j)}]^{q-1}} { 1 \over \xi^{12}_{ij}}
\int_0^{r_s} dr
{ \omega_{(i)}^{p}(r) \omega_{(j)}^q(r) \over d_A(r)^{2(p+q -1)} }[{\cal J}_{\theta_{12}}(r)]
[{{\cal J}_{\theta_0}(r)}]^{p+q-2} y_1^p y_2^q.
\end{equation}
\noindent
It is possible to further simplify the above expression by separating the
summation over dummy variables $p$ and $q$, which will be useful to
establish the factorization property of the two-point generating function
for bias $\beta^{(ij)}(y_1, y_2)$.
We can now decompose the double sum over the two indices into two
separate sums over individual indices. Finally, using the definition of
the one-point generating function for the cumulant correlators
we can write:
\begin{equation}
\beta^{(ij)}_{\kappa}(y_1, y_2) = {\int_0^{r_s}} dr\; d_A^2(r) {{{\cal J}_{\theta_{12}}(r)} \over {[\xi^{(ij)\kappa}_{12}]} } { {[\bar \xi_2^{(i)\kappa}]} \over {{\cal J}_{\theta_0}(r)} }{ {[\bar \xi_2^{(j)\kappa}]} \over {{\cal J}_{\theta_0}(r)} } \nonumber
\beta^{(i)}_{\eta} \Big ( { y_1 \over {[\bar \xi_2^{(i)\kappa}]}} {\omega_{(i)}(r) \over d^2_A(r)
} {{\cal J}_{\theta_0}(r)} \Big ) \beta^{(j)}_{\eta} \Big ( { y_2 \over {[\bar \xi_2^{(j)\kappa}]}} {\omega_{(j)}(r) \over d^2_A(r)
} {{\cal J}_{\theta_0}(r)} \Big ).
\end{equation}
\begin{figure}
\begin{center}
{\epsfxsize=12. cm \epsfysize=6. cm {\epsfbox[27 426 584 709]{dark_energy.eps}}}
\end{center}
\caption{We compare the PDF and cumulative bias associated with tomographic convergence maps for various tomographic bins
for two different dark energy models against the $\Lambda$CDM model. The left panel plots the ratio
$\Delta_p(\kappa)= p(\kappa)-p_{\Lambda{\rm CDM}}(\kappa)$ for a smoothing angular scale of $\theta_0=5'$. The right panel
depicts the ratio $\Delta^{(i)}_b(>\kappa)= b^{(i)}(>\kappa)-b^{(i)}_{\Lambda{\rm CDM}}(>\kappa)$ for the cumulative bias.
Two different redshift bins are displayed, $z_s=0.698$ and $1.095$. For a given smoothing angular scale and
a fixed redshift, the (thick) curves with higher positive peak heights
correspond to the model $w_0=-1, w_1=1$ and the (thin) ones with lower peak heights correspond to $w_0=-0.9$, $w_1=0$.}
\label{fig:dark}
\end{figure}
\noindent
The above expression is quite general within the small-angle
and large-separation approximations, and is valid for any
given specific model for the generating function ${\cal G}_{\delta}(\tau)$. However it is easy
to notice that the projection effects as encoded in the line of sight
integration do not allow us to write down the two-point generating
function $\beta_{\kappa}(y_1, y_2)$ simply as a product of two one-point generating functions $\beta_{\eta}(y)$ as
was the case for the density field $1+ \delta$.
\begin{equation}
\beta_{\kappa}^{(ij)}(y_1, y_2) = {\int_0^{r_s}} \; dr\; {d_A(r)\over [\kappa^{min}_{(i)}]}{d_A(r)\over [\kappa^{min}_{(j)}]}
{{{\cal J}_{\theta_{12}}(r)} \over {[\xi^{(ij)\kappa}_{12}]} } { {[\bar \xi^{(ij)}]}^2 \over {{\cal J}_{\theta_0}(r)}^2
}
\beta^{(i)}_{\eta} \Big ( { y_1 \over {[\bar \xi_2^{(i)\kappa}]}} {\omega_{(i)}(r) \over d^2_A(r)
} {{{\cal J}_{\theta_0}(r)} \over |{\kappa^{\rm min}_{(i)}}|} \Big )
\beta^{(j)}_{\eta} \Big ( { y_2 \over {[\bar \xi_2^{(j)\kappa}]}} {\omega_{(j)}(r) \over d^2_A(r)
} {{{\cal J}_{\theta_0}(r)} \over |{\kappa^{\rm min}_{(i)}}|} \Big ).
\end{equation}
We use the following equation in association with Eq.(\ref{eq:approx}) to simplify the above expression:
\begin{equation}
{[\xi^{(ij)\kappa}_{12}]} \approx {1\over 2} r_s {\omega_{(i)}(r_c) \over d_A(r_c) } {\omega_{(j)}(r_c) \over d_A(r_c) }
\Big [ \int {d^2 {\bf l} \over (2\pi)^2} {\rm P_{\delta}(k)} W_{\rm TH}^2(l \theta_0) \exp [il \cdot \theta_{12}] \Big ].
\end{equation}
\noindent
Use of these approximations gives us the leading order contributions
to these integrals and we can check that to this order we recover the
factorization property of the generating function i.e. $\beta^{(ij)}_{\eta}(y_1,
y_2) = \beta^{(i)}_{\eta}(y_1) \beta^{(j)}_{\eta}(y_2) =
\beta^{(i)}_{1+\delta}(y_1) \beta^{(j)}_{1+\delta}(y_2)$.
So it is clear that at this
level of approximation, due to the factorization property of the cumulant
correlators, the bias function $b_{\eta}(x)$ associated with the peaks
in the convergence field $\kappa_{\rm }$, beyond certain threshold, possesses a
similar factorization property too as its
density field counterpart. Earlier studies have established such a
correspondence between convergence field and density field
in the case of one-point probability distribution function $p(\delta)$
(Munshi \& Jain 1999b),
\begin{equation}
b_{\eta}^{(i)}(x_1) h_{\eta}^{(i)}(x_1) b_{\eta}^{(i)} (x_2) h_{\eta}^{(j)} (x_2) =
b^{(i)}_{1 + \delta}(x_1) h^{(i)}_{1 + \delta}(x_1) b^{(j)}_{1+\delta} (x_2) h^{(j)}_{1+\delta} (x_2),
\end{equation}
\noindent
where we have used the following relation between $\beta_{\eta}(y)$
and $b_{\eta}(x)$. For all practical purpose we found that the differential bias
as defined above is more difficult to measure from numerical
simulations as compared to its
integral counterpart where we concentrate on the bias associated with
peaks above certain threshold. The cumulative bias $b_{\eta}(>x)$
can also be defined in an analogous manner:
\begin{equation}
b_{\eta}(x) h_{\eta}(x) = -{ 1 \over 2 \pi i}
\int_{-i\infty}^{i\infty} dy \tau (y) \exp (xy); \quad
b_{\eta}(>x) h_{\eta}(>x) = -{ 1 \over 2 \pi i}
\int_{-i\infty}^{i\infty} dy {\tau (y)\over y} \exp (xy).
\label{eq:bias1}
\end{equation}
\noindent
It is important to notice that, although the bias $b(x)$
associated with the convergence field $\kappa_{\rm }$ and the underlying density
field are identical, the variance associated with the density field is
very high, while projection effects substantially reduce the variance in the convergence field.
This indicates that we have to use
the integral definition of bias to recover it from its generating
function; see Eq.(\ref{eq:bias1}).
Now writing down the full two point probability distribution function
for two correlated spots in terms of the convergence field $\kappa_{\rm }(\theta_0)$ and
its reduced version $\eta$:
\begin{eqnarray}
\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad p^{(ij)}(\kappa^{\rm }_1,\kappa^{\rm }_2)d\kappa^{\rm }_1 d\kappa^{\rm }_2 = p^{(i)}(\kappa^{\rm }_1) p^{(j)}(\kappa^{\rm }_2)( 1
+ b^{(i)}(\kappa^{\rm }_1) \xi^{(ij)}_{12} b^{(j)}(\kappa_2)) d\kappa^{\rm }_1 d\kappa^{\rm }_2, \\
\quad\quad\quad\quad\quad\quad p_{\eta}(\eta_1, \eta_2)d\eta_1 d\eta_2 = p^{(i)}_{\eta}(\eta_1) p^{(j)}_{\eta}(\eta_2)( 1
+ b^{(i)}_{\eta}(\eta_1) \xi^{\eta}_{12} b^{(j)}_{\eta}(\eta_2)) d\eta_1 d\eta_2.
\end{eqnarray}
\noindent
Using Eq.(\ref{eq:pdf}) that $p^{(i)}(\kappa)
= {p_{\eta}(\eta)/{|\kappa^{(i)}_{\rm min}}|}$ we also notice that $\xi^{(ij)}_{12} =
{\xi^{\eta}_{12}/{[\kappa^{(i)}_{\rm min}]}{[\kappa^{(j)}_{\rm min}]}}$,
from which we can now write:
\begin{equation}
b_{(i)}(\kappa) = {b^{(i)}_{\eta}(\eta)/|{\kappa^{(i)}_{\rm min}}|}.
\label{eq:bias}
\end{equation}
Together with Eq.(\ref{eq:pdf}), Eq.(\ref{eq:bias}) can be used to construct analytical estimates
of pdf and bias about individual bins. In addition these results are applicable to the modelling
of joint PDFs involving two separate redshift bins.
Figure (\ref{fig:eta_bias}) shows the bias associated with the reduced convergence for individual bins
as well as for the entire survey. The smoothing angular scale is $\theta_0=5'$. In Figure (\ref{fig:kappa_pdf_bias})
shows the PDF and bias associated with the convergence $\kappa$. In Figure (\ref{fig:dark}) we plot the
difference in PDF between various cosmological scenarios.
\section{Effect of Noise on one- and two-point PDF}
The PDFs we have considered so far are free from noise.
In this section we will present the results of estimates of error relating to the PDFs, those at the level
of one-point and two-point PDFs. These results will generalise the ones found by \cite{MuCo03}
for lower order moments and later by \cite{VaMuBa05}. Inclusion of noise can be incorporated
through a convolution. We will assume the noise to be Gaussian and uncorrelated with the signal.
However the variance of the noise will depend on the surface density of galaxies in individual bins.
With these simplifying assumptions, for the i-th tomographic bin we can write:
\begin{equation}
p^{(i)}_n(\kappa) = \int_{-\infty}^{\infty}\;p^{(i)}(\kappa-n)\;p^{(i)}_G(n)\;dn.
\end{equation}
Here $p^{(i)}_G(n)$ is the noise PDF assumed Gaussian, and $p^{(i)}_n(\kappa)$ is the convergence PDF
in the presence of noise (the subscript $G$ denotes Gaussian). We take $\sigma^2_{\kappa} = \sigma^2_{\epsilon}/(2 n_g \pi\theta_0^2)$.
Here $\sigma_{\epsilon}$ is intrinsic ellipticity distribution of galaxies and $n_g$ is the number denisty of galaxies and
$\theta_0$ is the smoothing angular scale.
The two-point PDF can also be modified to include the effect of noise in a similar manner.
The equivalent expression for 2PDF can be written as:
\begin{equation}
p^{(ij)}_n(\kappa_1,\kappa_2) = p^{(i)}_n(\kappa_1)p^{(j)}_n(\kappa_2)( 1 + b^{(i)}_n(\kappa_1)\xi^{ij}_{12}b^{(j)}_n(\kappa_2)),
\label{eq:2pt1}
\end{equation}
which is obtained by convolving the noise PDF with the 2PDF:
\begin{equation}
p^{(ij)}_n(\kappa_1,\kappa_2) = \int_{-\infty}^{\infty}\;p^{(ij)}(\kappa_1-n_1,\kappa_2-n_2)\; p^{(i)}_G(n_1)\; p^{(j)}_G(n_2)\;dn_1\;dn_2.
\label{eq:2pt2}
\end{equation}
Comparing Eq.(\ref{eq:2pt1}) and Eq.(\ref{eq:2pt2}) we can write the expression for the noisy bias $b_n(\kappa)$ as:
\begin{equation}
b^{(i)}_n(\kappa) = \int_{-\infty}^{\infty} p^{(i)}(\kappa-n)b^{(i)}(\kappa-n)p^{(i)}_G(n)dn \; / \;\int_{-\infty}^{\infty} p^{(i)}(\kappa-n)p^{(i)}_G(n)\;dn.
\end{equation}
Notice that depending on the redshift distribution of sources, the noise maps $n^{(i)}$ and $n^{(j)}$ can be different for two tomographic bins. We also
assumed that the noise in different bins is statistically independent. The cumulative bias for the i-th tomographic bin that includes noise $b^{(i)}_n(>\kappa)$
can be expressed in terms of $p^{(i)}_n(\kappa)$ and $b^{(i)}_n(\kappa)$, just as its noise-free counterpart:
\begin{equation}
b^{(i)}_n(>\kappa)= \int_{\kappa}^{\infty} p^{(i)}_n(\kappa) b^{(i)}_n(\kappa) d\kappa / \int_{\kappa}^{\infty} p^{(i)}_n(\kappa) d\kappa.
\end{equation}
Errors associated with binned tomographic noisy PDF can be analysed using the following quantities:
\begin{equation}
{\rm N} = n_g \pi \theta_0^2 = 314 \left ( {n_g \over 100 \;{\rm arcmin}^{-2}}\right ) \left ( \theta_0 \over 1 \;{\rm arcmin} \right )^2
\end{equation}
Here $n_g$ is the number density of galaxies, $\theta_0$ is the smoothing angular scale in arc-minutes for a given survey strategy.
For a given survey we denote the area covered by $A$ and introduce a parameter $N_c$ which will be used in expressing the
signal-to-noise estimates of the PDF $p(\kappa)$. We define the following variable that will be useful in quantifying scatter in a noisy PDF:
\begin{equation}
{\rm N}_c = {A \over (2\theta_0)^2} = 2.7 \times 10^4 \left( {A \over 300 \;{\rm degree}^2} \right ) \left ( {\theta_0 \over 1 \;{\rm arcmin}} \right )^{-2}.
\end{equation}
Finally the scatter $\sigma(p(\kappa))$ in the measured convergence PDF $p(\kappa)$ can be expressed as \citep{VaMuBa05}:
\begin{equation}
{\sigma(p^{(i)}_n(\kappa)) \over p^{(i)}_n(\kappa)} = \left [ {1 \over {\rm N}_c} \left ( {1 \over 2 p_n^{(i)}(\kappa) \Delta } -1 \right ) \right ]^{1/2}
\label{eq:s2n}
\end{equation}
These expressions can be modified and used to compute the scatter in individual redshift bins by simply changing $\bar n_g$ to
surface density of individual bins $\bar n_g^{(i)}$ and $p(\kappa)$ to $p^{(i)}(\kappa)$. The source density for individual
bins for a given survey can be computed using $\bar n^{(i)} = \int_{z_i}^{z_{i+1}} p_s(z) dz$. The bin width $\Delta$ is left as a
free parameter. In Figure (\ref{fig:s2n1}) we have plotted the scatter $\sigma(p^{(i)}_n(\kappa))$ as a
function of intrinsic ellipticity
distribution and bin width for a smoothing angular scale of $\theta_0 =5'$ and redshift $z_s=0.698$.
The results for the difference in noisy PDFs are plotted in Figure (\ref{fig:10}).
\begin{figure}
\begin{center}
{\epsfxsize=12.5 cm \epsfysize=6. cm {\epsfbox[25 434 585 709]{ng.eps}}}
\end{center}
\caption{The scatter in estimation of binned PDF for a given intrinsic ellipticity distribution and sky coverage
is displayed. We assume an all-sky coverage. The effect of intrinsic ellipticity distribution is displayed
in the left panel. The right panel depicts the effect of binning. The angular scale in each case is $\theta_0=5'$
and the redshift is $z_s=0.698$. The expression for $\sigma(p(\kappa))$ is given in Eq.(\ref{eq:s2n}). For the
left panel we consider $\Delta=0.02$ and for the right panel $n_g=25$. A background $\Lambda$CDM cosmology is
assumed for these calculations.}
\label{fig:s2n1}
\end{figure}
\section{Conclusions}
\label{sec:conclu}
Previous tomographic studies of weak lensing have typically worked with
the lower order cumulants; we have generalized here these
results to the case of the entire one- and two-point PDF, which contain
information about the cumulants to an arbitrary order.
The construction was performed using a generating function formalism
based on hierarchical ansatz and a lognormal model. Our analysis generalizes previously
obtained results derived for projected or 2D surveys. Though
we have considered top-hat filtered convergence maps due to their simplicity, similar results
can be obtained for related statistics such as the shear components or aperture mass $M_{ap}$ \citep{BV00}.
The PDFs for the individual bins are constructed by generalization of the
previously introduced global variable $\kappa^{\rm min}$, for individual bins, i.e. $\kappa^{\rm min}_{(i)}$,
that was used in the context of 2D projected maps.
Next, using $\kappa_{(i)}^{\rm min}$, reduced variable $\eta^{(i)}$ is defined for
each individual bins whose statistics can directly be linked
to that of underlying density contrast $\delta$. The convergence in individual bins
can then be mapped to unique values of $\eta=1+\delta$ for a given smoothing angular scales $\theta_0$.
For modelling the statistics of underlying density contrast $\delta$ we have assumed two
completely different models: the hierarchical ansatz along with its perturbative
counterpart as well as the lognormal distribution. Both these approximations
have been used successfully in various cosmological contexts. There are a wide class of models that
are available under the general category of hierarchical ansatz.
The main motivation behind our choice of a specific hierarchy is simplicity.
In recent years more sophisticated models of hierarchical clustering have been
proposed which rely more on inputs from numerical simulations. The generic results
we have derived here indeed can be improved using such modelling though the fundamental
characteristics will remain unchanged.
\begin{figure}
\begin{center}
{\epsfxsize=6. cm \epsfysize=6. cm {\epsfbox[27 426 315 709]{noise_diff.eps}}}
\end{center}
\caption{The difference of noisy $\Lambda$CDM PDF and dark energy models $\Delta_p(\kappa)=p(\kappa)-p_{\Lambda \rm CDM}(\kappa)$
is plotted as a function of $\kappa$. The smoothing angular scale, bin size and galaxy number density is as depicted.
The scatter in estimation is smaller compared to the difference in the PDFs considered. The cosmological parameters
considered are the same as the ones in Figure-(\ref{fig:dark}). The two survey configurations that we have considered
both produce near-identical results.}
\label{fig:10}
\end{figure}
In our treatment we find, in agreement with \cite{MuWa03}, the dynamical and geometrical
contribution can be treated separately. The geometrical effects are completely encoded in
a parameter $\kappa^{min}$. The reduced convergence as defined is independent of
the background geometry of the universe and essentially probe the evolution of
gravitational clustering. We showed that a set of $\kappa^{\rm min}_{(i)}$ defined
for a given set of redshift slices are adequate to characterize not only
individual PDFs for each bin but it is also sufficient to study the
joint two-point PDF among two different bins. The PDF of the reduced convergence
$\eta^{(i)}$ for individual bins or joint PDFs for a pair of bins
generalizes the earlier studies where the projected or 2D maps
were considered in a straightforward manner.
We also note that the construction of convergence maps is difficult compared to the direct evaluation of
non-Gaussian statistics from shear maps. On the other hand convergence statistics
can directly be modelled at arbitrary order whereas for shear field the computation
is done mostly order by order manner. An independent analysis of convergence
maps constructed from shear maps should therefore be useful in constraining
various errors which might get introduced during various stages of data reduction.
In our analysis we have ignored the noise from intrinsic ellipticity distribution of galaxies
as well as from shot noise resulting from finite number of galaxies that are used
to trace the underlying mass distribution. These issues have been dealt with
in great detail in \cite{MuCo03,VaBaMu04}. Dividing the source population into bins reduces the
number-density of sources. This in turn will increase the level of noise or the
scatter in the estimator. In our analysis we have considered two different survey configurations,
i.e. LSST and DES and found that for our choice of tomographic bins the one- and two-point
PDFs are very similar in nature.
The lognormal distribution has already been used to model
the statistics of weak lensing observables \citep{Mu00,TTHF02} and the clustering of Lyman alpha
absorption systems e.g. \citep{BD97}. One-to-one mapping of initial density fields to
evolved density fields using maps that are consistent with lognormal distribution
function was not found to be very successful and the success of a lognormal distribution
function in reproducing the statistics of gravitational clustering still remains
somewhat unclear.
Tomographic weak lensing surveys can be cross-correlated with external data sets
including frequency cleaned maps of secondaries from ongoing CMB surveys;
e.g. the thermal Sunyaev-Zeldovic (tSZ) maps or $y$-maps that will be available from
CMB surveys such as Planck. The cross-correlation with tomographic
information can help to understand the evolution of cosmological pressure fluctuations
responsible for tSZ effect with redshift. The formalism presented here is
perfectly suitable for such analysis. Detailed results of such analysis will be presented
elsewhere. In addition to the weak lensing surveys the Supernova pencil beam surveys
might also benefit from the results presented here.
To summarize, we have extended results derived in three different previous papers
\citep{MuCo03,VaMuBa05,VaBaMu04} to take into
account tomographic bins within which the photometric redshift are available. The results
obtained previously for one-point PDF are now extended to two-point PDF.
These results can provide an alternative to usual Fisher-matrix analysis
that is employed to optimize survey strategies. We have concentrated mainly on
analytical results in this paper. The numerical results regarding optimization of survey strategy using
these results will be considered elsewhere.
\section{Acknowledgements}
\label{acknow}
DM and PC acknowledges support
from STFC standard grant ST/G002231/1 at School of Physics and
Astronomy at Cardiff University where this work was completed.
We would like to thank Alan Heavens, Patrick Valageas, Ludo van Waerbeke and Sanaz Vafei for many useful discussions.
The numerical results were obtained using a modified version of a code made available to us
for computing the PDF and bias by Francis Bernardeau.
|
1,314,259,995,011 | arxiv | \section{Introduction}
The main feature of $\lambda$ Boo stars is a notable underabundance of most Fe-peak elements
and near solar abundances of lighter elements (C, N, O and S).
They comprise main-sequence late-B to early-F stars, where a maximum of about 2\% of all objects
are believed to be $\lambda$ Boo stars \citep{gray-corbally98,paunzen01b}.
Classification-resolution spectroscopy shows promising $\lambda$ Boo candidates \citep[e.g.,][]{murphy15,gray17},
and a more detailed abundance determination, especially including the lighter elements,
is considered an ultimate test to confirm that a candidate is indeed a bona fide member of the class
\citep[e.g.,][]{andrievsky02,heiter02}.
The origin of the $\lambda$ Boo peculiarity still remains a challenge
\citep[see, e.g., the recent discussion of ][and references therein]{murphy-paunzen17}.
Their rotational velocities do not necessarily point toward lower values,
marking thus a difference with chemically peculiar Am and Ap stars \citep{abt-morrell95,murphy15}.
A possible explanation consists of the interaction of the star with a diffuse interstellar cloud
\citep{kamp-paunzen02,mg09}.
In this work, we refer to this model as the "accretion scenario", in which
the underabundances are produced by different amounts of volatile accreted material,
and the more refractory species are possibly separated and repelled from the star.
More recently, \citet{jura15} proposed that this peculiar pattern possibly originates from the
winds of hot-Jupiter planets\footnote{Hot-Jupiter planets present short orbital periods ($<$10 d) and large masses ($>$ 0.1 M$_{Jup}$).}.
In this case, the planet acts as the source of gas poor in refractory species.
However, \citet{saffe21} have recently shown that eight early-type stars hosting hot-Jupiter planets
do not display the $\lambda$ Boo peculiarity.
This would leave the interaction of the star with a diffuse cloud as the more plausible
scenario to explain the $\lambda$ Boo phenomenon in main-sequence stars.
Under the accretion scenario, two early-type stars passing through a diffuse cloud should display, in principle,
the same superficial peculiarity \citep[e.g.,][]{paunzen12a,paunzen12b}. At the same time, hotter stars
(T$_\mathrm{eff}$ $>$ $\sim$12000 K) with strong winds, and cooler stars (T$_\mathrm{eff}$ $<$ $\sim$6500 K)
with larger convective zones, should not notably change their composition.
These predictions make the analysis of binary and multiple systems an important tool to test the accretion scenario.
However, the number of known candidate $\lambda$ Boo stars in binary/multiple systems is limited to a dozen of objects
\citep[e.g.,][]{paunzen12a,paunzen12b}, where most of them are spectroscopic binary (SB) systems.
To our knowledge, only five of these systems present a detailed chemical analysis of the two components
(see the Appendix for a more detailed review).
Notably, some stars of these binary systems were recently identified as non-members or uncertain members of the $\lambda$ Boo class
\citep[see ][]{gray17}.
Based on literature data, we selected for this study three binary/multiple systems
that possibly confront the accretion scenario. In addition, they are spatially resolved
\citep[in contrast to most candidate $\lambda$ Boo stars that belong to SB systems, ][]{paunzen12a,paunzen12b},
allowing an individual analysis without a strong contribution from the companion.
We also review all known binary or multiple systems with candidate $\lambda$ Boo stars,
with data taken from the literature (see Appendix).
In this work, we present an analysis of the remarkable triple system HD 15165.
It is composed by HD 15165, HD 15164 and HD 15165C (stars A, B and C)
with spectral types "F2 V kA2mA2 $\lambda$ Boo?", "F1 V kA7mA6 ($\lambda$ Boo)?" and "K2 V" \citep{murphy15}.
Some previous works suggest that the A star belong to the $\lambda$ Boo class \citep{andrievsky95,cherny98},
while the B star seem to display, notably, a solar composition \citep{andrievsky95}.
If these abundances are confirmed, this could seriously defy the accretion scenario.
In addition, currently there is no analysis of the 3$^{rd}$ star, the late-type component of the system.
Therefore, we take the opportunity and perform a detailed abundance analysis including for the first time the three stars
of the system, using spectra with higher resolving power than previous works.
We also present an analysis of the binary systems HD 193256/281 and HD 198160/161.
Both systems show solar values for C and subsolar Fe, similar to other candidate $\lambda$ Boo stars \citep{sturenburg93}.
However, more recent classification spectra suggest that only one star of the system belongs to the $\lambda$ Boo
class \citep[see Tables 1 and 4 of ][]{murphy15,gray17},
which would be difficult to explain under the accretion scenario.
This makes both systems very interesting targets to study in detail, and they are included in our analysis.
This work is organized as follows. In Sect. 2 we describe the observations and data reduction, while in Sect. 3
we present the stellar parameters and chemical abundance \mbox{analysis}. In Sect. 4 we show the results and discussion,
and finally in Sect. 5 we highlight our main conclusions.
\section{Observations}
We present in Table \ref{table.parallax} the visual magnitude V (from Hipparcos), coordinates, proper motions
and parallax \citep[from Gaia DR2, ][]{gaiaDR2} for the stars studied in this work.
Spectral data of the triple system HD 15165 were obtained with the Max Planck Gesellschaft (MPG) 2.2 meter telescope
at the European Southern Observatory (ESO) in La Silla, Chile, on October 10, 2021 (Program ID: 0108.A-9012, PI: Marcelo Jaque Arancibia).
We used the Fiber-fed Extended Range Optical Spectrograph (FEROS), which provides high-resolution (R$\sim$48\,000) spectra
when illuminated via the 2.0 arcsec aperture on the sky in the unbinned mode.
Three individual spectra for each object were obtained, followed by a ThAr lamp in order to obtain an appropriate wavelength solution.
The data were reduced using the FEROS Data Reduction System\footnote{https://www.eso.org/sci/facilities/lasilla/instruments/feros/tools/DRS.html} (DRS).
The spectral coverage resulted between 3700-9000 \AA, approximately, and the S/N per pixel measured at $\sim$5000 \AA~resulted in $\sim$300.
\begin{table*}
\centering
\caption{Magnitudes and astrometric data for the stars studied in this work.}
\begin{tabular}{lccccccc}
\hline
Star & V & $\alpha$ & $\delta$ & $\mu_{\alpha}$ & $\mu_{\delta}$ & $\pi$ & Spectra \\
& & J2000 & J2000 & [mas/yr] & [mas/yr] & [mas] & \\
\hline
HD 15164 & 8.27 & 02 26 48.29 & +10 34 57.59 & 36.552 & -13.717 & 7.4185 & MPG+FEROS \\
HD 15165 & 6.69 & 02 26 45.65 & +10 33 55.07 & 36.680 & -13.086 & 7.4414 & MPG+FEROS \\
HD 15165C & 11.78 & 02 26 47.40 & +10 32 58.89 & 36.805 & -13.131 & 7.5499 & MPG+FEROS \\
HD 193256 & 7.53 & 20 20 26.57 & -29 11 28.76 & -1.991 & -1.221 & 5.8675 & CASLEO+REOSC \\
HD 193281 & 6.64 & 20 20 27.88 & -29 11 49.97 & -0.653 & 0.244 & 6.2644 & CASLEO+REOSC \\
HD 198160 & 6.21 & 20 51 38.51 & -62 25 45.59 & 82.697 & -46.562 & 13.5137 & MPG+FEROS \\
HD 198161 & 6.56 & 20 51 38.85 & -62 25 45.26 & 82.077 & -42.340 & 13.5315 & MPG+FEROS \\
\hline
\end{tabular}
\normalsize
\label{table.parallax}
\end{table*}
The spectra of the binary system HD 193256/281 were obtained at the Complejo Astr\'onomico El Leoncito (CASLEO)
between May 9 and 11, 2009 (PI: Maria Eugenia Veramendi). We used the \emph{Jorge Sahade} 2.15-m telescope equipped with a REOSC echelle
spectrograph\footnote{On loan from the Institute d'Astrophysique de Liege, Belgium.} and a TEK 1024$\times$1024 CCD detector.
The REOSC spectrograph uses gratings as cross dispersers. We used a grating with 400 lines mm$^{-1}$, which provides
a resolving power of $\sim$ 12500 covering the spectral range $\lambda\lambda$3800--6500.
Three individual spectra for each object were obtained and then combined, reaching a final S/N per pixel of $\sim$300 measured at $\sim$5000 \AA.
The data were reduced with Image Reduction and Analysis Facility (IRAF) following the standard recipe for echelle spectra (i.e. bias and flat corrections,
order-by-order normalization, scattered light correction, etc.).
Finally, the FEROS spectra of the binary system HD 198160/161 were obtained from the ESO Science Archive
Facility\footnote{http://archive.eso.org/cms.html}. The stars were observed between April 4 and 7, 2017 (Program ID: 099-A-9029).
The spectra were reduced using the FEROS DRS, obtaining a spectral coverage and S/N similar to those obtained with HD 15165.
\section{Stellar parameters and abundance analysis}
The stellar parameters T$_\mathrm{eff}$ and $\log g$ were estimated iteratively,
similar to previous works \citep{saffe21}.
They were first estimated by using the Str\"omgren uvby$\beta$ mean photometry of \citet{hauck-mermilliod98}
or considering previously published results.
We used the calibration of \citet{napi93} and deredenned colors according to \citet{domingo-figueras99}
within the program TempLogG \citep{kaiser06}, in order to derive the fundamental parameters.
These initial values were refined (when necessary and/or possible) by imposing excitation and ionization balances of the iron lines.
A similar strategy was previously applied in the literature \citep[e.g., ][]{saffe-levato14,saffe21}.
The values derived in this way are listed in Table \ref{table.params}, with an average dispersion of
$\sim$115 K and $\sim$0.13 dex for T$_{\rm eff}$ and {$\log g$}, respectively.
\begin{table*}
\centering
\caption{Fundamental parameters derived for the stars in this work.}
\begin{tabular}{lcccc}
\hline
Star & T$_{\rm eff}$ & $\log g$ & v$_\mathrm{micro}$ & $v\sin i$ \\
& [K] & [dex] & [km s$^{-1}$] & [km s$^{-1}$] \\
\hline
HD 15164 & 7150 $\pm$ 70 & 3.74 $\pm$ 0.08 & 2.54 $\pm$ 0.63 & 17.9 $\pm$ 0.7 \\
HD 15165 & 6950 $\pm$ 139 & 3.80 $\pm$ 0.19 & 2.21 $\pm$ 0.55 & 125.7 $\pm$ 5.4 \\
HD 15165C & 4960 $\pm$ 51 & 4.40 $\pm$ 0.03 & 0.46 $\pm$ 0.07 & 2.4 $\pm$ 0.3 \\
HD 193256 & 7780 $\pm$ 146 & 3.97 $\pm$ 0.19 & 3.23 $\pm$ 0.81 & 257.0 $\pm$ 8.2 \\
HD 193281 & 8700 $\pm$ 140 & 3.60 $\pm$ 0.15 & 2.99 $\pm$ 0.75 & 91.5 $\pm$ 3.9 \\
HD 198160 & 8010 $\pm$ 130 & 4.09 $\pm$ 0.15 & 3.31 $\pm$ 0.83 & 190.0 $\pm$ 6.8 \\
HD 198161 & 8010 $\pm$ 130 & 4.09 $\pm$ 0.15 & 3.31 $\pm$ 0.83 & 185.0 $\pm$ 7.2 \\
\hline
\end{tabular}
\normalsize
\label{table.params}
\end{table*}
Projected rotational velocities $v\sin i$ were estimated by fitting most \ion{Fe}{I} and \ion{Fe}{II} lines in the spectra.
Synthetic spectra were calculated using the program SYNTHE \citep{kurucz-avrett81} together with ATLAS12 \citep{kurucz93} model atmospheres.
Microturbulence velocity v$_\mathrm{micro}$ was estimated as a function of T$_{\rm eff}$ following the formula of \citet{gebran14},
(valid for $\sim$6000 K $<$ T$_{\rm eff}$ $<$ $\sim$10000 K), except for the late-type star
HD 15165C for which we used the formula of \citet{ramirez13} for FGK stars.
We adopt for v$_\mathrm{micro}$ an uncertainty of $\sim$25 $\%$, as suggested by \citet{gebran14}.
Chemical abundances were determined iteratively by fitting synthetic spectra using the program SYNTHE \citep{kurucz93}.
In the first step, we use an ATLAS12 model atmosphere calculated with solar abundances.
With the new abundance values, we derived a new model atmosphere and started the process again.
In each step, opacities were calculated for an arbitrary composition and v$_\mathrm{micro}$ using the opacity
sampling (OS) method, similar to previous works \citep{saffe20,saffe21}.
Possible differences originated from the use of opacities with solar-scaled composition instead of an arbitrary
composition, were recently estimated for solar-type stars \citep{saffe18,saffe19}.
If necessary, T$_{\rm eff}$ and $\log g$ were refined to achieve the balance of Fe I and Fe II lines.
In this way, abundances and parameters are consistently derived until the same input and output abundance values are reached
\citep[for more details, see ][]{saffe21}.
Chemical abundances were derived for 24 different species.
The atomic line list and laboratory data used in this work are the same described in \citet{saffe21}.
In Figs. \ref{fig.region1} and \ref{fig.region3} we present an example of observed and synthetic spectra
(black and blue dotted lines, almost superimposed) together with the difference spectra (magenta)
for the stars in our sample. For clarity, Fig. \ref{fig.region1} corresponds to stars with the higher $v\sin i$ values
($>$ 91 km s$^{-1}$), while Fig. \ref{fig.region3} corresponds to stars with the lower $v\sin i$ values ($<$ 17.9 km s$^{-1}$).
The stars are sorted in these plots by increasing $v\sin i$.
There is a good agreement between modeling and observations for the lines of different chemical species.
To determine the uncertainty in the abundance values, we considered different sources.
The total error e$_{tot}$ was derived as the quadratic sum of the line-to-line dispersion e$_{1}$
(estimated as $\sigma/\sqrt{n}$ , where $\sigma$ is the standard deviation),
and the error in the abundances (e$_{2}$, e$_{3}$ and e$_{4}$) when varying T$_{\rm eff}$, $\log g$
and v$_\mathrm{micro}$ by their corresponding uncertainties\footnote{We adopt a minimum of 0.01 dex for the errors e$_{2}$, e$_{3}$ and e$_{4}$.}.
For chemical species with only one line, we adopt as $\sigma$ the standard deviation of iron lines.
The abundances, the total error e$_{tot}$ and the individual errors e$_{1}$ to e$_{4}$ are presented in Tables
B.1 to B.7 of the Appendix.
\begin{figure}
\centering
\includegraphics[width=8cm]{region1.eps}
\caption{Observed, synthetic, and difference spectra (black, blue dotted, and magenta lines)
for the stars in our sample, sorted by $v\sin i$.}
\label{fig.region1}%
\end{figure}
\begin{figure}
\centering
\includegraphics[width=8cm]{region3.eps}
\caption{Observed, synthetic, and difference spectra (black, blue dotted, and magenta lines)
for the stars in our sample, sorted by $v\sin i$.}
\label{fig.region3}%
\end{figure}
\subsection{NLTE effects}
Light element Non-Local Thermodynamic Equilibrium (NLTE)
abundances are particularly important for the case of $\lambda$ Boo stars.
For instance, \citet{paunzen99} derived for a sample of $\lambda$ Boo stars an average O I correction
of -0.5 dex, while for C I they estimated an average correction of -0.1 dex.
\citet{rentzsch96} calculated carbon NLTE abundance corrections by using a multilevel
model atom for stars with T$_\mathrm{eff}$ between 7000 K and 12000 K, log g between
3.5 and 4.5 dex, and metallicities from -0.5 dex to +1.0 dex.
She showed that C I NLTE effects are negative (calculated as NLTE-LTE) and
depend basically on equivalent width W$_{eq}$.
Near $\sim$7000 K, the three lower levels of C I are always in LTE; however, increasing
the T$_\mathrm{eff}$ increases the underpopulation of these levels with respect to LTE
due to UV photoionization.
Then, we estimated NLTE abundance corrections of C I for the early-type stars in our sample
by interpolating in their Figs. 7 and 8 as a function of T$_\mathrm{eff}$, W$_{eq}$
and metallicity.
\citet{sitnova13} performed NLTE abundance corrections for O I for stars with spectral
types from A to K (T$_\mathrm{eff}$ between 10000 and 5000 K).
They showed that NLTE effects lead to a strengthening of O I lines,
producing a negative NLTE correction.
We estimated NLTE abundance corrections of O I (IR triplet 7771 \AA\ and 6158 \AA)
for the stars in this work, by interpolating in the Table 11 of \citet{sitnova13} as
a function of T$_\mathrm{eff}$. Other O I lines present corrections lower than $\sim$-0.02 dex
\citep[see, e.g., Table 5 of ][]{sitnova13}.
\subsection{Comparison with literature}
We present in Fig. \ref{fig.metal.liter} a comparison of [Fe/H] values
derived in this work, with those taken from literature for the stars
HD 15164 \citep{andrievsky95}, HD 15165 \citep{paunzen02},
HD 193256, HD 193281, HD 198160 and HD 198161 \citep{sturenburg93}.
In general, there is a reasonable agreement with literature,
where the star HD 193281 presents the largest difference (marked in the plot).
\citet{sturenburg93} estimated for HD 193281 an iron abundance of [Fe/H]=-1.0$\pm$0.2.
However, we estimated for this star a somewhat higher value of [FeI/H]=-0.36$\pm$0.13 ([FeII/H]=-0.48$\pm$0.13).
We explored the possible sources for this difference.
They estimated a T$_\mathrm{eff}$ of 8080 K (without quoting uncertainties) by using the Str\"omgren photometry, while
we estimated for this object a T$_\mathrm{eff}$ of 8700$\pm$140K, having a difference of 620 K.
This could be one of the reasons for the different [Fe/H] that we obtained.
Different works estimated for this star temperatures of 8700 K \citep{gray17},
8623 K \citep{koleva12}, and recently 8695 K \citep{arentsen19}.
Then, our estimated T$_\mathrm{eff}$ is more in agreement with these works.
We also note that this star presents different metallicities in literature:
-1.0$\pm$0.2 dex \citep{sturenburg93}, -0.68 dex \citep{koleva12} and more recently -0.37 dex \citep{arentsen19}.
Our estimated metallicity of [FeI/H]=-0.36$\pm$0.13 is closer to the work of \citet{arentsen19}.
In addition, there is evidence that HD 193281 could be contaminated by a nearby star.
Simbad database reports that the star ADS 13702 B (= TYC 6918-1823-2) is located at $\sim$3.5 arcsec
from HD 193281, having spectral type "F5:V".
\citet{ivanov19} present a library of stellar spectra taken with the integral field spectrograph
MUSE\footnote{https://www.eso.org/sci/facilities/develop/instruments/muse.html}
in low spectral resolution (R$\sim$2000) although with high spatial resolution (0.3-0.4 arcsec).
They report that HD 193281 is a binary with $\sim$3.8 arcsec separation and the
components cross-contaminate each other. They identified the components as HD 193281 A and B,
and estimated spectral types A2 III and K2 III, respectively (updating the spectral type
F5:V reported by Simbad for the star HD 193281 B).
This possible contamination could explain, at least in part, the different parameters and
metallicities obtained from different works for this object.
In this study, we estimated parameters and abundances of HD 193281 taken as single,
for which the resulting values should then be considered with caution.
\begin{figure}
\centering
\includegraphics[width=8.0cm]{metal1.eps}
\caption{Comparison of [Fe/H] values derived in this work with those from literature.
Average dispersion bars are shown in the upper left corner.}
\label{fig.metal.liter}
\end{figure}
\section{Discussion}
In order to test the accretion scenario of $\lambda$ Boo stars, we compare the chemical abundances
of the stars in our sample with those of $\lambda$ Boo stars.
The three multiple systems with candidate $\lambda$ Boo stars are discussed
separately, while other binary or multiple systems with candidate $\lambda$ Boo stars
are discussed in the Appendix.
\subsection{The average pattern of $\lambda$ Boo stars}
To derive an average $\lambda$ Boo pattern is not an easy task.
Few literature works obtain homogeneous abundances of many species for $\lambda$ Boo stars \citep[e.g., ][]{sturenburg93,andrievsky02,heiter02}.
\citet{sturenburg93} derived abundances for 16 A-type stars classified, in principle, as $\lambda$ Boo stars.
They performed NLTE corrections for some elements including C.
However, they included stars that were subsequently considered non-members or uncertain members, such as HD 38545
and HD 193281 \citep{murphy15}.
\citet{paunzen99} and \citet{kamp01} derived light-element NLTE abundances for a sample of $\lambda$ Boo stars.
Then, \citet{andrievsky02} derived elemental abundances for 20 candidate $\lambda$ Boo stars basically selected from
classification-resolution spectroscopy. They performed primarily a LTE approach and included NLTE effects for Na.
They were able to confirm the membership of only nine objects to the $\lambda$ Boo class,
while other stars were ruled out or present an unclear membership.
\citet{paunzen02} collected abundance values for 26 candidate $\lambda$ Boo stars (see their Table 5),
although using different literature sources.
Also, \citet{heiter02} reported LTE abundance values for 12 candidate $\lambda$ Boo stars, four of them belonging
to SB systems.
Then, a homogeneous abundance determination would be highly desirable, including more candidate
$\lambda$ Boo stars, newer laboratory data for the lines, and NLTE effects, especially for the light elements.
For this comparison, we used the data derived by \citet{heiter02}, who homogeneously determined
abundances for a number of $\lambda$ Boo stars.
We excluded from the average those stars without CNO values and the stars analyzed here.
\subsection{The triple system HD 15164/65/65C}
This remarkable triple system is composed of two early-type stars (HD 15165 and HD 15164, the stars A and B)
and a late-type companion (HD 15165C).
A number of studies suggest that the spectrum of HD 15165 resembles that of a metal-deficient star,
but the companion HD 15164 has a near solar abundance \citep{mechler74,mechler76,abt80}.
Then, as explained in the Introduction, some works suggest that the A star belongs to the $\lambda$ Boo class \citep{andrievsky95,cherny98},
while the B star seems to display a solar composition \citep{andrievsky95}.
To our knowledge, there is no abundance determination for the C component.
We present in Fig. \ref{fig.pattern.HD15165} the chemical pattern of the stars
HD 15164, HD 15165 and HD 15165C (black), compared to an average pattern of
$\lambda$ Boo stars (blue). For each star we present two panels,
corresponding to elements with atomic number z$<$32 and z$>$32.
The error bars of the $\lambda$ Boo pattern show the standard deviation derived from different stars,
while the error bars for our stars correspond to the total error e$_{tot}$.
As we can see in the Fig. \ref{fig.pattern.HD15165} the chemical pattern of the primary (HD 15165)
is similar to the pattern of $\lambda$ Boo stars, showing subsolar abundances of most metals
(Mg, Al, Ca, Sc, Ti, Cr, Fe) together with near solar values of C and O.
The abundances of Sr and Ba present a less marked deficiency, although still showing subsolar values.
On the other hand, the chemical pattern of the secondary star (HD 15164) shows a slight deficiency
in some metals (for instance [Fe/H]=-0.36$\pm$0.15 dex), although closer in general to the solar pattern
than to the $\lambda$ Boo stars. In this sense, a primary showing a $\lambda$ Boo pattern
and a secondary showing near solar abundances verify the early result of \citet{andrievsky95}:
the early-type stars A and B present different chemical compositions.
\begin{figure}
\centering
\includegraphics[width=8.0cm]{pat.HD15164.eps}
\includegraphics[width=8.0cm]{pat.HD15165.eps}
\includegraphics[width=8.0cm]{pat.HD15165C.eps}
\caption{Chemical pattern of the stars HD 15164, HD 15165 and HD 15165C (black),
compared to an average pattern of $\lambda$ Boo stars (blue).}
\label{fig.pattern.HD15165}
\end{figure}
To our knowledge, there is no abundance determination of $\lambda$ Boo stars that belong
to a triple or multiple system. In particular, a late-type star that belongs to such a system
could be used as a proxy for the initial composition of the material from which the $\lambda$ Boo star formed
(under the hypothesis that they were born from the same molecular cloud).
This could be important as an additional constraint for any model trying to explain the $\lambda$ Boo phenomenon.
We present in the Fig. \ref{fig.pattern.HD15165} the chemical pattern of HD 15165C,
the late-type component of the triple system.
The chemical pattern is compatible with a solar-like composition (for instance, [FeI/H]=0.04$\pm$0.02 dex).
This is in agreement with the idea that $\lambda$ Boo stars are Population I objects
and originate (following any internal or external mechanism) starting from a solar-like composition.
Notably, the three stars that belong to the triple system present different chemical patterns.
Star A presents a $\lambda$ Boo pattern, while stars B and C present abundances closer
to the Sun. However, stars B and C are also slightly different from each other:
the late-type star C presents the abundances closest to the Sun, while the early-type star B
shows a slight deficiency. Most abundance values between stars B and C agree within $\sim$0.30 dex,
with a possible exception: the lithium content.
The Li I 6707.8 \AA\ line is clearly present in the spectra of the star B (HD 15164) as we can see in the Fig. \ref{fig.HD15164.Li.6707},
while it is not detected in the spectra of stars A or C.
It is interesting to note that this line is commonly used as a proxy of recent accretion onto the
atmosphere of the stars. For instance, \citet{saffe17} attributed a notable difference in the refractory
abundances and in the Li content between the stars of the binary system HAT-P-4
to a possible accretion event of a rocky planet onto the primary.
However, although HD 15164 shows clearly the Li line, its refractory content is slightly lower than
the star HD 15165C, which would be difficult to explain with the accretion of refractory species.
\begin{figure}
\centering
\includegraphics[width=8.0cm]{HD15164.Li.6707.eps}
\caption{Observed spectra (black line) and synthetic spectra (blue dotted line) near the Li line 6707.8 \AA\ in the star HD 15164.
Synthetic lines are indicated showing the wavelength, atomic number and intensity.}
\label{fig.HD15164.Li.6707}
\end{figure}
Is it possible that the supposed different abundances between stars A, B and C are only due to different T$_\mathrm{eff}$?
The question makes sense because the stars A and C present T$_\mathrm{eff}$ of 7150 K and 4960 K,
a difference of 2190 K. However, the total error e$_{tot}$ in abundances includes the error e$_{2}$, which
measures the change in the abundances when varying T$_\mathrm{eff}$ by its corresponding uncertainty.
Then, we do not expect a strong change in the derived abundances due to T$_\mathrm{eff}$
(in any case, the possible change is contained within the total error e$_{tot}$).
\subsection{The binary system HD 193256/281}
HD 193256 was classified as $\lambda$ Boo by \citet{gray88} and then as uncertain $\lambda$ Boo
by \citet{renson90}. It is separated by $\sim$27.5 arcsec from HD 193281,
which was classified as $\lambda$ Boo by \citet{gray-garrison87}.
Both stars HD 193256 and HD 193281 show approximately solar abundances of C and subsolar Fe
in the study of \citet{sturenburg93}, who analyzed them separately.
However, they also found near solar values for other elements such as Mg and Si in both stars,
which is different from what is found in average $\lambda$ Boo stars.
\citet{kamp01} found solar values in HD 193281 for N, O and S, although for C they found -0.61 dex,
similarly to \citet{paunzen99}.
However, more recent classification spectra suggest that only HD 193256 could belong to the $\lambda$ Boo class
\citep[see Tables 1 and 4 of ][]{murphy15,gray17}, while HD 193281 displays a normal spectrum.
In this work, we analyzed the spectra of HD 193256 and HD 193281, both considered as single stars;
therefore, the abundances of HD 193281 should be taken with caution.
We present in Fig. \ref{fig.pattern.HD193256} the chemical pattern of the stars
HD 193256 and HD 193281 (black), compared to an average pattern of $\lambda$ Boo stars (blue).
The colors, panels and error bars used are similar to those of Fig. \ref{fig.pattern.HD15165}.
HD 193256 shows solar or suprasolar values for C and O, together with subsolar values (between 0.5-0.9 dex)
of Ca, Cr, Fe and Sr.
However, we also found near solar values of Mg, Si and Ti, which is not common in $\lambda$ Boo stars.
Then, this object seems to present a mix of metals with solar and subsolar abundances.
On the other hand, HD 193281 presents, in general, the chemical pattern of a slightly metal-deficient star,
showing subsolar values for C and O ($\sim$0.3 dex) similar to Fe I (-0.36$\pm$0.13 dex).
However, the results of HD 193281 should be taken with caution, due to a possible contamination
of the nearby K2 III star.
\begin{figure}
\centering
\includegraphics[width=8.0cm]{pat.HD193256.eps}
\includegraphics[width=8.0cm]{pat.HD193281.eps}
\caption{Chemical pattern of the stars HD 193256 and HD 193281 (black),
compared to an average pattern of $\lambda$ Boo stars (blue).}
\label{fig.pattern.HD193256}
\end{figure}
In short, the solar abundances of some metals of HD 193256 (Mg, Si and Ti) are different from those of $\lambda$ Boo stars.
The chemical pattern of HD 193281 (considered as single) shows a slightly metal deficient star.
In addition, there is evidence for a possible contamination of HD 193281, where the components A and B
display spectral types A2 III and K2 III.
Then, current evidence does not support the presence of two bonafide $\lambda$ Boo stars in this binary (or triple) system.
An analysis of HD 193281 separately for the components A and B would be desirable,
in order to determine the individual abundances more properly.
\subsection{The binary system HD 198160/161}
HD 198160 forms a visual binary system with HD 198161, separated by $\sim$2.4 arcsec.
HD 198160 was classified "A2 Vann wk4481" and "A2 Vn" \citep{gray88,corbally-garrison80},
while HD 198161 was classified as "A3 Vn" \citep{corbally-garrison80}.
Both stars were studied separately by \citet{sturenburg93} considering them as twins (same T$_\mathrm{eff}$ and log g).
He derived near solar values for C in both stars and subsolar values for
Fe (-0.8$\pm$0.2 dex), however he also obtained solar values for Mg and Si (0.0$\pm$0.1 dex
and -0.2$\pm$0.2 dex for both stars).
Then, \citet{paunzen99} estimated near solar NLTE values for C and O, although quoted
for HD 198160/1 (not separated). More recently, \citet{murphy15} caution that individual NLTE
volatile abundances for HD 198160 and HD 198161 are not confirmed (such as those reported in this work)
and tentatively adopt for HD 198160 a classification "A2 Vann $\lambda$ Boo". However, its companion HD 198161
was classified as a normal star, with spectral type "A3 V" and "A3 IV(n)" \citep{murphy15,gray17}.
We present in Fig. \ref{fig.pattern.HD198160} the chemical pattern of the stars
HD 198160 and HD 198161 (black), compared to an average pattern of $\lambda$ Boo stars (blue).
The colors, panels and error bars used are similar to those of Fig. \ref{fig.pattern.HD15165}.
In both stars, most Fe-peak metals show a deficiency around 0.7-0.8 dex, similar to $\lambda$ Boo stars.
However, C and O also show subsolar values, being possibly low compared to other $\lambda$ Boo stars.
When comparing C with Fe abundances, the group of $\lambda$ Boo stars present [C/Fe]$\sim$1.21$\pm$0.35 dex \citep[excluding stars without CNO values
and the stars analyzed here, ][]{heiter02} with minimum and maximum values of 0.70 and 1.74 dex.
However, the stars HD 198160 and HD 198161 present [C/Fe] values of $\sim$0.54 and $\sim$0.48 dex,
being low compared to the average [C/Fe] and even lower than the minimum of 0.70 dex.
Then, we consider that these low [C/Fe] values possibly correspond to mild-$\lambda$ Boo stars,
rather than to an average $\lambda$ Boo object.
It is important to note that our C and O abundances were corrected by NLTE,
with average corrections of -0.15 dex and -0.81 dex for both stars.
In other words, if we only adopt LTE values without correction,
the C and O abundances would result closer to those of $\lambda$ Boo stars.
\begin{figure}
\centering
\includegraphics[width=8.0cm]{pat.HD198160.eps}
\includegraphics[width=8.0cm]{pat.HD198161.eps}
\caption{Chemical pattern of the stars HD 198160 and HD 198161 (black),
compared to an average pattern of $\lambda$ Boo stars (blue).}
\label{fig.pattern.HD198160}
\end{figure}
\subsection{On the physical association of the stars}
The stars studied in this work were previously reported as (possible) members
of binary/multiple systems, for the case of
HD 15164/65/65C \citep{andrievsky95,cherny98,murphy15},
HD 193256/281 \citep{paunzen12a,murphy15,gray17} and
HD 198160/161 \citep{paunzen12a,murphy15}.
The coordinates, proper motions and parallax of the stars (see Table \ref{table.parallax})
suggest that they are, at least, common proper motion objects.
We searched for our target stars in different binary catalogs from the literature \citep{shaya-olling11,tokovinin-lepine12,andrews17}.
In particular, \citet{andrews17} performed a search of binaries through a Bayesian formulation
in the Tycho-Gaia catalogs and derived likelihoods of Keplerian orbits.
For HD 15164/65, they reported a probability greater than 99\% that they form
a physical system.
\citet{shaya-olling11} developed a Bayesian method to discover non-random pairs using Hipparcos data.
They include HD 198160/161 in their catalog of binaries, however there is no
probability quoted for this pair.
Finally, we find no record for HD 193256/281 in these binary catalogs.
In this work, we assume that the stars form physical binary/multiple systems.
In the case of showing that the stars are not gravitationally bound,
then these stars would not be useful to test the accretion scenario.
\subsection{Are there two bonafide $\lambda$ Boo stars in binary systems?}
There is evidence in the literature supporting the accretion scenario.
For example, the anticorrelation of C and O with Si \citep{paunzen99}, first noted by \citet{hs93} for C.
It is expected that refractory elements like Fe and Si are condensed in dust, while the more volatile CNO and S
remain in the gaseous phase. Then, the selective accretion of gas will produce ratios [C/Si] or [O/Si]
larger than solar and reduced metallicity \citep{paunzen99}.
\citet{kamp01} reached a similar conclusion comparing the volatile species N and S with the more refractory Ca.
We should also expect that in stars with large $v\sin i$, the meridional circulation mixes material of
solar composition from the stellar interior into the convection zone so that any surface contamination due to
accretion of circumstellar material should vanish. This observation seems to be weakly verified \citep[see e.g., ][]{solano01},
and would require a larger sample of $\lambda$ Boo stars.
As we can see, the accretion scenario could be tested by different methods.
In this work, we focus on the presence of $\lambda$ Boo stars as members of binary systems
\citep[e.g., ][]{sturenburg93,paunzen02,heiter02,paunzen12a,paunzen12b}.
These are the following 12 systems (see Appendix):
HD 15164/65/65C, HD 38545, HD 64491, HD 84948, HD 111786, HD 141851, HD 148628/638,
HD 171948, HD 174005, HD 193256/281, HD 198160/161, and HD 210111.
Following the accretion scenario, two early-type stars in a binary system should display, in principle,
a similar $\lambda$ Boo pattern after passing through a diffuse cloud.
However, a binary or multiple system having a $\lambda$ Boo star together with a "normal" early-type component
would be difficult to explain under the accretion scenario.
This test of the accretion scenario would require a detailed analysis of both stars.
As explained in the Introduction, some stars that belong to these 12 systems were recently classified
as non-members or uncertain members of the $\lambda$ Boo class, such as HD 141851, HD 148638 and HD 193256
\citep[see, e.g., ][]{murphy15,gray17}.
Then, we wonder if any of these 12 systems really include two stars with bonafide $\lambda$ Boo chemical patterns.
It would be desirable a detailed abundance analysis in order to verify the true $\lambda$ Boo nature of a star,
initially suggested (for instance) by its classification spectra \citep[see, e.g., ][]{andrievsky02,heiter02}.
To our knowledge, only 5 out of the 12 systems present an abundance determination of both components:
HD 15164/65, HD 84948, HD 171948, HD 193256/281 and HD 198160/161 (three of them were analyzed in this work).
Some works present an abundance study only of the brighter component, such as in the case of HD 38545 \citep{sturenburg93}
or HD 64491 \citep{kamp01}, while other systems only have a spectral classification, such as HD 174005 \citep{gray17,murphy15}.
An inspection of the abundance values reported in the literature (see Appendix) shows that, in our opinion,
there is no binary system having two stars with bonafide $\lambda$ Boo chemical patterns.
The same is valid for the three systems analyzed in this work (HD 15164/65/65C, HD 193256/281 and HD 198160/161).
In fact, we cannot find even one binary system where the two stars present bonafide $\lambda$ Boo abundance patterns.
We consider that the closest candidates for having both stars display a $\lambda$ Boo pattern are possibly the binary systems
HD 84948, HD 171948 and HD 198160. These three systems show [C/Fe] values lower than 0.7 dex (the minimum [C/Fe]
of $\lambda$ Boo stars, see Sect. 4.4 and Appendix),
being perhaps mild-$\lambda$ Boo systems rather than clear $\lambda$ Boo objects.
Then, we find no clear evidence for the presence of two $\lambda$ Boo stars as members of binary systems.
However, this fact (if confirmed) does not rule out the accretion scenario.
On the other hand, a challenge for the accretion scenario, would be the presence of a bonafide $\lambda$ Boo star
and a normal early-type object, together in the same multiple system.
By reviewing the 12 systems studied (including the stars of this work) we found only one candidate: the system HD 15164/65/65C
analyzed here. The star A presents a $\lambda$ Boo pattern, while the stars B (early-type) and C (late-type) present
abundances closer to the Sun.
The different chemical composition between stars A and B was initially
attributed to a possible stellar capture \citep{andrievsky95}.
The probability of a binary capture depends on several factors, such as the number of stars per cubic parsec, the velocity
dispersion and the mass of the stars \citep[e.g.,][]{clarke-pringle91,boffin98}.
The capture is not a dominant formation process for solar-mass (coeval) binaries in dense
clusters \citep[e.g.,][]{clarke-pringle91,heller95,boffin98}.
To our knowledge, there is no known binary or triple system with an origin attributed to a capture.
On the other hand, there are multiple observations of young binaries embedded in dense cores \citep[e.g., ][]{sadavoy-stahler17},
and even an image of a triple protostar formed via disk fragmentation \citep{tobin16}.
Although the capture cannot be totally discarded, most observational evidence points toward the formation
of binary and multiple systems from a common molecular cloud.
Taking up the idea that the three stars were born together, it is difficult to explain the composition
of the stars of HD 15165 under the accretion scenario.
Then, there is an urgent need of additional binary and multiple systems to be analyzed through a detailed
abundance analysis, in order to test the accretion model of $\lambda$ Boo stars.
\section{Concluding remarks}
In the present work, we performed a detailed abundance determination of selected binary and multiple systems
with candidate $\lambda$ Boo stars, in order to test the accretion scenario.
Reviewing abundance values reported in the literature (see Appendix) shows that, in our opinion, there is no
binary system having two stars with bonafide $\lambda$ Boo chemical patterns. The same is valid for the three systems
analyzed in this work (HD 15164/65/65C, HD 193256/281 and HD 198160/161).
We consider that the closest candidates for having both stars display a $\lambda$ Boo pattern are possibly the binary systems
HD 84948, HD 171948 and HD 198160. However, these three binary systems are perhaps mild-$\lambda$ Boo systems rather
than clear $\lambda$ Boo objects. Then, in our opinion, current evidence of binary/multiple systems does not give
strong support to the accretion scenario of $\lambda$ Boo stars.
On the other hand, a binary/multiple system formed by a $\lambda$ Boo star and an early-type "normal" object,
would be difficult to explain under the accretion scenario. We found one candidate: the remarkable triple system HD 15164/65/65C.
It is composed of two early-type stars (A and B) and a late-type companion (C).
In particular, the late-type component of the system could be used as a proxy for
the initial composition of the system, constraining formation models of $\lambda$ Boo stars.
We found a $\lambda$ Boo pattern for the A star (HD 15165), while the stars B and C present
abundances closer to the Sun. Then, there is an urgent need of additional binary and multiple systems
to be analyzed through a detailed abundance analysis, in order to test the accretion model of $\lambda$ Boo stars.
\begin{acknowledgements}
We thank the referee Dr. Christopher Corbally for constructive comments that improved the paper.
The authors thank Dr. R. Kurucz for making their codes available to us.
CS acknowledges financial support from FONCyT (Argentina) through grant PICT 2017-2294
and the National University of San Juan (Argentina) through grant CICITCA E1134.
IRAF is distributed by the National Optical Astronomical Observatories,
which is operated by the Association of Universities for Research in Astronomy, Inc., under a cooperative agreement
with the National Science Foundation.
Based on data acquired at Complejo Astron\'omico El Leoncito, operated under agreement between
the Consejo Nacional de Investigaciones Cient\'ificas y T\'ecnicas de la Rep\'ublica Argentina and
the National Universities of La Plata, C\'ordoba and San Juan.
\end{acknowledgements}
|
1,314,259,995,012 | arxiv | \section{Introduction}
The phenomenon of Bose-Einstein (BE) condensation is, probably, one of the most striking manifestations of collective quantum effects \cite{Isihara,Huang}.
Due to its great importance the phase transition (PT) of BE condensation in the ideal gas is discussed in all textbooks on statistical mechanics.
In the vast majority of these textbooks it is written that the BE condensation of ideal gas is the 3-rd order phase transition (see, for instance, \cite{Isihara}), although
in the famous book \cite{Huang} (see the section 12.3 for details) it is argued that the BE condensation is the 1-st order PT
between liquid and gas. The main question we answer here is what kind of PT is the BE condensation in the quantum system with the simplest interaction, namely with the hard-core repulsion?
In all textbooks it is written that the BE condensate is the group of particles with zero momentum. However, the question is what is it? Is it a liquid or a solid?
In what follows we demonstrate that the pressure of the
non-relativistic BE particles with the hard-core interaction taken in the Van der Waals (VdW) approximation can be identically reduced to the one of the simplified version of statistical multifragmentation model (sSMM) \cite{Bondorf} with a vanishing surface tension of the constituents (see below).
This exactly solvable model was formulated in \cite{SMM0} and solved exactly in \cite{KABsmm1, KABsmm2,LFT1,Reuter08}, while its new and more realistic generalization can be found in \cite{KABsmm3}.
Although the sSMM \cite{SMM0, LFT1, KABsmm1, KABsmm2, Reuter08, KABsmm3} lacks the Coulomb interaction between the nuclei and the asymmetry energy of nuclei, its exact analytical solution established both in the
thermodynamic limit \cite{KABsmm1, KABsmm2, KABsmm3} and for finite volumes \cite{LFT1, Reuter08} is able to qualitatively describe the main properties of the nuclear liquid-gas PT.
The mathematical similarity between the VdW EoS of BE hard spheres and the sSMM allows us to show that the high density phase of BE particles with hard-core repulsion is a classical macro-cluster
which, similarly to the classical hard spheres, is
a solid state \cite{Simple_Liquids,Mulero} and not a liquid as it was argued in K. Huang book \cite{Huang}. In our analysis we also
analyzed the pressure of Fermi-Dirac (FD) particles with the hard-core repulsion which in many respects is similar to the one of sSMM, although it does not have the 1-st order PT.
This analysis allows us to find out some new relations between the
pressures of BE and FD particles with the hard-core repulsion, which help us to
demonstrate that the macro-cluster of BE particles is, indeed, a classical object.
The found relations allow us to clearly demonstrate under what conditions
the FD particles with the hard-core repulsion can have the first order phase transition.
The work is organized as follows. In Sect. II we analyze the pressure of BE and FD particles with the hard-core repulsion in the VdW approximation in a form convenient for the grand canonical ensemble. Sect. III is devoted to discussion of the properties of the macro-cluster with the help
of the BE-FD decomposition identities which identically represent the pressure of FD particles in terms of two BE pressures. Our conclusions are given in Sect. IV.
\section{BE condensation as the 1-st order Phase Transition}
The equation of state (EoS) of hard-spheres with BE or FD statistics in the grand canonical ensemble variables under the Van der Waals approximation for the hard-core repulsion can
be obtained either analyzing the free energy of the Van der Waals gas in canonical ensemble \cite{Qvdw1,Qvdw2}
or more rigorously from the quantum partition function in the grand canonical ensemble \cite{GenISCT2019}.
In the grand canonical variables it has the form
\begin{eqnarray}\label{Eq1}
&&\hspace*{-3.5mm}p_\pm = p^{id}_\pm (T, \nu) \equiv \pm T g \hspace*{-1.5mm} \int \hspace*{-1.5mm} \frac{d^3 k }{(2 \pi \hbar)^3} \ln \left[1 \pm \exp\left[ \frac{\nu - e(k)}{T} \right] \right] , ~ \rm where\quad \nu \equiv \mu - 4 V_0\, p_\pm ,\quad
\end{eqnarray}
where the lower sign is for the BE statistics, while the upper sign is for the FD one.
Here $T$ is temperature of the system, $\mu$ is its chemical potential, $\nu$ is an effective chemical potential, $g$ is the number of spin-isospin states (degeneracy factor), $m$ is the mass of particle,
$V_0 = \frac{4}{3} \pi R^3$ is the ``eigen volume'' of a particle, and $R$ is half of the minimal interaction range of the hard-core potential
$U(r)$
of a one component system (with a single hard-core radius)
\begin{eqnarray}\label{Eq2}
U(r) = \left\{ \begin{array}{lc}
0, & |r| > 2 R ,\\
\infty, & |r| \le 2 R . \\
\end{array} \right.
\end{eqnarray}
The potential $U(r)$ acts in a simplest possible way: (i)
if two particles 1 and 2, for definiteness, do not interact, i.e. the distance between them $|r| > 2 R$ is larger, than two hard-core radii $R$, then $U(r)=0$ and, therefore, their total energy is the sum of their single-particle (kinetic) energies $e_1$ and $e_2$; (ii) if these two particles interact, then $|r| = 2 R$ and $U(|r| =2R) = \infty$, but such configurations
do not contribute to the partition function (and all thermodynamic functions), since they
are suppressed by
the statistical operator $\exp\left[- \frac{\hat H_{hc}}{T} \right]$ due to an infinite potential energy (here $\hat H_{hc}$ denotes the Hamiltonian of the system). As a result, the total energy of the particles with the hard-core repulsion equals to the sum of their single-particle (kinetic) energies and this allows one to find the pressure (\ref{Eq1}) directly from the quantum partition function. In other words, the particles with the hard-core interaction behave as an ideal quantum gas.
This is an important property of this EoS which leads to a well-known practical consequence, namely that the energy per particle coincides with the one of the ideal gas. Due to this property the sophisticated equations of state with the hard-core repulsion, known as the hadron resonance gas model, are very successfully used to describe the multiplicities of hadrons \cite{IST2018,IST2018b,KABugaev19} and light (anti-, hyper)nuclei \cite{IST2020a,IST2020b} which are measured in the high energy nuclear collisions and to get reliable thermodynamic information about next to the last stage of such collisions.
For further analysis it is convenient to introduce the auxiliary functions
\begin{eqnarray}\label{Eq3}
&&\hspace*{-7.7mm} {\cal F}_{\pm} (p) \equiv T \sum\limits_{l=1}^{K_{max}} \frac{(\mp1)^{(l+1)}}{l} n^{id}_0 \left[\frac{T}{l}, \nu (p) \right], ~~ \Rightarrow \quad \rm Eq. \, (1) ~becomes \quad p_\pm = {\cal F}_\pm (p_\pm) \, , \\
&&\hspace*{-7.7mm} n^{id}_0 \left[\frac{T}{l}, \nu \right] = \int \hspace*{-1.2mm}\frac{g\, d^3 k }{(2 \pi \hbar)^3} e^{\frac{l \left[ \nu -\sqrt{m^2+\vec k^2} \right] }{T} } \simeq
\label{Eq4}
\int \hspace*{-1.2mm}\frac{g\, d^3 k }{(2 \pi \hbar)^3} e^{\frac{l \left[ \nu - m - \frac{k^2}{2 m } \right] }{T} } = g \left[ \frac{m \, T}{ 2 \pi\, l \,\hbar^2} \right]^\frac{3}{2} e^{\frac{l \left[ \nu - m\right] }{T} } ,~
\end{eqnarray}
where the particle number density of Boltzmann point-like particles with temperature $T$ and chemical potential $\nu $ is denoted as $n^{id}_0 \left[T, \nu \right] $ and the upper limit of sum in Eq. (\ref{Eq3}) is $K_{max} \rightarrow \infty$. To avoid the unnecessary complexity in our derivations throughout this work we regard the limit $K_{max}= 2K +1 \rightarrow \infty$ strictly in this sense. For the BE statistics (sign $-$ in Eqs. (\ref{Eq3}) and (\ref{Eq4})) it is not important, but it is very important for the case of FD statistics (sign $+$ in Eqs. (\ref{Eq3}) and (\ref{Eq4})).
The function ${\cal F}_{\pm}$ in (\ref{Eq3}) is, apparently, obtained by expanding the $\ln$-function in Eq. (\ref{Eq1}).
For large values of $l \gg 1$ the inequality $l m \gg T$ is valid for any non-vanishing mass $m$ and, therefore, in this case one can use the non-relativistic approximation in the left hand side momentum integral in Eq. (\ref{Eq4}) and get the right hand side expression (\ref{Eq4}). However, for convenience we will use
such an approximation for any $l \ge 1$, assuming that considered temperatures are very low compared to the particle mass,
i.e. $m \gg T$. Moreover, in what follows we will always use the non-relativistic approximation for particle energy, unless it is specified explicitly.
To make a direct comparison with the sSMM \cite{SMM0,KABsmm1, KABsmm2, LFT1, Reuter08}
we explicitly write Eq. (\ref{Eq1}) for the BE statistics $(a=-1)$
\begin{eqnarray}\label{Eq5}
\hspace*{-5.5mm} p_- &=& T \, g \left[ \frac{m \, T}{ 2 \pi \hbar^2} \right]^\frac{3}{2}
\sum\limits_{k=1}^{K_{max}} \frac{ (-a)^{(k+1)} }{ k^\frac{5}{2} }
\exp \left[ \frac{k (\mu - m - 4 V_0\, p_- ) }{T} \right] , ~
\end{eqnarray}
using Eq. (\ref{Eq3}) and the right hand side Eq. (\ref{Eq4}).
Comparing Eq. (\ref{Eq5}) with Eq. (15) from Ref. \cite{KABsmm1}, one can see that the pressure of BE hard spheres
is mathematically absolutely equivalent to the sSMM with the ``volume'' $4 k V_0$ of $k$-nucleon nuclei,
with the vanishing surface tension of all nuclei and with the Fisher exponent $\tau_F = \frac{5}{2}$ (or for the index $\tau \equiv \tau_F + \frac{3}{2} = 4$ in terms of Refs. \cite{KABsmm1,KABsmm2}).
Due to the mathematical similarity to the sSMM, using the exact solution of sSMM \cite{KABsmm1, KABsmm2, Reuter08} one can immediately conclude that Eq. (\ref{Eq5}) describes two phases:
the gaseous phase $p_g = p_- (T, \mu)$ for the low densities defined by the inequality $\mu < \mu_c(T) $, and high density
phase pressure $p_s = \frac{(\mu-m)}{4 V_0 }$ for $\mu > \mu_c(T) $.
According to the Gibbs criterion the PT occurs, if the pressures of two phases are equal, i.e.
$p_g (T, \mu_c) = p_s (T, \mu_c)$. This equation defines the phase equilibrium curve $\mu = \mu_c(T) $ of the 1-st order PT.
At the PT curve $\mu = \mu_c(T) $ the effective chemical potential becomes
\begin{eqnarray}\label{Eq6}
\hspace*{-5.5mm} \nu_c &=& \mu_c - 4 V_0 \, p_g (T, \mu_c) = \mu_c - 4 V_0 \, p_s (T, \mu_c) \equiv m \,.~
\end{eqnarray}
Using this result one can identically rewrite the pressure at PT curve as
\begin{eqnarray}\label{Eq7}
\hspace*{-5.5mm} p_{c-} &=& T \, g \left[ \frac{m \, T}{ 2 \pi \hbar^2} \right]^\frac{3}{2} \sum\limits_{k=1}^{K_{max}} \frac{ 1 }{ k^\frac{5}{2} } \underbrace{=}_{K_{max} \rightarrow \infty}
\frac{ T \, g}{\Gamma\left[ \frac{5}{2}\right]}
\left[ \frac{m \, T}{ 2 \pi \hbar^2} \right]^\frac{3}{2} \int\limits_0^\infty \frac{t^\frac{3}{2}}{e^t-1} dt
, ~
%
\end{eqnarray}
where we used the integral representation of the Riemann $ \zeta\left[ \frac{5}{2}\right]$-function \cite{Prudnikov}. Here $\Gamma (n+1) = n!$ is the usual gamma-function. Taking $t = \frac{\omega}{T}$ in the integral in Eq. (\ref{Eq7}), one recovers the traditional representation of pressure as an integral over the particle energy $\omega$ \cite{Isihara}.
Although the critical pressure (\ref{Eq7}) coincides with the one obtained usually for the point-like particles \cite{Isihara},
the particle number density of gas $n_-$ is modified due to the presence of hard-core interaction. Using the particle number density
of the gas of point-like particles $n^{id}_{-} (T, \nu)$ one can write
\begin{eqnarray}\label{Eq8}
\hspace*{-5.5mm} n^{id}_{-} (T, \nu) &\equiv & \frac{\partial p^{id}_{-} (T, \nu)}{\partial \nu} = g \left[ \frac{m \, T}{ 2 \pi \hbar^2} \right]^\frac{3}{2} \sum\limits_{k=1}^{K_{max}} \frac{ 1 }{ k^\frac{3}{2} }
\exp \left[ \frac{k (\nu-m) }{T} \right] , ~ \\
%
\label{Eq9}
%
\hspace*{-5.5mm} n_{-} (T, \nu) &\equiv & \frac{\partial p_{-} }{\partial \mu} = \frac{n^{id}_{-} (T, \nu)}{1+ 4V_0 n^{id}_{-} (T, \nu)} .
%
\end{eqnarray}
From Eq. (\ref{Eq9}) one can see that at the PT curve the particle number density of the gas is smaller than the
particle number density of the dense phase, since
\begin{eqnarray}\label{Eq10}
\hspace*{-5.5mm} n_{-} (T, \nu_c) & = & \frac{n^{id}_{-} (T, \nu_c)}{1+ 4V_0 n^{id}_{-} (T, \nu_c)} < n_s \equiv \frac{\partial p_s}{\partial \mu} = \frac{1}{4 V_0}~ ,
%
\end{eqnarray}
and, hence, for any finite temperature $T$ the particle number density of point-like particles $n^{id}_{-} (T, \nu_c)$ is finite too. Therefore, the particle number density of gaseous phase is
smaller than the one of the high density phase as indicated by the inequality (\ref{Eq10}).
As a result, the BE PT is of the 1-st order.
Substituting into Eqs. (\ref{Eq8}) and (\ref{Eq9}) the value $\nu=\nu_c$ one can get the temperature of BE condensation as
\begin{eqnarray}\label{Eq11}
\hspace*{-5.5mm} T^{BE}_c & =&\frac{2 \pi \hbar^2}{m} \left[\frac{1}{g \zeta\left[ \frac{3}{2}\right]}\cdot \frac{n_-}{1- 4 V_0 n_-} \right]^\frac{2}{3} . ~
%
%
\end{eqnarray}
Note that for large values of the excluded volume $V_0$ and high particle number densities $n\rightarrow \frac{1}{4 V_0}$
the hard-core repulsion may essentially increase
the value of the PT temperature and make it more realistic compared to the traditional estimate
obtained for the point-like particles \cite{Isihara}, i.e. if one takes the limit $V_0 \rightarrow 0$ on the right hand side of Eq. (\ref{Eq11}).
It is necessary to stress that the above results are generic in a sense that one can
consider the effective values of degeneracy factor $g \rightarrow g^{eff}$ and the one of
excluded volume $4 V_0 \rightarrow V_0^{eff}$ which correspond to a more realistic EoS than the VdW EoS and which is able to reproduce the pressure of quantum particles beyond the second virial coefficient approximation at least in some (even in a narrow)
range of thermodynamic parameters.
Since we are also interested in analyzing the case of FD particles, we would like to
obtain the above result using a different approach, namely without referring to the sSMM results of Refs. \cite{KABsmm1,KABsmm2,LFT1}.
First we consider the limit $\mu \rightarrow \infty$ in Eq. (\ref{Eq5}) for very large, but finite values of $K_{max}$. Apparently, this limit should correspond to the dense phase of our EoS. Then in this limit $V_0 p_s /T \gg 1 $ and for $\mu > m + 4 V_0 p_s$ the leading terms of Eq. (\ref{Eq5}) for $a=-1$ can be written as
\begin{eqnarray}\label{Eq12}
&&\hspace*{-15.5mm} \ln \left[\frac{p_s K^\frac{5}{2}_{max}}{T \phi(T) }\right] \simeq
K_{max} \left[ \mu - ( m + 4 V_0 p_s)\right] . ~
\end{eqnarray}
Here the thermal density of the gas of classical hard spheres is denoted as $\phi(T) = g \left[ \frac{m \, T}{ 2 \pi \hbar^2} \right]^\frac{3}{2}$. In deriving Eq. (\ref{Eq12}) we have chosen
the large values of chemical potential $\mu > \mu_c$, which are not allowed in the thermodynamic limit, but for finite systems they can be used \cite{LFT1,Reuter08}.
Now from Eq. (\ref{Eq12}) one can see that for $K_{max} \rightarrow \infty$ the logarithmic correction disappears and the pressure of dense phase $p_s = \frac{\mu -m}{4 V_0} $ acquires a familiar form.
In order to show that the EoS (\ref{Eq5}) for $a=-1$ has the 1-st order PT we examine the derivative $D^1p_- \equiv T \frac{\partial p_-}{\partial \rho^{id}_-}$. Hereafter to avoid a confusion we will distinguish the particle number density of point-like particles as the function given
by the right hand side of Eq. (\ref{Eq8}) and the same quantity as the independent variable $\rho^{id}_-$. The derivative
$D^1p_- $ is more convenient to employ for the spinodal instability point of the gas than the derivative $\frac{\partial p_-}{\partial n_-}$, since its expression is simpler. Note that vanishing of the spinodal instability point of the gas taken at the given isotherm signals about the 1-st order PT \cite{Isihara}. Indeed, the expression for $D^1p_- $
\begin{eqnarray}\label{Eq13}
& D^1p_- \equiv T \frac{\partial p_-}{\partial \nu} \frac{\partial \nu}{\partial \rho^{id}_-} = \left[ \sum\limits_{k=1}^{K_{max}} \frac{ 1 }{ k^\frac{3}{2} }
\exp \left[ \frac{k (\nu - m ) }{T} \right] \right] \left[ \sum\limits_{k=1}^{K_{max}} \frac{ 1 }{ k^\frac{1}{2} }
\exp \left[ \frac{k (\nu - m ) }{T} \right] \right]^{-1} , ~~
\end{eqnarray}
shows that, if the effective chemical potential $\nu =\mu - 4 V_0\, p_- $ approaches the value $\nu=m$, then the derivative $\frac{\partial \rho^{id}_-}{\partial \nu} \equiv \frac{\partial n^{id}_-}{\partial \nu} \rightarrow \infty$ diverges for $K_{max} \rightarrow \infty$ and, hence, in this limit $ D^1p_- =0$. Thus, we have found that
the spinodal instability point of the gas of BE hard spheres coincides with the PT curve.
Now we turn to the analysis of the FD particles with the hard-core repulsion. For $\nu \le m $ the pressure of such particles $p_+ $ and its $\nu$-derivative can be explicitly written as
\begin{eqnarray}\label{Eq15}
\hspace*{-5.5mm} p_+ &=& T \, g \left[ \frac{m \, T}{ 2 \pi \hbar^2} \right]^\frac{3}{2}
\sum\limits_{k=1}^{K_{max}} \frac{ (-1)^{(k+1)} }{ k^\frac{5}{2} }
\exp \left[ \frac{k (\nu - m ) }{T} \right] \Bigg|_{\nu=\mu - 4 V_0\, p_+} , ~\\
\label{Eq16}
\hspace*{-5.5mm} n^{id}_+ &\equiv & \frac{\partial p_+}{\partial \nu} = g \left[ \frac{m \, T}{ 2 \pi \hbar^2} \right]^\frac{3}{2}
\sum\limits_{k=1}^{K_{max}} \frac{ (-1)^{(k+1)} }{ k^\frac{3}{2} }
\exp \left[ \frac{k (\nu - m) }{T} \right] \Bigg|_{\nu=\mu - 4 V_0\, p_+} .
\end{eqnarray}
A similarity with the sSMM can be more clearly seen for $\nu =\mu - 4 V_0\, p_+ \rightarrow m$, if in the sum (\ref{Eq15}) one adds the even terms to the preceding odd ones
\begin{eqnarray}\label{Eq16n}
\hspace*{-5.5mm} p_+ &=& T \, g \left[ \frac{m \, T}{ 2 \pi \hbar^2} \right]^\frac{3}{2}
\left[
\sum\limits_{k \in odd }^{K_{max}-2} \frac{ \exp \left[ \frac{k (\nu - m ) }{T} \right] }{ k^\frac{5}{2} }
\left[ 1- \frac{k^\frac{5}{2}}{(k+1)^\frac{5}{2}} \exp \left[ \frac{ (\nu - m ) }{T} \right]\right] + \frac{ \exp \left[ \frac{K_{max} (\nu - m ) }{T} \right] }{ K_{max}^\frac{5}{2} }
\right] \simeq ~\\
%
\hspace*{-5.5mm} & \simeq & T \, g \left[ \frac{m \, T}{ 2 \pi \hbar^2} \right]^\frac{3}{2}
\left[ \sum\limits_{k \in odd }^{K_{max}-2} \frac{ 5 }{2 \, k^\frac{7}{2} }
\exp \left[ \frac{k (\nu - m ) }{T} - \frac{5}{2 k} \right] + \frac{ \exp \left[ \frac{K_{max} (\nu - m ) }{T} \right] }{ K_{max}^\frac{5}{2} } \right], ~
\label{Eq17n}
\end{eqnarray}
where we expanded the binomial $(k+1)^\frac{5}{2}$ keeping two leading terms and approximated the ratio $k^\frac{5}{2}/(k+1)^\frac{5}{2} \simeq \exp[-\frac{5}{2k}]$. Evidently, this approximation is suited for $k \gg 1$, but for qualitative analysis it is very convenient, since in the vicinity of PT the main role is played by the largest cluster. Eq. (\ref{Eq17n}) shows that, apart from the term with $k= K_{max}$, in the left vicinity of the point
$\nu \rightarrow m - 0$ the EoS for FD particles with the hard-core repulsion is similar to the sSMM
for the clusters of the odd number of constituents which have
the Fisher exponent $\tau_F = \frac{7}{2}$ and a vanishing value of surface tension coefficient.
Apparently, from Eqs. (\ref{Eq15}) and (\ref{Eq17n}) one can also derive Eq. (\ref{Eq12}) and
establish the pressure of dense phase $p_s = \frac{\mu-m}{4 V_0}$ similarly to the case of BE particles. However, the
derivative
%
\begin{eqnarray}\label{Eq17}
\frac{\partial \rho^{id}_+}{\partial \nu} \equiv \frac{\partial^2 p_+}{\partial \nu^2} = \frac{g}{T} \left[ \frac{m \, T}{ 2 \pi \hbar^2} \right]^\frac{3}{2}
\sum\limits_{k=1}^{K_{max}} \frac{ (-1)^{(k+1)} }{ k^\frac{1}{2} }
\exp \left[ \frac{k (\nu - m) }{T} \right] ,
\end{eqnarray}
with respect to the effective chemical potential $\nu = \mu - 4 V_0\, p_+$
is finite for $\nu=m$, since, in contrast to the case of BE particles, the sum in Eq. (\ref{Eq17}) converges in the limit $K_{max}\rightarrow \infty$.
Indeed, with the help of integral representation of the Riemann $\zeta$-function \cite{Prudnikov} for $\nu=m$ one finds
\begin{eqnarray}\label{Eq18}
\hspace*{-5.5mm} \sum\limits_{k=1}^{\infty} \frac{ (-1)^{(k+1)} }{ k^\frac{1}{2} } = \frac{1}{\Gamma\left[ \frac{1}{2}\right]} \int\limits_0^\infty \frac{t^{-\frac{1}{2}} }{e^t+1} dt \simeq 0.6049
, ~
%
\end{eqnarray}
and, therefore, the derivative $D^1p_+ \equiv T \frac{\partial p_+}{\partial \rho^{id}_+}$ does not vanish for $\nu=m$ and, hence, there is no 1-st order PT in this case.
In our opinion this is a very simple and good example that the presence of a macro-cluster with
the finite probability in a finite system is a necessary, but not a sufficient condition of the 1-st order PT existence in such a system. We believe this is an important message to be taken into account by the authors of Refs. \cite{Francesca2014,DasGupta2018} who consider the presence and gradual disappearance of the macro-cluster as a signal of the 1-st order nuclear liquid-gas PT
in finite systems. The whole point is that in finite systems the macro-cluster of maximal size can appear as
the metastable state of finite probability not only for the 1-st order PT, but also for the 2-nd order PT or even for the cross-over \cite{LFT1,Reuter08}.
The present analysis shows once more
that for vanishing surface tension coefficient the value of the Fisher exponent $\tau_F$ defines the PT order \cite{Reuter08}.
One can readily check that all the results on PT existence remain valid, if one uses the relativistic expression for particle energy, i.e. if one makes a replacement $m + \frac{k^2}{2 m} \rightarrow \sqrt{m^2 + k^2}$. However, in this case the BE condensation does not look mathematically identical
to the sSMM and, hence, the corresponding analysis is not made here.
\section{Decomposition identity between bosonic and fermionic pressures}
Apart from the formal difference between the EoS of the BE and FD particles we would like to
understand (i)
whether our interpretation of the appearance of classical macro-clusters is correct, and (ii) under what circumstances the appearance of macro-cluster can be associated with the 1-st order PT in the system of FD particles.
Indeed, an absence of the 1-st order PT in the EoS of FD particles with the hard-core repulsion may
question the validity of our hypothesis about the classical macro-clusters existence and, therefore, one may think that BE condensation leads to an appearance of quantum macro-cluster with BE statistics, while
the quantum macro-cluster with FD statistics cannot be formed due to some reason, namely due to the Pauli blocking principle.
To demonstrate the validity of our hypothesis we consider a peculiar mathematical identity between the BE and FD pressures which we call {\it a BE-FD decomposition identity}
\begin{eqnarray}\label{EqB11}
&&\underbrace{- T g \hspace*{-1.5mm} \int \hspace*{-1.5mm} \frac{d^3 k }{(2 \pi \hbar)^3} \ln \left[{\textstyle 1 - \exp\left[ \frac{\nu - \sqrt{m^2 +k^2}}{T} \right] }\right] }_{p_B\left(\frac{\nu}{T}, m, g\right)}\equiv
\nonumber \\
&& \equiv T g \hspace*{-1.5mm} \int \hspace*{-1.5mm} \frac{d^3 k }{(2 \pi \hbar)^3} \left\{
\underbrace{
\ln \left[{\textstyle 1 + \exp\left[ \frac{\nu - \sqrt{m^2 +k^2}}{T} \right] } \right]}_{p_F\left(\frac{\nu}{T}, m, g\right)}
\underbrace{ -
\frac{1}{8} \ln \left[{\textstyle 1 - \exp\left[ \frac{2 \nu - \sqrt{4m^2 +k^2}}{T} \right] } \right] }_{p_B\left(\frac{2\nu}{T}, 2m, 2^{-3}g\right)} \right\} ,~
\end{eqnarray}
which will
help us to understand the appearance of a classical macro-cluster for BE and FD statistics. The fact that now we do not use the non-relativistic approximation to the particle energy is not important.
The BE-FD decomposition identity (\ref{EqB11}) can be obtained in the following sequence of steps: first we note that
\begin{eqnarray}\label{EqB12}
&& \hspace*{-3.5mm} {\textstyle \ln \left[ 1 - \exp\left[ \frac{2 \nu - \sqrt{4m^2 + 4k^2}}{T} \right] \right] \equiv
\ln \left[ 1 + \exp\left[ \frac{\nu - \sqrt{m^2 +k^2} }{T} \right] \right] + }
{\textstyle \ln \left[ 1 - \exp \left[ \frac{ \nu - \sqrt{m^2 +k^2}}{T} \right] \right] }. ~
\end{eqnarray}
Next one can integrate Eq. (\ref{EqB12}) over $d^3 k$ with the degeneracy factor $g$ and change the particle momentum on the left hand side of Eq. (\ref{EqB12}) as $2 k \rightarrow k$ in the momentum integral to get a multiplier $\frac{1}{8}$. Finally, interchanging the positions of integrals for lighter and heavier bosons
one arrives at Eq. (\ref{EqB11}).
Eq. (\ref{EqB11}) shows that for the given values of $T$ and $\nu$ the pressure of ideal gas of bosons (the upper line of Eq. (\ref{EqB11})) of mass $m$ and degeneracy $g$ can be identically decomposed
into the sum of two terms. The first pressure corresponds to the ideal gas of fermions with same mass and degeneracy (the first term on the right hand side of Eq. (\ref{EqB11})), while the second pressure describes the bosons with the double mass and double
charge (and the double excluded volume $V_0$, if $\nu = \mu - 4 V_0 p$ accounts for the effects of hard-core repulsion as above), but with the reduced degeneracy $\frac{g}{8}$. Then the heavy bosons may be interpreted as ``pairs'' of fermions.
Applying the BE-FD decomposition identity (\ref{EqB11}) $(n-1)$ times to the pressure $p_B\left(\frac{2\nu_B}{T}, 2m, 2^{-3}g\right)$ of ``pairs'',
one can identically extract the contribution of bosonic macro-cluster ($n \gg 1$) with the mass $2^n m$, the charge $2^n$ and the degeneracy
$2^{-3 n}g$ from the pressure of bosons of mass $m$, charge $1$ and degeneracy $g$ and get
the following useful relation
\begin{eqnarray}\label{EqB13}
p_B\left(\frac{\nu_B}{T}, m, g\right) \equiv & p_B\left(\frac{2^n\nu_B}{T}, 2^n m, 2^{-3n}g\right) +
\sum\limits_{k=0}^{n-1} p_F \left(\frac{2^k\nu_B}{T}, 2^km, 2^{-3k}g \right) ,
\end{eqnarray}
where $ p_F \left(\frac{2^k\nu_B}{T}, 2^km, 2^{-3k}g \right)$ denotes the pressure of auxiliary fermions with the mass $2^k m$, the charge $2^k$ and degeneracy $2^{-3 k}g$. For low temperatures $T\ll m$ one can safely use the non-relativistic approximation
for the energy of particle.
Applying the identity (\ref{EqB13}) to the gas pressure of bosons $p_-$ of the EoS considered in the preceding section, i.e. for $\nu_B \le \nu_c$, one can immediately conclude that for $\nu_B < m$ the effective chemical potential of the bosonic macro-cluster on the right hand side of Eq. (\ref{EqB13}) is $(\nu_B -m) 2^n \rightarrow - \infty$ for $n\gg 1$ and, hence, such a macro-cluster does not exist for $\nu_B < m$. It is evident, that the bosonic macro-cluster on the right hand side of Eq. (\ref{EqB13}) does not exist for $\nu_B = m$ as well, since for $n\gg 1$ its degeneracy
$2^{-3n}g \rightarrow 0$ vanishes. Apparently, this argument is valid for the case $\nu_B < m$ as well.
Therefore, in the whole gaseous phase and at the condensation curve of the EoS of BE particles
with the hard-core repulsion considered in the preceding section the bosonic macro-cluster
is absent, i.e. for $\nu_B \le m$ one finds
\begin{eqnarray}\label{EqN23}
p_B\left(\frac{\nu_B}{T}, m, g\right) = & \sum\limits_{k=0}^{\infty} p_F \left(\frac{2^k\nu_B}{T}, 2^km, 2^{-3k}g \right) ,
\end{eqnarray}
i.e.\ the pressure of BE particles can be identically written as an infinite sum of the pressures of FD particles with certain masses, charges and degeneracies.
In the preceding section it was shown that the pressure of FD particles with the hard-core repulsion does not have the 1-st order PT and, thus, in the thermodynamic limit there is no fermionic macro-cluster for each pressure staying on the right hand side of Eq. (\ref{EqN23}). However, the pressure of BE particles staying on the left hand side of Eq. (\ref{EqN23}) demonstrates the 1-st order PT of the BE condensation. Therefore, the only
possible explanation out of this apparent contradiction is that the BE condensation leads to an appearance of the classical macro-cluster which is the sum of individual classical macro-clusters generated by the set of fermionic pressures that are staying on the right hand side of Eq. (\ref{EqN23}).
Now it is appropriate to discuss the properties of the dense phase of BE hard spheres within the VdW approximation.
Since the pressure of dense phase $p_s = \frac{\mu-m}{4 V_0}$ does not depend on the temperature explicitly, then the entropy density of dense phase $s_s = \frac{\partial p_s}{\partial T} =0 $ is zero at any temperature, while the particle number density of this phase is $n_s = \frac{\partial p_s}{\partial \mu} = \frac{1}{4 \, V_0}$. Furthermore, from the thermodynamic identity
\begin{eqnarray}\label{Eqn24}
\varepsilon_s = Ts_s + \mu n_s - p_s = \frac{m}{4 \, V_0}
\end{eqnarray}
one can see that the energy density $\varepsilon_s$ of the dense phase, indeed, corresponds to the particles at rest which have the highest possible density within the adopted approximation.
Therefore, similarly to the case of classical hard spheres it is more appropriate to call this phase as the solid state \cite{Simple_Liquids,Mulero} (since there is no attraction among the particles and the surface tension coefficient is zero).
Furthermore, it seems more appropriate to consider the BE condensation of particles with the hard-core repulsion as the deposition PT from a gas to a solid.
Of course, one has to remember that, on the other hand, it is a condensate of hard spheres with a vanishing momentum.
A mathematical similarity with the exact solution of sSMM allows one to reliably interpret the BE condensation of hard spheres as the 1-st order PT in which the gas condenses into a classical macro-cluster of the size $4V_0 K_{max}$ with $K_{max}\rightarrow \infty$ in the thermodynamic limit.
Hence, at the PT curve there should exist the phase boundary. As it was shown above, formally, a macro-cluster corresponds to the term $k= K_{max}$
in the expression for pressure $p_-$ in Eq. (\ref{Eq5}). Therefore, formally a macro-cluster can be considered as a single classical particle which is at rest. From Eq. (\ref{Eq5}) one can see that its
statistical weight is the Boltzmann one. Such an interpretation is similar to the sSMM \cite{KABsmm1,KABsmm2} with the difference that in the sSMM
a macro-cluster is a droplet of liquid which has the non-vanishing surface tension below the critical temperature \cite{Bondorf,KABsmm1,KABsmm2,KABsmm3} and a finite entropy which vanishes at $T=0$ only.
Moreover, considering the EoS (\ref{Eq5}) with the effective values of $g \rightarrow g^{eff}$ and $4 V_0 \rightarrow V_0^{eff}$
which allow one at least in the narrow range of thermodynamic parameters to reproduce the realistic EoS of quantum particles at high densities close to $0.45/V_0-0.55/V_0$
and sufficiently high temperature $T$ for which the effects of quantum statistics are not important, one should still have the BE condensation PT on the one hand. On the other hand,
this should be the region of the deposition phase transitions for the classical hard spheres \cite{Simple_Liquids, Mulero}. Thus, we again should conclude that at high temperature $T$ the BE condensation of quantum hard spheres should match with the deposition PT of
the classical hard spheres.
Coming back to the ideal gas of BE particles one should consider the limit $V_0 \rightarrow 0$ in all formulas above. In this limit the high density state has infinite particle number density and, hence, it is inaccessible. However, for any infinitesimally small eigenvolume $V_0$ our conclusions
about the deposition PT remain valid and, therefore, the whole argumentation of K. Huang
in Ref. \cite{Huang} about the BE condensation as the 1-st order PT is correct. Only the K. Huang
interpretation of this PT as a gas-liquid one seems to be inconsistent with the modern interpretation
of the PT of hard spheres.
At the moment it is not clear, if it is just a coincidence that at low pressures the real gases of mono- and diatomic molecules, except for the helium-4 for pressures below 25 atm., indeed, demonstrate the deposition PT under cooling. Maybe a more realistic EoS of quantum particles can resolve this problem.
It is remarkable that the BE-FD decomposition identity (\ref{EqB11}) allows one to establish another important interpretation. The right hand side of the identity (\ref{EqB11}) corresponds to the pressure of a mixture of the
ideal gases of fermions and their pairs (which are the bosons) with the same degeneracy, but with the double mass and double charge, which are taken with the weight $1/8$. The left hand side of the identity (\ref{EqB11}) shows that such a mixture should experience the 1-st order PT of BE condensation. From the famous work of L. N. Cooper \cite{Cooper} it is known that the pairing
of fermions can, indeed, happen under not very restrictive conditions leading to the BE condensation of fermionic pairs
and the BE-FD decomposition identity (\ref{EqB11}) illustrates such a possibility for a mixture
discussed above. However, for the appearance of Cooper pairs the fermions must have an attraction, which is absent in the
EoS discussed here.
It is evident that the identity (\ref{EqB12}) is valid for any dimension $D = 1, 2, 3, ...$. Introducing the pressures of BE particles (sign $-$) and FD particles (sign $+$) of mass $m$ that have the chemical potential $\nu$ and temperature $T$
\begin{eqnarray}\label{EqN25}
&&\hspace*{-3.5mm} p_{D\pm} (T, \nu, m) \equiv \pm T g \hspace*{-1.5mm} \int \hspace*{-1.5mm} \frac{d^D k }{(2 \pi \hbar)^D} \ln \left[1 \pm \exp\left[ \frac{\nu - e_m(k)}{T} \right] \right] , ~
\mbox{where}\quad e_m (k) \equiv \sqrt{m^2+ k^2} ,
\end{eqnarray}
one can generalize the BE-FD decomposition identity (\ref{EqB11}) to the dimension $D$
for the fractional mass and charge values
\begin{eqnarray}\label{EqN26}
p_{D-}\left( T, \nu, m \right) \equiv & 2^D p_{D-}\left( T, \frac{\nu}{2}, \frac{m}{2} \right) -
2^D p_{D+}\left( T, \frac{\nu}{2}, \frac{m}{2} \right) .
\end{eqnarray}
For chargeless and massless particles, i.e. for $\nu=0$ and $m=0$, the BE-FD decomposition identity (\ref{EqN26}) gives us the following relation
between the BE and FD momentum integrals
\begin{eqnarray}\label{EqN27}
&& \hspace*{-3.5mm}
p_{D-}\left( T, 0, 0 \right) \equiv \frac{2^D}{2^D-1} p_{D+}\left( T, 0, 0 \right)
\quad \Rightarrow \quad \int\limits_0^\infty \frac{x^D \, dx}{e^x -1} = \frac{2^D}{2^D-1} \int\limits_0^\infty \frac{x^D \, dx}{e^x +1} \, ,
\end{eqnarray}
which for $D=3$ leads to a well-known identity
\begin{eqnarray}\label{EqN28}
&& \hspace*{-3.5mm}
\int\limits_0^\infty \frac{x^3 \, dx}{e^x -1} = \frac{8}{7} \int\limits_0^\infty \frac{x^3 \, dx}{e^x +1} = \frac{\pi^4}{15} \, .
\end{eqnarray}
Note, however, that
the right equation (\ref{EqN27}) follows from the left one after integrating the pressures of massless and chargeless particles over the angles, first, and, then, after integrating them over $d k^D$ by parts.
Applying the identity (\ref{EqN26}) to its right hand side $n$ times, one obtains another identity
\begin{eqnarray}\label{EqN29}
p_{D-}\left( T, \nu, m \right) \equiv & 2^{D n} p_{D-}\left( T, \frac{\nu}{2^n}, \frac{m}{2^n} \right) -
\sum\limits_{k=1}^n 2^{D k} p_{D+}\left( T, \frac{\nu}{2^k}, \frac{m}{2^k} \right) .
\end{eqnarray}
For $n \gg \ln \left[ \max (\frac{\nu}{T}; \frac{m}{T}) \right]$ with the help of identity (\ref{EqN27}) one can establish an approximative relation
\begin{eqnarray}\label{EqN30}
p_{D-}\left( T, \nu, m \right) \simeq & \frac{2^{D(n+1)}}{2^D-1} p_{D+}\left( T, \frac{\nu}{2^n}, \frac{m}{2^n} \right) -
\sum\limits_{k=1}^n 2^{D k} p_{D+}\left( T, \frac{\nu}{2^k}, \frac{m}{2^k} \right) ,
\end{eqnarray}
which again relates the pressures of BE and FD particles. Note that Eqs. (\ref{EqN25}), (\ref{EqN26}), (\ref{EqN29}) and (\ref{EqN30}) are valid for the particles with the hard-core repulsion, i.e. for $\nu = \mu - 2^{(D-1)} V_D \, p_{D-}\left( T, \nu, m \right)$, where the eigenvolume of particles in the $D$-dimensional space is denoted as $V_D$.
\section{Conclusions}
In this work we recapitulate the VdW equation of state of BE particles with the hard-core repulsion in the grand canonical ensemble.
Our analysis shows that the pressure of non-relativistic BE particles is mathematically equivalent to the one of the exactly solvable model with the 1-st order PT known as the sSMM. The EoS of BE particles corresponds to the sSMM with the vanishing surface tension coefficient and the Fisher exponent $\tau_F = \frac{5}{2}$.
Such a similarity allows us to show that within the present approach the high density phase of BE particles is a classical macro-cluster with vanishing entropy at any temperature which, similarly to the classical hard spheres, is a kind of solid state. Considering the limit of very small eigenvolume of BE particles we argue that the ideal gas of BE particles has the 1-st order PT as it was suggested by K. Huang in his famous textbook \cite{Huang} a long time ago.
To explicitly demonstrate that a macro-cluster with the BE statistics does not exist in this EoS we investigate some peculiar relations between the pressures of BE and FD particles, the BE-FD decomposition identities, showing that under some conditions the pressure of FD particles can be identically rewritten in terms of two BE pressures. Moreover, we establish an exact representation of the pressure of BE particles of mass $m$, charge $1$
and degeneracy $g$ as a series of pressures
of FD particles with the masses $2^k m$, the charge $2^k$ and degeneracy $2^{-3 k}g$, where $k$ are positive natural numbers. These new relations help us to correctly interpret the properties of a high density phase of BE particles with hard-core repulsion.
In fact, here we establish a principally new look at the problem of BE condensation. Of course, the considered model is oversimplified, but now one can use all the achievements of the SMM \cite{Bondorf,SMM0,KABsmm1,KABsmm2,KABsmm3}
and introduce the surface part $\sigma(T) k^\frac{2}{3}$ of the free energy of $k$-particle clusters (here $\sigma(T)$ is the temperature dependent coefficient of surface tension).
Such a modification will make the model more realistic, since the surface part of free energy partly accounts for the
short range attraction among the constituents like it is done in the full SMM. Note that in
this case, however, the modified right hand side of Eq. (5) cannot be already reduced to the pressure of
point-like particles $p_- (T, \nu)$ with the shifted chemical potential
$\nu = \mu - 4 V_0 p_- $.
\vspace*{2.2mm}
{\bf Acknowledgements.} The authors thank Oleksandr Vitiuk, Oleksandr Khasai, Maksim Tomchenko, Nazar Yakovenko and Gennady Zinovjev for
fruitful discussions.
The authors are thankful to Edward Gorbar for reading the manuscript and for his valuable critique.
The work of K.A.B., O.I.I. and B.E.G. was supported in part by the Program of Fundamental Research in High Energy and Nuclear Physics launched by the Section of Nuclear Physics of the National Academy of Sciences of Ukraine. K.A.B. is grateful to the COST Action CA15213 ``THOR'' for supporting his networking.
\section{Introduction}
For studies of the hadronic final state in high-energy collisions,
versatile programs for the calculation of QCD corrections are required.
The extraction of scale-dependent
physical quantities such as the running strong coupling
constant $\alpha_s\left(Q^2\right)$ and parton densities
$f_i\left(\xi,Q^2\right)$ requires precise predictions
in next-to-leading order of QCD perturbation theory.
At the electron--proton collider HERA at DESY in Hamburg,
the strong coupling constant has been measured via jet rates
\cite{1,2}. There is also a direct fit of the gluon density
$f_g(\xi, Q^2)$ \cite{3} based on a Mellin transform
method \cite{4,5}. Calculations for jet cross sections
in deeply inelastic scattering for the case of the modified JADE
scheme
have been performed \cite{6,7,8,9,10}\footnote{
In these calculations, terms of the form $c\log c$,
$c$ being the jet cut, have been neglected. This implies in particular
a certain insensitivity with respect to the jet recombination scheme.
The set-up of the calculations \cite{6,7,9} is such that
a jet consisting of two partons is always mapped onto a massless jet.
Therefore the jet definition scheme which is used on experimental data
should be a ``massless'' scheme (this excludes, for example, the E-scheme).
The variation of jet cross sections within the possible massless schemes
cannot be modelled by that calculation.}
and implemented in the
two programs {\tt PROJET} \cite{11} and {\tt DISJET} \cite{12}.
In the meantime,
calculations for arbitrary infrared-safe observables in
deeply inelastic scattering have become available \cite{13,14}.
In the last few years, the technology for the calculation
of QCD corrections in next-to-leading order
has developed considerably.
The main problem in higher-order QCD calculations is the occurrence of
severe
infrared singularities (they ultimately cancel for infrared-safe
observables, or are absorbed into process-independent, physical
distribution functions such as parton densities and fragmentation functions).
There are explicit algorithms available
which permit the calculation to be done in a ``universal'' way: the
infrared singularities are subtracted such that arbitrary
infrared-safe observables can be calculated numerically. In principle,
all existing algorithms are variations on a common theme, namely the
interplay of the factorization theorems of perturbative QCD and the
infrared-safety of the observables under consideration.
There are two different ways to achieve the separation of
divergent and finite contributions:
the phase-space-slicing method \cite{15} and
the subtraction method \cite{16}.
Both methods have their merits and drawbacks.
\begin{description}
\item[{\rm\unboldmath ($\alpha$)}]
The phase-space-slicing method relies on a separation of
singular phase-space regions from non-singular ones by means of a
small slicing parameter $s\rightarrow 0$. The divergent parts are evaluated
under the assumption that terms of ${\cal O}(s (\log s)^n)$ can be dropped.
The analytically evaluated phase-space integrals yield terms of the form
$(\log s)^m$, which explicitly cancel against equivalent terms of opposite
sign from a numerically performed phase-space integration.
The simplicity of this scheme is obvious.
The main problem is the residual dependence on the technical cut
parameter~$s$
(in practice the limit $s\rightarrow 0$ is not checked for every observable,
but it is assumed that a fixed small value will be sufficient).
Moreover, the numerical cancellation of the logarithmic terms by means
of a Monte-Carlo integration is delicate.
There is a calculational scheme available for the determination of
the explicit phase space integrals over the singular regions \cite{17}.
For initial and final-state hadrons this scheme moreover
requires the introduction
of so-called {\it crossing functions} \cite{18},
to be evaluated for every parton density parametrization.
For deeply-inelastic lepton--nucleon scattering, an implementation
of this calculational scheme is provided by Mirkes and Zeppenfeld
in {\tt MEPJET} \cite{19}.
\item[{\rm\unboldmath($\beta$)}]
The subtraction method is technically more involved,
since the infrared singularities are cancelled point-by-point in
phase space. The subtraction terms have, owing to the factorization
theorems of perturbative QCD, a simple form. The problem is
to arrange the subtractions in such a way that in the numerical evaluation
no spurious singularities appear. A general framework, using a specific
phase space mapping besides the factorization theorems, is given by Catani
and Seymour in Ref.~\cite{20}, and implemented in {\tt DISENT}
\cite{21}.
The approach of the present
paper is to use a generalized partial fractions
formula to separate the singularities \cite{22}. The method is
briefly explained in Section~\ref{algorithm}. We will describe in some
detail the implementation {\tt DISASTER++}\footnote{
This is an acronym for ``Deeply Inelastic Scattering: All Subtraction Through
Evaluated Residues''.}
in the form of a {\tt C++} class library.
There are two reasons for a new calculation.
(a) The existing
programs have the restriction that the number of flavours is fixed
($N_f=5$ in the case of {\tt MEPJET}
and $N_f$ fixed, but arbitrary for {\tt DISENT}).
For studies of the scale-dependence it is
necessary to
have a variable number of flavours,
in order to be consistent with the scale evolution
of the strong coupling constant and the parton densities.
{\tt DISASTER++} makes the $N_f$ dependence explicit in the ``user routine''
on an event-by-event basis,
and thus results for several renormalization and factorization scales
can be calculated simultaneously.
(b) {\tt DISASTER++}
is already set up such that the extension to one-particle-inclusive
processes will be possible without the necessity of re-coding
the contributions which are already present for
the jet-type observables. This option will be made available
in future versions of the program, as soon as the remaining contributions
for one-particle-inclusive processes are implemented.
\end{description}
The outline of this paper is as follows. In Section~\ref{algorithm}
we briefly review the algorithm employed in the present calculation.
In Section~\ref{structure} the {\tt FORTRAN} interface
to the {\tt C++} class library is described.
Some remarks concerning the installation of the package are made
in Section~\ref{installation}.
A comparison of the available universal programs
{\tt DISASTER++} (Version 1.0), {\tt MEPJET} (Version 2.0)
and {\tt DISENT} (Version 0.1)
is presented in
Section~\ref{comparison}.
In a previous version of this paper, we have drawn
the conclusion that we find an overall, but not completely satisfactory
agreement of {\tt DISASTER++} and {\tt MEPJET}, and
that there are large deviations when comparing
{\tt DISASTER++} and {\tt DISENT}.
One of the purposes of this paper is to present the results of a comparison
of {\tt DISASTER++} and a new, corrected version (0.1)
of {\tt DISENT}. We now find
good agreement of the two programs.
We also give a few more results for {\tt MEPJET}, in particular
for the dependence on the technical cut~$s$. It turns out that
even for very small values of~$s$
we do not achieve agreement with the
{\tt DISASTER++}~/ {\tt DISENT} results
for several cases under
consideration\footnote{
In a very recent paper \cite{23}, E.~Mirkes quotes the results of the
comparison of the three programs as performed in the
previous version of this paper \cite{24} as resulting in
a ``so far satisfactory agreement''. This is a
misquotation. The formulation in Ref.~\cite{24} was that
for {\tt MEPJET} and {\tt DISASTER++} we find an ``overall, though not
completely satisfactory agreement'', and that the results of {\tt DISENT}
(Version 0.0) ``differ considerably''. Moreover, in the summary
of Ref.~\cite{24} we mention that a few deviations of {\tt MEPJET} and
{\tt DISASTER++} are present. We wish to stress that there is a certain
semantic
gap between the expression
``satisfactory agreement'' and the results just quoted.
}.
The paper closes with a summary.
The contents of this paper are mainly technical. The details of the calculation
and phenomenological applications will be described in a forthcoming
publication.
\section{The Algorithm}
\label{algorithm}
The calculation is based on the subtraction method. A simple example
to illustrate this method in general, and a comparison
with the phase-space-slicing
method, is given in Ref.~\cite{25}.
For a more detailed exposition of the contents of this section,
see Ref.~\cite{22}.
The subtraction method is one of the solutions for the problem of
how to
calculate
numerically
infrared-safe observables without having
to modify the calculation for every observable under consideration.
In QCD calculations, infrared singularities cancel for sufficiently
inclusive observables.
The factorization theorems of perturbative
QCD (see Ref.~\cite{26} and references therein)
together with the infrared-safety of the observable under consideration
guarantee that
the structure of the limit of the convolution of the parton-level cross
section with the observable in soft and collinear regions of phase space
is well-defined and factorizes in the form of a product of a kernel
and the Born term.
This property allows, for the real corrections, the definition of a subtraction
term for every phase-space point.
Formally:
\begin{eqnarray}
\int\mbox{dPS}^{(n)}\,\sigma\,{\cal O}
&=& \sum_A \int \mbox{dPS}_{i_A} \,k_A \left(
\int \mbox{dPS}^{(n-1)} \tau_A -
\left[
\int \mbox{dPS}^{(n-1)} \tau_A
\right]_{\mbox{\scriptsize soft/coll.~limit}}
\right)\nonumber\\
&+& \sum_A \int \mbox{dPS}_{i_A} \,k_A \left[
\int \mbox{dPS}^{(n-1)} \tau_A
\right]_{\mbox{\scriptsize soft/coll.~limit}},
\end{eqnarray}
where $\sigma$ is the parton-level cross section, $\cal O$ is the
infrared-safe observable, $k_A$ is a singular kernel,
and $\tau_A$ is the non-singular part of the product $\sigma\,\cal O$.
The index~$A$ runs over all possible soft, collinear and
simultaneously soft and collinear singularities of~$\sigma$.
The first integral is finite and can be calculated numerically. The second
integral contains all infrared singularities. The term in the square bracket
has a simple structure
because of the factorization theorems of QCD, and the one-particle
integral over the kernel $k_A$ and the factorization contribution from the
term in the square brackets can be performed easily.
This subtraction formula works only if the subtraction terms do not
introduce spurious singularities for the individual terms that eventually
cancel in the sum. This is achieved by a separation of all singularities
by means of a general partial fractions formula
\begin{equation}
\label{pfid}
\frac{1}{x_1\,x_2\cdots x_n}
=\sum_{\sigma\in S_n}
\frac{1}{x_{\sigma_1}\,(x_{\sigma_1}+x_{\sigma_2})\cdots
(x_{\sigma_1}+\ldots+x_{\sigma_n})},
\end{equation}
where the sum runs over all $n!$ permutations of $n$~objects.
In {\tt DISASTER++}, the processes for (1+1) and (2+1)-jet production
for one-photon exchange are implemented. The program itself, however,
is set up in a much more general way. The implemented subtraction procedure
can handle an arbitrary number of final-state partons, and zero or one incoming
partons (an extension to two incoming partons is possible). The {\tt C++}
class library is intended to provide a very general framework for
next-to-leading-order QCD calculations for arbitrary
infrared-safe observables. Of course, the explicit matrix
elements (Born terms, virtual corrections and factorized real corrections)
have to be provided for every additional process to be included.
\section{Program Structure}
\label{structure}
We now describe the {\tt FORTRAN} interface to the {\tt C++}
class library. The {\tt C++} user interface will be documented in a
forthcoming extension of this manual.
To set the stage, let us first introduce some terminology.
The user has to provide several subroutines which are called by
{\tt DISASTER++} for every generated event. Each {\bf event}
$e_n$, $n=1\ldots N$
consists of a set of
{\bf phase spaces} ${\cal P}_{nr}$, $r=1\ldots R_n$,
and a set of {\bf contributions} ${\cal C}_{ni}$,
$i=1\ldots L_n$. Phase spaces $\cal P$
provide a set of four-vectors of initial and final-state
particles, which are used to calculate observables
${\cal O}({\cal P})$.
Contributions ${\cal C}_{ni}$ consist of a list of
{\bf weights} $w_{nij}$, $j=1\ldots K_{ni}$ (here:
$K_{ni}=11$) which have to be multiplied
by certain {\bf flavour factors} $F_{nij}$.
Every contribution ${\cal C}_{ni}$ has an associated
phase space ${\cal P}_{nr_{ni}}$;
it is generally the case that particular phase spaces are
used for different contributions. Flavour factors are products
of parton densities, quark charges, powers of the strong coupling constant,
and powers of the electromagnetic coupling constant.
The expectation value $\langle {\cal O} \rangle$
of a particular observable is given by the following
sum:
\begin{equation}
\label{exval}
\langle {\cal O} \rangle =
\sum_{n=1}^N
\sum_{i=1}^{L_n}
{\cal O}({\cal P}_{nr_{ni}})
\sum_{j=1}^{K_{ni}}
w_{nij} F_{nij}.
\end{equation}
The first sum is the main loop of the Monte Carlo integration.
\noindent
The user has to provide a subroutine
{\tt user1} and
a function
{\tt user2}.
The subroutine
{\tt user1(iaction)} is called from {\tt DISASTER++} in the following cases:
\begin{description}
\item{\quad{\tt iaction=1}:} {\ }\\after start-up of {\tt DISASTER++}
\item{\quad{\tt iaction=2}:} {\ }\\before the end of {\tt DISASTER++}
\item{\quad{\tt iaction=3}:} {\ }\\before the start of the grid-definition
run of the adaptive Monte-Carlo routine, or before the final run of
the adaptive integration, in case that there is no grid-definition run
\item{\quad{\tt iaction=4}:} {\ }\\before the start of the final
run of the adaptive Monte-Carlo routine
\item{\quad{\tt iaction=5}:} {\ }\\after the final
run of the adaptive Monte-Carlo routine
\item{\quad{\tt iaction=6}:} {\ }\\once for every event (to initialize intermediate
weight sums, etc.)
\item{\quad{\tt iaction=7}:} {\ }\\signals that the event has to be dropped
for technical reasons
\end{description}
\noindent
The function {\tt user2(...)} is called from {\tt DISASTER++}
after an event has been constructed.
It has the following arguments (in an obvious
notation):
\begin{verbatim}
double precision function
& user2(
& integer nr,
& integer nl,
& double precision fvect(0..3, -10..10, 1..30),
& integer npartons(1..30),
& double precision xb(1..30),
& double precision q2(1..30),
& double precision xi(1..30),
& double precision weight(1..11, 1..50),
& integer irps(1..50),
& integer ialphas(1..50),
& integer ialphaem(1..50),
& integer lognf(1..50)
& )
\end{verbatim}
Here {\tt nr} stands for $R_n$, {\tt nl} stands for $L_n$,
{\tt fvect(mu, iparticle, ir)} is the {\tt mu}$^{\mbox{th}}$ component
of the four-vector of the particle
with label {\tt iparticle} ({\tt mu}=0 corresponds to the energy component)
in units of [GeV]
in the Breit frame
for the phase space {\tt ir};
{\tt npartons(ir)} is the number of final-state partons,
{\tt q2(ir)} is the value of $Q^2$, and {\tt xi(ir)} is the momentum fraction
of the incident parton.
The particle labels {\tt iparticle} are given by
\begin{description}
\item[\quad{\tt iparticle=-8:}] proton remnant
\item[\quad{\tt iparticle=-7:}] incident proton
\item[\quad{\tt iparticle=-5:}] outgoing electron
\item[\quad{\tt iparticle=-4:}] incident electron
\item[\quad{\tt iparticle=-1:}] incident parton
\item[\quad{\tt iparticle=0..(npartons-1):}] outgoing partons
\end{description}
The array {\tt weight(j, i)} is a list of the weights for contribution
{\tt i} in units of [pb],
{\tt irps(i)} gives the index of the phase space for this particular
contribution,
{\tt ialphas(i)} and {\tt ialphaem(i)} are the powers of the strong
and electromagnetic coupling constant, respectively, and {\tt lognf(i)}
is an index that specifies whether the weights have to be multiplied
by a factor $\lambda$ consisting of a product of
a logarithm of a scale and/or a factor of $N_f$:
\begin{description}
\item[\quad{\tt lognf=0}:] $\lambda=1$
\item[\quad{\tt lognf=1}:] $\lambda=\ln\left(\mu_r^2/Q^2\right)$
\item[\quad{\tt lognf=2}:] $\lambda=N_f \ln\left(\mu_r^2/Q^2\right)$
\item[\quad{\tt lognf=3}:] $\lambda=\ln\left(\mu_f^2/Q^2\right)$
\item[\quad{\tt lognf=4}:] $\lambda=N_f \ln\left(\mu_f^2/Q^2\right)$
\end{description}
Here $\mu_r$ and $\mu_f$ are the renormalization and factorization scales,
respectively.
The total flavour factor for contribution $i$ is given by
\begin{equation}
F_{nij} =
\lambda \,
\alpha_s^{\mbox{\tt ialphas($i$)}} \,
\alpha^{\mbox{\tt ialphaem($i$)}} \,
\rho_{ij},
\end{equation}
where
the quantity $\rho_{ij}$ is a product of squares of quark charges $Q_\alpha$
in units of $e$ and parton densities.
In particular:
\begin{description}
\item \quad$\rho_{i1}
= \sum\limits_{\alpha=1}^{N_f} Q_\alpha^2 \, f_\alpha$
\item \quad$\rho_{i2}
= \sum\limits_{\alpha=1}^{N_f} Q_\alpha^2 \,
f_{\overline{\alpha}}$
\item \quad$\rho_{i3}
= \sum\limits_{\alpha=1}^{N_f} Q_\alpha^2 \, f_g$
\item \quad$\rho_{i4} = \rho_{i1}$
\item \quad$\rho_{i5} = \rho_{i2}$
\item \quad$\rho_{i6} = \rho_{i1}\,(N_f-1)$
\item \quad$\rho_{i7} = \rho_{i2}\,(N_f-1)$
\item \quad$\rho_{i8}
= \sum\limits_{\alpha=1}^{N_f} f_\alpha \,
\sum\limits_{\beta=1,\, \beta \neq \alpha}^{N_f} Q_\beta^2$
\item \quad$\rho_{i9}
= \sum\limits_{\alpha=1}^{N_f} f_{\overline{\alpha}} \,
\sum\limits_{\beta=1,\, \beta \neq \alpha}^{N_f} Q_\beta^2$
\item \quad$\rho_{i10}
= \sum\limits_{\alpha=1}^{N_f} f_\alpha Q_\alpha \,
\sum\limits_{\beta=1,\, \beta \neq \alpha}^{N_f} Q_\beta$
\item \quad$\rho_{i11}
= \sum\limits_{\alpha=1}^{N_f} f_{\overline{\alpha}} Q_\alpha \,
\sum\limits_{\beta=1,\, \beta \neq \alpha}^{N_f} Q_\beta$
\end{description}
The $f_\alpha$ are parton densities evaluated for
momentum fractions $\mbox{\tt xi(irps($i$))}$ and factorization scale
$\mu_f$,
and $f_{\overline{\alpha}}$ stands for the parton density of the anti-flavour
of the flavour $\alpha$. The renormalization and factorization schemes are
the $\overline{\mbox{MS}}$ scheme. The correction terms for the
DIS factorization
scheme will be implemented in the near future.
We wish to note that the product of the weights, the flavour factors and the
values of the observable is normalized in such a way that
the sum yields the expectation value in units of [pb]. No additional
factor such as $1/N$, $N$ being the total number of generated events,
has to be applied
in Eq.~(\ref{exval}).
Since phase spaces are used several times for different contributions,
it is a good strategy to first evaluate the observable(s) under consideration
for every phase space and to store the corresponding results.
Then there should be the loop over the various contributions (the second sum).
The innermost loop is the one over the flavour factors.
The Monte Carlo integration itself employs the program {\tt VEGAS}
\cite{27,28}.
{\tt VEGAS} is an adaptive multi-dimensional integration routine.
Integrations proceed in two steps.
The first step is an adaptation step in order
to set up a grid in the integration variables
which then steers the final integration step.
The adaptation step itself refines
the grid in a sequence of several iterations.
{\tt VEGAS} requires, as parameters, the number of Monte Carlo points
to be used in the first and second step, respectively,
and the number of iterations to refine the grid.
In the framework of {\tt DISASTER++}, {\tt VEGAS} can be used in three different
ways:
\begin{itemize}
\item As an adaptive integration routine.
The routine {\tt user2} should return a value. This value is handed over
to {\tt VEGAS} as
the value of the integrand at the particular phase space point,
and summed up. The final integral quoted by {\tt VEGAS}
is the sum of these weights
for the final integration.
This is the best choice if just one observable,
for example a jet cross section, is to be evaluated.
\item As a routine that merely supplies random numbers for
the events.
If the number of iterations is set to zero, then {\tt VEGAS} just performs
the final integration run. The user is then responsible for the correct
summation of the weights, and for the determination of the
statistical error. It should be noted that, since all weights are
available individually in the user routine, an arbitrary number of
observables can be evaluated in a single run. In particular, since the
dependence on the renormalization and factorization scales and on $N_f$
is fully explicit, the study of the scale dependence of observables
can be done in a very convenient way. For example, all plots from
Ref.~\cite{22}
have been obtained in a single run of {\tt DISASTER++}.
\item As a combination of the two preceding alternatives. Here the adaptation
steps are included. A ``typical'' infrared-safe observable,
in the following called the {\it adaptation variable}, is evaluated, and
its value is returned to {\tt VEGAS}. This observable serves to optimize the
distribution of points over phase space. A convenient observable of this
kind is provided by {\tt DISASTER++} (see below).
The ``real'' observables under consideration are evaluated as in the
second alternative in the final integration step.
\end{itemize}
\noindent
{\tt DISASTER++} is initialized by a call of
the subroutine {\tt disaster\_ca()}. It is recommended
to end a {\tt DISASTER++}
run by a call of the subroutine
{\tt disaster\_cb()} in order to free
dynamically allocated memory.
\noindent
Parameters can be set or commands be executed by means of three routines:
\begin{description}
\item {\quad\tt disaster\_ci(str, i)} {\ }\\
sets the integer parameter denoted by
the character string {\tt str} to the value {\tt i}
\item {\quad\tt disaster\_cd(str, d)} {\ }\\
sets the double precision parameter denoted by
the character string {\tt str} to the value {\tt d}
\item {\quad\tt disaster\_cc(str)} {\ }\\ executes the command
given by the character string {\tt str}
\end{description}
The following parameters are available (there are a few more to optimize the
generation of the phase space points; they will be documented in forthcoming
versions of this manual):
\begin{description}
\item[\quad{\makebox[3cm][l]{\tt ECM:}}]{\ }\\
the centre-of-mass energy in [GeV]
\item[\quad{\tt LEPTON\_INTEGRATION}:]{\ }\\
{\tt 1:} integration over $x_B$ and $y$
\item[\quad{\makebox[3cm][l]{\tt XBMIN:}}]{\ }\\
minimum value of $x_B$
\item[\quad{\makebox[3cm][l]{\tt XBMAX:}}]{\ }\\
maximum value of $x_B$
\item[\quad{\makebox[3cm][l]{\tt YMIN:}}]{\ }\\
minimum value of $y$
\item[\quad{\makebox[3cm][l]{\tt YMAX:}}]{\ }\\
maximum value of $y$
\item[\quad{\makebox[3cm][l]{\tt QMIN:}}]{\ }\\
minimum value of $Q$
\item[\quad{\makebox[3cm][l]{\tt QMAX:}}]{\ }\\
maximum value of $Q$
\item[\quad{\makebox[3cm][l]{\tt WMIN:}}]{\ }\\
minimum value of $W$
\item[\quad{\makebox[3cm][l]{\tt WMAX:}}]{\ }\\
maximum value of $W$
\item[\quad{\makebox[3cm][l]{\tt PROCESS\_INDEX:}}]{\ }\\
{\tt 1:} leading order\\
{\tt 2:} next-to-leading order
\item[\quad{\tt NUMBER\_OF\_FINAL\_STATE\_PARTONS\_IN\_BORN\_TERM:}]{\ }\\
{\tt 1}, {\tt 2}, {\tt 3} for the process under consideration;\\
{\tt 1:} (1+1)-jet-type observables (leading and next-to-leading order)\\
{\tt 2:} (2+1)-jet-type observables (leading and next-to-leading order)\\
{\tt 3:} (3+1)-jet-type observables (leading order only)
\item[\quad{\makebox[3cm][l]{\tt POINTS:}}]{\ }\\
{\tt POINTS * (FACT\_PREP + FACT\_FINAL)} is the
number of generated points in the Monte Carlo integration
\item[\quad{\makebox[3cm][l]{\tt FACT\_PREP:}}]{\ }\\
the number of points for the grid-definition run is given by
{\tt FACT\_PREP * POINTS}
\item[\quad{\makebox[3cm][l]{\tt FACT\_FINAL:}}]{\ }\\
the number of points for the final integration step is given by
{\tt FACT\_FINAL * POINTS}
\item[\quad{\makebox[3cm][l]{\tt RUN\_MC:}}]{\ }\\
to start the Monte-Carlo integration
\end{description}
\noindent
A convenient adaptation observable can be evaluated by a call of
the following function:
\begin{verbatim}
double precision function disaster_cao(
& integer ipdf_collection,
& integer ipdf_parametrization,
& integer ipdf_set,
& integer ialphas_variant,
& integer ialphas_order,
& double precision dalphas_lambdaqcd4,
& integer ialphaem_variant
& )
\end{verbatim}
The arguments of the function call are:
\begin{description}
\item[\quad{\tt ipdf\_collection:}]{\ }\\
the collection of parton densities; \\
{\tt 1:} {\tt PDFLIB} \cite{29}
\item[\quad{\tt ipdf\_parametrization:}]{\ }\\
parametrization of parton densities (cf.\ {\tt PDFLIB})
\item[\quad{\tt ipdf\_set:}]{\ }\\
set of parton densities (cf.\ {\tt PDFLIB})
\item[\quad{\tt ialphas\_variant:}]{\ }\\
function which is used to evaluate the strong coupling constant;\\
{\tt 1:} running coupling $\alpha_s(Q^2)$ with
flavour thresholds at the single heavy quark masses
\item[\quad{\tt ialphas\_order:}]{\ }\\
{\tt 1:} one-loop formula\\
{\tt 2:} two-loop formula\\
for the running strong
coupling constant
\item[\quad{\tt dalphas\_lambdaqcd4:}]{\ }\\
the QCD parameter $\Lambda_{\mbox{\scriptsize QCD}}^{(4)}$
for four flavours
\item[\quad{\tt ialphaem\_variant:}]{\ }\\
function which is used to evaluate the electromagnetic coupling constant;\\
{\tt 1:} fine structure constant \\
{\tt 2:} 1/137\\
(an implementation of the running electromagnetic
coupling constant is in preparation)
\end{description}
\noindent
To simplify the calculation of the flavour factors,
a {\tt DISASTER++} routine can be called which returns the
required coupling constants and the combinations of parton densities
and quark charges:
\begin{verbatim}
subroutine disaster_cff(
& integer ipdf_collection,
& integer ipdf_parametrization,
& integer ipdf_set,
& integer ialphas_variant,
& integer ialphas_order,
& double precision dalphas_lambdaqcd4,
& integer ialphaem_variant,
& integer nf,
& double precision ffactin(4),
& double precision ffactout(13)
& )
\end{verbatim}
The arguments of the function call are the same as in the case of the
routine {\tt disaster\_cao} (see above), except for the following:
\begin{description}
\item[\quad{\tt nf:}]{\ }\\
the number of flavours $N_f$
\item[\quad{\tt ffactin:}]{\ }\\
input parameters;
\begin{description}
\item {\tt ffactin(1):} the momentum fraction variable $\xi$
\item {\tt ffactin(2):} the factorization scale in [GeV]
(i.e.\ the scale argument of the parton densities)
\item {\tt ffactin(3):} the renormalization scale in [GeV]
(i.e.\ the scale argument of the running strong coupling constant)
\item {\tt ffactin(4):} the scale argument of the running electromagnetic
coupling constant
\end{description}
\item[\quad{\tt ffactout:}]{\ }\\
output parameters;
\begin{description}
\item {\tt ffactout(1..11):} the quantities $\rho_{i1}$ \ldots
$\rho_{i11}$,
\item {\tt ffactout(12):} the running strong coupling constant
\item {\tt ffactout(13):} the electromagnetic
coupling constant
\end{description}
\end{description}
It is strongly recommended to use this routine, since it uses
a cache that stores a few of the most recent values temporarily, such that
the sums $\rho_{ij}$ and the parton densities do not have to be reevaluated.
This routine is supplied for the convenience of the user. The weights
and events generated by {\tt DISASTER++} do not depend on this routine.
The description of the program structure just given may sound
complicated. It is actually quite simple to use the program; an example
for the calculation of the (2+1)-jet cross section for the JADE algorithm
in the E-scheme is given in the files {\tt disaster\_f.f}
and {\tt clust.f}, as described in Section~\ref{installation}.
\section{Program Installation}
\label{installation}
\begin{description}
\item[Source code:]
The source code of the class library is available on the World Wide Web:
\begin{verbatim}
http://wwwcn.cern.ch/~graudenz/disaster.html
\end{verbatim}
\item[Files:]
The package consists of a number of files. To facilitate the installation,
and to enable the {\tt C++} compiler to perform certain optimizations,
the complete {\tt C++} part of the program is provided as one file
{\tt onefile\_n.cc} (the individual files are available on request).
An example for the {\tt FORTRAN} interface is
given in the file {\tt disaster\_f.f} (calculation of the (2+1) jet
cross section for the JADE algorithm in the E-scheme),
together with a simple cluster
routine in the file {\tt clust.f}.
The number of Monte Carlo events in the example is set to
a tiny number (100) in order to terminate the program after a few seconds.
Realistic values for the parameter {\tt POINTS} are of the order of
$10^6$.
An example ``make file'' is given in {\tt makedisaster}.
\item[Mixed Language Programming:]
{\tt DISASTER++} is mainly written in the {\tt C++} programming language.
The reasons for the choice of this language are twofold:
Object-oriented programming allows for programs that are easily
maintained and extended\footnote{It could even be said that object-oriented
programming is a kind of applied ontology: the central categories of this
approach are given by {\it objects} and {\it methods} that define their
relationships.
}, and in high-energy physics there is a trend in the experimental
domain for a transition from {\tt FORTRAN} to {\tt C++}.
Although the goal has been to write a self-contained {\tt C++}
package,
a few parts of the program are still coded in
{\tt FORTRAN}. Moreover, the standard parton density parametrizations
are only
available as {\tt FORTRAN} libraries. This means that the {\tt DISASTER++}
package cannot be run as a stand-alone {\tt C++} program. In most cases,
users may wish to interface the program to their existing {\tt FORTRAN}
routines. An elegant and machine-independent
way for {\it mixed language programming} for the case
of {\tt C}, {\tt C++} and {\tt FORTRAN} is supported by the
{\tt cfortran.h} package described in Ref.~\cite{30}.
For every {\tt FORTRAN} routine to be called by a {\tt C++} method,
an {\tt extern "C"} routine has to be defined as an interface,
and vice versa. The explicit calls are then generated by means of macros
from {\tt cfortran.h}. The most convenient way is, after compilation,
to link the {\tt FORTRAN} and {\tt C++} parts via the standard
\begin{verbatim}
f77 -o disaster onefile_n.o ...
\end{verbatim}
command\footnote{The procedure is described here for the {\tt UNIX}
operating system.},
such that the {\tt FORTRAN} part supplies the entry point.
The required {\tt C++} libraries have to be stated explicitly
via the {\tt -L} and {\tt -l} options. The library paths can be obtained
by compiling and linking a trivial program {\tt hw.cc} of the type
\begin{verbatim}
#include <stdio.h>
main() { printf("Hello world!\n"); }
\end{verbatim}
with
\begin{verbatim}
gcc -v hw.cc
\end{verbatim}
(for the {\tt GNU C++} compiler).
An example for the required libraries can be found in the
prototype ``make file'' {\tt makedisaster}. Some machine-specific information
is mentioned in the manual of {\tt cfortran.h}.
In the {\tt DISASTER++} package, the explicit {\tt FORTRAN} interface,
as described in Section~\ref{structure},
is already provided. Thus
from the outside the {\tt C++}
kernel is transparent and hidden behind {\tt FORTRAN} subroutines
and functions.
\item[Template instantiation:]
In {\tt DISASTER++}, heavy use is made of {\it templates}. At present, there
is not yet a universally accepted scheme for template instantiations.
The solution adopted here is the explicit instantiation
of all templates. This requires
that the compiler itself does not instantiate templates automatically.
This is achieved for the {\tt GNU} compiler by means of the switch
\begin{verbatim}
-fno-external-templates
\end{verbatim}
\item[Output files:]
There is a small problem with the output from the {\tt C++} and {\tt FORTRAN}
parts of {\tt DISASTER++}. It seems to be the case that generally {\tt C++}
({\tt FILE* stdout} and {\tt ostream cout})
and {\tt FORTRAN} ({\tt UNIT=6}) keep different
file buffers. This is no problem when the output is written to a terminal,
since then the file buffers are immediately flushed
after each line-feed character. When writing to
a file (as is usually the case for batch jobs), the file buffers are not
immediately flushed, and this leads to the problem that the output
on the file is mixed in non-chronological order. This problem will be solved
by the introduction of a particular stream class which hands over the output
to a {\tt FORTRAN} routine.
\item[Miscellaneous:]
{\tt DISASTER++} employs the {\tt ANSI C} {\tt signal} facility to
catch interrupts caused by floating point arithmetic.
If the signal {\tt SIGFPE} is raised, a flag in {\tt DISASTER++} is set,
which eventually leads to the requirement that the event has to be
dropped (via a call of {\tt user1(7)}). Similarly a non-zero value of
the variable {\tt errno} of the {\tt ANSI C errno} facility
is treated. The signal handler is also active
when the user routine is executed, which leads to the effect that
in the case of a floating point exception the program does not crash, but
continues under the assumption that the event has been dropped.
Forthcoming versions of {\tt DISASTER++} will make a flag available
that can be used to access the status of the signal handler in
the user routines.
Moreover, it is checked whether the weight returned to {\tt DISASTER++} via
{\tt user2} fulfills the criterion for {\tt IEEE} {\tt NaN} (``not a number'').
If this is the case, it is also requested that the event be dropped.
\end{description}
\section{Comparison of Available Programs}
\label{comparison}
In this section, we compare the three available programs {\tt MEPJET}
(Version 2.0)\footnote{
For the very-high statistics runs the default random number
generator (generating a Sobol sequence of pseudo-random numbers)
of {\tt MEPJET} ran out of random numbers. We therefore had to modify the
program such that it uses another generator which is also part of
the {\tt MEPJET} package. --- The crossing functions for the ``artificial''
parton densities have been obtained by means of a modification of the program
{\tt make\_str\_pdf1.f}.
},
{\tt DISENT} (Version 0.1)\footnote{
An earlier version of this paper \cite{24}
reported results of a comparison
with {\tt DISENT} Version 0.0. We found large discrepancies for
some choices of the parton density parametrization. In the meantime,
an error in {\tt DISENT} has been fixed, and the results
of {\tt DISENT} and {\tt DISASTER++} are now
in good agreement, see below.
} and {\tt DISASTER++}
(Version 1.0) numerically for various bins of
$x_B$ and $y$ as defined in Table~\ref{tab1},
and for various choices of the parton density parametrizations.
\begin{table}[htb]
\begin{center}
\begin{tabular}[h]{|c|c|c|c|}
\cline{2-4}
\multicolumn{1}{c|}{\rule[-2.5mm]{0mm}{8mm}}
& \makebox[4.1cm]{$0.01 < y < 0.03 $}
& \makebox[4.1cm]{$0.03 < y < 0.1 $}
& \makebox[4.1cm]{$0.1 < y < 0.3 $}
\\ \hline
$0.005 < x_B < 0.01$ \rule[-2.5mm]{0mm}{8mm}
& \makebox[1.2cm]{Bin 1}($Q^2 > 4.6\,\mbox{GeV}^2$)
& \makebox[1.2cm]{Bin 2}($Q^2 > 13.5\,\mbox{GeV}^2$)
& \makebox[1.2cm]{Bin 3}($Q^2 > 45.0\,\mbox{GeV}^2$)
\\ \hline
$0.05 < x_B < 0.1$ \rule[-2.5mm]{0mm}{8mm}
& \makebox[1.2cm]{Bin 4} ($Q^2 > 45\,\mbox{GeV}^2$)
& \makebox[1.2cm]{Bin 5} ($Q^2 > 135\,\mbox{GeV}^2$)
& \makebox[1.2cm]{Bin 6} ($Q^2 > 450\,\mbox{GeV}^2$)
\\ \hline
$0.2 < x_B < 0.4$ \rule[-2.5mm]{0mm}{8mm}
& \makebox[1.2cm]{Bin 7} ($Q^2 > 180\,\mbox{GeV}^2$)
& \makebox[1.2cm]{Bin 8} ($Q^2 > 540\,\mbox{GeV}^2$)
& \makebox[1.2cm]{Bin 9} ($Q^2 > 1800\,\mbox{GeV}^2$)
\\ \hline
\end{tabular}
\end{center}
\caption[tab1]
{
\label{tab1}
{\it
Bins in $x_B$ and $y$. The values in parentheses give the resulting
lower bounds on $Q^2$.
}
}
\end{table}
The centre-of-mass energy is set to 300\,GeV. To facilitate the
comparison, the strong coupling constant is set to a fixed value of
$\alpha_s=0.1$,
and the number of flavours is set to $N_f=5$, even below the bottom
threshold ($N_f=5$ is hard-wired into {\tt MEPJET}).
The electromagnetic coupling constant
is chosen to be $\alpha=1/137$ (the value
is hard-wired into {\tt DISENT}, but this could be changed trivially,
in principle). The factorization- and renormalization schemes of the
hard scattering cross sections are $\overline{\mbox{MS}}$, and the
factorization and renormalization scales $\mu_f$ and $\mu_r$, respectively,
are set to $Q$.
The quantity under consideration is the (2+1)-jet cross section,
shown in Tables~2--8 in Appendix~\ref{capp}.
For simplicity we consider the modified JADE clustering scheme
with resolution criterion $S_{ij} < c W^2$ and the E~recombination scheme,
where
$S_{ij} = (p_i + p_j)^2$, $W$ is the total hadronic energy,
and $c=0.02$ is the jet resolution parameter.
We require, in the laboratory frame ($E_e=27.439$\,GeV,
$E_P=820$\,GeV), a minimum transverse momentum of 1\,GeV and a pseudo-rapidity
of $-3.5<\eta<3.5$ for all jets\footnote{These cuts in $p_T$ and $\eta$
are employed in order to facilitate event generation with {\tt MEPJET};
the phase space generator implemented in that program is reminiscent of a
generator for pp~collider physics where $p_T$ and $\eta$ cuts
in the laboratory frame are
a standard experimental procedure. It is thus complicated to generate
events with {\tt MEPJET}
in the full phase space of the laboratory system, as usually required
for eP scattering, where ``natural'' cuts in transverse momentum and
pseudo-rapidity would be performed in the hadronic centre-of-mass frame
or in the Breit frame.}.
The parton density parametrizations employed in the comparison are:
\begin{description}
\item[{\makebox[1cm][l]{(a)}}]
\makebox[6cm][l]{the MRSD$_-^\prime$ parton densities
\cite{31}} (Table 2),
\item[{\makebox[1cm][l]{(b)}}]
\makebox[3cm][l]{$q(\xi)=(1-\xi)^5$,}\makebox[3cm][l]{$g(\xi)=0$}
(Table 3),
\item[{\makebox[1cm][l]{(c)}}]
\makebox[3cm][l]{$q(\xi)=0$,}\makebox[3cm][l]{$g(\xi)=(1-\xi)^5$}
(Table 4),
\item[{\makebox[1cm][l]{(d)}}]
\makebox[3cm][l]{$q(\xi)=(1-\xi)^2$,}\makebox[3cm][l]{$g(\xi)=0$}
(Table 5),
\item[{\makebox[1cm][l]{(e)}}]
\makebox[3cm][l]{$q(\xi)=0$,}\makebox[3cm][l]{$g(\xi)=(1-\xi)^2$}
(Table 6),
\item[{\makebox[1cm][l]{(f)}}]
\makebox[3cm][l]{$q(\xi)=(1-\xi)$,}\makebox[3cm][l]{$g(\xi)=0$}
(Table 7),
\item[{\makebox[1cm][l]{(g)}}]
\makebox[3cm][l]{$q(\xi)=0$,}\makebox[3cm][l]{$g(\xi)=(1-\xi)$}
(Table 8).
\end{description}
Here $q(\xi)$ generically stands for valence and sea distributions\footnote{
This means that $u_v(\xi)$, $d_v(\xi)$, $u_s(\xi)$, $d_s(\xi)$,
$s_s(\xi)$, $c_s(\xi)$,
$b_s(\xi)$ have been set to $q(\xi)$.
},
and $g(\xi)$ is the gluon distribution.
We wish to point out that the comparison involving the ``artificial''
parton densities is not just of academic interest. On the contrary,
for the extraction
of, for instance, the gluon density
from jet data
it is convenient
to replace the parton densities by simple functions
with special properties (such as powers
of the momentum fraction variable $\xi$ or functions of an orthonormal
basis system),
in order to achieve a fast fit. These functions usually do not have
the shape of physical parton densities, in particular they do not
have to fall off rapidly for $\xi\rightarrow 1$.
Moreover, next-to-leading-order calculations yield unique and well-defined
results for the hard scattering cross sections to be convoluted with
observables and parton densities. We employ the ``artificial''
parton densities also in order to have a stricter test of the
hard scattering cross sections.
The leading-order results of all three programs are in excellent agreement.
The next-to-leading-order results of {\tt DISASTER++} and {\tt DISENT} are
in good agreement within about two to (sometimes)
three standard deviations\footnote{
We wish to note that the error estimates quoted by the programs are usually not
rigorous estimates because of the non-Gaussian distribution of the
Monte-Carlo weights. Therefore, in principle, it is not possible to
infer probabilities for the consistency of data samples produced by two
programs based on these estimates.
A more precise, but in general unfeasible way to obtain an estimate of the
Monte Carlo error would be to run the programs a number of times
with different random number seeds and to analyze the spread of the
quoted results around their central value.
Such a study has recently been done by M.~Seymour
for {\tt DISENT} with the result that the
individual error estimates are quite reliable \cite{32}.
}
of the larger of the two errors quoted by the two programs. An exception
is bin~7 for $g(\xi) = (1-\xi)^2$. A run of {\tt DISENT} with higher statistics
yields a value of $0.1836 \pm 0.0025$, which is within two standard deviations
of the {\tt DISASTER++} result, indicating that there was indeed a statistical
fluctuation in the original result.
The comparison of the next-to-leading-order results
of {\tt MEPJET} and {\tt DISASTER++} requires a more detailed discussion:
\begin{itemize}
\item For the MRSD$_-^\prime$ parton densities, the results for
bins 3--9 are
compatible within about two standard deviations of the statistical error
of the Monte-Carlo integrations.
The results for bins~1 and~2 differ considerably.
Runs with a smaller value
of the internal {\tt MEPJET} cut-off variable~$s$, which is set by default
to $s=0.1\,$GeV$^2$, yield
the following results for bin 1:
$580.6 \pm 6.7$\,pb ($s=0.01\,$GeV$^2$),
$564.8 \pm 10.5$\,pb ($s=0.001\,$GeV$^2$) and
$575.4 \pm 13.0$\,pb ($s=0.0001\,$GeV$^2$).
The statistical error is increased for decreased~$s$ because the integration
volume
of the (3+1) parton contributions is extended into the singular domain.
Because of the increased statistical error, we also performed a
high-statistics run with $\sim 4\cdot10^9$ (!) Monte Carlo events
of {\tt MEPJET}
for this bin.
For $s=0.001\,$GeV$^2$ we obtain
$576.3 \pm 6.7$\,pb
and for $s=0.0001\,$GeV$^2$
the result is
$583.2 \pm 7.4$\,pb.
These values from {\tt MEPJET}
are compatible with the {\tt DISASTER++} and {\tt DISENT} results\footnote{
These results underline that, for the phase space slicing method, results
generally have to be validated {\it ex post} by a cross-check with a
smaller technical cut~$s$ and much higher statistics. It may be argued that
there are jet algorithms (the $k_T$~algorithm, for example)
which show a better convergence for $s\rightarrow 0$.
However, the point here is that one does not know in advance whether this
is the case for the observable under consideration. --- In Ref.~\cite{23}
we find the statement that $s$-independence in {\tt MEPJET} is achieved for
$s=0.1\,$GeV$^2$. Our study shows that this is generally not the case,
and that extremely small values of~$s$, possibly of the order of
$s=0.0001\,$GeV$^2$, might be necessary.
}.
\item For the parton density parametrization (b) (quarks only, with a steeply
falling distribution $q(\xi)$ for $\xi \rightarrow 1$),
{\tt DISASTER++} and {\tt MEPJET}
are in good agreement.
\item The results for parametrization (c) (steeply falling
gluon parametrization)
are in good agreement, except for bin 1.
\item For parametrization (d),
{\tt DISASTER++} and {\tt MEPJET} are in agreement except for bins 1 and 4.
Runs with a smaller value
of the {\tt MEPJET} cut-off variable~$s$
yield
the following results for bin 1:
$59.6 \pm 1.8$\,pb ($s=0.01\,$GeV$^2$),
$56.7 \pm 5.8$\,pb ($s=0.001\,$GeV$^2$) and
$54.9 \pm 10.4$\,pb ($s=0.0001\,$GeV$^2$).
A high-statistics run ($\sim 4\cdot10^9$ events) of {\tt MEPJET}
for bin 1 with $s=0.0001\,$GeV$^2$ gives the
cross section $60.0 \pm 1.9$\,pb.
Contrary to the observation in case (a), for small~$s$
we do not get agreement of
the {\tt MEPJET} result with the {\tt DISASTER++} / {\tt DISENT} result
of about $48$--$49$\,pb.
\item The {\tt MEPJET} results for parametrization (e)
($g(\xi) = (1-\xi)^2$)
deviate considerably from the {\tt DISASTER++}
results in bins~1, 2, 4 and 7.
\item For parametrization (f),
{\tt DISASTER++} and {\tt MEPJET} are incompatible
for bins 1, 2, 4, 6 and 7.
\item For parametrization (g),
{\tt MEPJET} and {\tt DISASTER++} are compatible in bins
3, 5, 8 and 9 only.
A high-statistics run ($\sim 4\cdot10^9$ events) of {\tt MEPJET}
for bin 4 with $s=0.0001\,$GeV$^2$ yields the
cross section $1.29 \pm 0.02$\,pb.
This value is different from the result for $s=0.1\,$GeV$^2$,
but still inconsistent
with the {\tt DISASTER++} / {\tt DISENT} result of about $0.69$\,pb.
\end{itemize}
The overall picture is thus: Out of the three programs, {\tt DISASTER++}
and {\tt DISENT} (Version 0.1) are in good agreement within about
two, sometimes three standard deviations of the quoted integration errors,
both for ``physical'' and ``artificial'' parton densities. This agreement
is very encouraging, but not yet perfect, and much more detailed studies
involving different sets of observables and differential distributions
are required. For the two programs, a direct comparison of the
``jet structure functions'' should also be feasible.
For several bins, in particular for the ``artificial'' parton distribution
functions, the {\tt MEPJET}
results for the default setting of the
internal parameters deviate considerably from the {\tt DISASTER++}
and {\tt DISENT} results.
For one particular bin studied in more detail for
the MRSD$_-^\prime$ parton densities,
the
discrepancy disappears in the case of an extremely small internal technical
cut~$s$ of {\tt MEPJET}, for a substantial increase of the
number of generated events to obtain a meaningful Monte Carlo error.
A few {\tt MEPJET} results employing ``artificial''
parton densities have been studied in more detail. We observed that
in these cases a reduction of the~$s$ parameter does not lead to an
improvement of the situation. For lack of computer time, we could not study
all bins with a smaller $s$~cut. The overall situation
is thus still inconclusive and unclear. An independent cross check of the
{\tt MEPJET} results, in particular of those using the
implementation of the crossing functions for the ``artificial'' parton
densities, is highly desirable.
\section{Miscellaneous}
\begin{itemize}
\item If you intend to install and use {\tt DISASTER++}, please send me
a short e-mail message, and I will put your name on a mailing list
so that I can inform you when there is a new version of the package.
\item Suggestions for improvements and bug reports are welcome.
\item In case that there are problems with the installation of the program,
please send me an e-mail message.
\end{itemize}
\section{Summary}
We have presented the {\tt C++} class library
{\tt DISASTER++} for the calculation
of (1+1) and (2+1)-jet type observables in deeply inelastic scattering.
The program is based on the subtraction formalism and thus does not require
a technical cut-off for the separation of the infrared-singular from the
infrared-finite phase-space regions.
A {\tt FORTRAN} interface to the {\tt C++} class library is available.
{\tt DISASTER++} is actually intended to be a general object-oriented
framework for next-to-leading order QCD calculations. In particular,
the subtraction formalism is implemented in a very general way.
We have performed a comparison of the three available programs
{\tt MEPJET}, {\tt DISENT} and {\tt DISASTER++}
over a wide range of the parameters for the lepton phase space.
We find good agreement of {\tt DISASTER++} and the Catani-Seymour
program {\tt DISENT} (Version 0.1).
The comparison of {\tt DISASTER++} and the Mirkes-Zeppenfeld program
{\tt MEPJET} (for the {\tt MEPJET}
default parameters) leads to several
discrepancies, both for physical and for ``artificial'' parton densities.
For the MRSD$_-^\prime$ parton densities a
reduction of the internal {\tt MEPJET} phase-space slicing cut-off
variable~$s$, the number of Monte Carlo events kept fixed, leads to a certain
improvement of the central values of the results,
accompanied by a substantially increased statistical error and fluctuating
central values. A considerable increase of the number of generated events
(up to of the order of several billion events)
eventually leads to an agreement of the {\tt MEPJET} results with the
{\tt DISASTER++} / {\tt DISENT} results for a particular bin of the lepton
variables which has been studied in detail.
For ``artificial'' parton densities and a selected set of bins of
the lepton variables, a reduction of the internal cut~$s$
does not resolve the discrepancies.
Other bins are not considered
for the lack of computer time for very-high statistics runs.
It should be stressed that the present study is still limited in scope.
An independent cross check of the {\tt MEPJET} results for the ``artificial''
parton densities has to be done before a firm conclusion can be reached.
Moreover,
the study has to be repeated for a wider range of observables and much higher
Monte Carlo statistics. The $s$~dependence of the {\tt MEPJET} results
should also be studied in more detail.
Compared to the other two programs {\tt MEPJET} and {\tt DISENT},
{\tt DISASTER++} makes the full $N_f$ dependence and the dependence
on the renormalization and factorization scales available in the
user routine. This is required for consistent studies of effects
such as the scale dependence when the bottom threshold is crossed.
\section{Acknowledgements}
I wish to thank M.~Seymour for sending me the numerical results for the new
{\tt DISENT} version. D.~Zeppenfeld made a few cross
checks of the results for the MRSD$_-^\prime$ parton densities.
J.~Collins has provided me with the {\tt FORTRAN}
routine to test the {\tt IEEE NaN} condition.
I am also grateful to Th.~Hadig for a few comments on the first version
of this paper, and for suggestions for improvements of the program.
\clearpage
\begin{appendix}
\section{Numerical Results}
\label{capp}
This appendix contains the numerical results which are discussed in
Section~\ref{comparison}. The entries in the tables are the (2+1)-jet
cross sections
in units of [pb].
\begin{center}
\begin{tabular}[h]{|c|c|c|c|c|c|c|}
\cline{2-7}
\multicolumn{1}{c|}{\rule[-2.5mm]{0mm}{8mm}}
& \multicolumn{3}{|c|}{leading order}
& \multicolumn{3}{|c|}{next-to-leading order}
\\ \hline
bin\rule[-2.5mm]{0mm}{8mm}
& \makebox[2.2cm]{\tt DISASTER++}
& \makebox[2.2cm]{\tt MEPJET}
& \makebox[2.2cm]{\tt DISENT}
& \makebox[2.2cm]{\tt DISASTER++}
& \makebox[2.2cm]{\tt MEPJET}
& \makebox[2.2cm]{\tt DISENT}
\\ \hline\hline
1\rule[-2.5mm]{0mm}{8mm}
& \pmdg{402.1}{1.13}
& \pmdg{399.9}{0.53}
& \pmdg{399.6}{1.1}
& \pmdg{585.0}{2.6}
& \pmdg{564.1}{1.9}
& \pmdg{578.4}{7.1}
\\ \hline
2\rule[-2.5mm]{0mm}{8mm}
& \pmdg{207.6}{0.59}
& \pmdg{207.5}{0.34}
& \pmdg{207.4}{0.15}
& \pmdg{364.8}{1.5}
& \pmdg{347.3}{2.4}
& \pmdg{361.1}{3.5}
\\ \hline
3\rule[-2.5mm]{0mm}{8mm}
& \pmdg{60.0}{0.16}
& \pmdg{59.9}{0.14}
& \pmdg{59.9}{0.15}
& \pmdg{119.1}{1.71}
& \pmdg{118.0}{1.05}
& \pmdg{120.1}{0.94}
\\ \hline
4\rule[-2.5mm]{0mm}{8mm}
& \pmdg{82.9}{0.16}
& \pmdg{82.9}{0.10}
& \pmdg{82.6}{0.21}
& \pmdg{98.1}{1.11}
& \pmdg{95.1}{0.61}
& \pmdg{95.4}{0.87}
\\ \hline
5\rule[-2.5mm]{0mm}{8mm}
& \pmdg{42.9}{0.08}
& \pmdg{42.9}{0.06}
& \pmdg{42.6}{0.28}
& \pmdg{55.3}{0.46}
& \pmdg{54.4}{0.49}
& \pmdg{54.9}{0.40}
\\ \hline
6\rule[-2.5mm]{0mm}{8mm}
& \pmdg{11.9}{0.02}
& \pmdg{11.9}{0.02}
& \pmdg{11.9}{0.08}
& \pmdg{17.5}{0.06}
& \pmdg{16.8}{0.22}
& \pmdg{17.3}{0.13}
\\ \hline
7\rule[-2.5mm]{0mm}{8mm}
& \pmdg{9.60}{0.03}
& \pmdg{9.58}{0.01}
& \pmdg{9.59}{0.04}
& \pmdg{12.1}{0.50}
& \pmdg{12.7}{0.07}
& \pmdg{12.3}{0.15}
\\ \hline
8\rule[-2.5mm]{0mm}{8mm}
& \pmdg{6.24}{0.01}
& \pmdg{6.23}{0.01}
& \pmdg{6.24}{0.02}
& \pmdg{8.61}{0.12}
& \pmdg{8.55}{0.15}
& \pmdg{8.52}{0.08}
\\ \hline
9\rule[-2.5mm]{0mm}{8mm}
& \pmdg{1.78}{0.003}
& \pmdg{1.78}{0.003}
& \pmdg{1.78}{0.06}
& \pmdg{2.65}{0.03}
& \pmdg{2.57}{0.06}
& \pmdg{2.63}{0.02}
\\ \hline
\end{tabular}
\vspace{0.5cm}
Table 2: {\it
Comparison for MRSD$_-^{\,\prime}$ parton densities.
}
\end{center}
\clearpage
\begin{center}
\begin{tabular}[h]{|c|c|c|c|c|c|c|}
\cline{2-7}
\multicolumn{1}{c|}{\rule[-2.5mm]{0mm}{8mm}}
& \multicolumn{3}{|c|}{leading order}
& \multicolumn{3}{|c|}{next-to-leading order}
\\ \hline
bin\rule[-2.5mm]{0mm}{8mm}
& \makebox[2.2cm]{\tt DISASTER++}
& \makebox[2.2cm]{\tt MEPJET}
& \makebox[2.2cm]{\tt DISENT}
& \makebox[2.2cm]{\tt DISASTER++}
& \makebox[2.2cm]{\tt MEPJET}
& \makebox[2.2cm]{\tt DISENT}
\\ \hline\hline
1\rule[-2.5mm]{0mm}{8mm}
& \pmdg{36.2}{0.09}
& \pmdg{36.3}{0.05}
& \pmdg{36.3}{0.12}
& \pmdg{39.1}{0.33}
& \pmdg{40.9}{0.89}
& \pmdg{38.2}{0.53}
\\ \hline
2\rule[-2.5mm]{0mm}{8mm}
& \pmdg{17.8}{0.04}
& \pmdg{17.8}{0.03}
& \pmdg{17.7}{0.05}
& \pmdg{23.2}{0.37}
& \pmdg{22.7}{0.41}
& \pmdg{22.6}{0.22}
\\ \hline
3\rule[-2.5mm]{0mm}{8mm}
& \pmdg{5.21}{0.01}
& \pmdg{5.21}{0.01}
& \pmdg{5.21}{0.02}
& \pmdg{8.24}{0.22}
& \pmdg{7.86}{0.12}
& \pmdg{8.14}{0.06}
\\ \hline
4\rule[-2.5mm]{0mm}{8mm}
& \pmdg{27.3}{0.06}
& \pmdg{27.3}{0.03}
& \pmdg{27.2}{0.09}
& \pmdg{28.0}{0.52}
& \pmdg{29.2}{0.18}
& \pmdg{30.0}{0.21}
\\ \hline
5\rule[-2.5mm]{0mm}{8mm}
& \pmdg{14.8}{0.03}
& \pmdg{14.8}{0.02}
& \pmdg{14.7}{0.04}
& \pmdg{17.4}{0.29}
& \pmdg{16.9}{0.10}
& \pmdg{17.0}{0.11}
\\ \hline
6\rule[-2.5mm]{0mm}{8mm}
& \pmdg{4.33}{0.008}
& \pmdg{4.32}{0.006}
& \pmdg{4.31}{0.01}
& \pmdg{5.62}{0.10}
& \pmdg{5.44}{0.05}
& \pmdg{5.54}{0.03}
\\ \hline
7\rule[-2.5mm]{0mm}{8mm}
& \pmdg{6.38}{0.02}
& \pmdg{6.37}{0.01}
& \pmdg{6.38}{0.03}
& \pmdg{8.49}{0.17}
& \pmdg{8.59}{0.10}
& \pmdg{8.37}{0.11}
\\ \hline
8\rule[-2.5mm]{0mm}{8mm}
& \pmdg{4.44}{0.01}
& \pmdg{4.43}{0.007}
& \pmdg{4.44}{0.02}
& \pmdg{6.11}{0.08}
& \pmdg{6.05}{0.07}
& \pmdg{6.07}{0.06}
\\ \hline
9\rule[-2.5mm]{0mm}{8mm}
& \pmdg{1.36}{0.002}
& \pmdg{1.36}{0.002}
& \pmdg{1.36}{0.05}
& \pmdg{2.02}{0.02}
& \pmdg{2.00}{0.05}
& \pmdg{2.01}{0.01}
\\ \hline
\end{tabular}
\vspace{0.5cm}
Table 3: {\it
Comparison for $q(\xi) = (1-\xi)^5$
}
\end{center}
\begin{center}
\begin{tabular}[h]{|c|c|c|c|c|c|c|}
\cline{2-7}
\multicolumn{1}{c|}{\rule[-2.5mm]{0mm}{8mm}}
& \multicolumn{3}{|c|}{leading order}
& \multicolumn{3}{|c|}{next-to-leading order}
\\ \hline
bin\rule[-2.5mm]{0mm}{8mm}
& \makebox[2.2cm]{\tt DISASTER++}
& \makebox[2.2cm]{\tt MEPJET}
& \makebox[2.2cm]{\tt DISENT}
& \makebox[2.2cm]{\tt DISASTER++}
& \makebox[2.2cm]{\tt MEPJET}
& \makebox[2.2cm]{\tt DISENT}
\\ \hline\hline
1\rule[-2.5mm]{0mm}{8mm}
& \pmdg{4.89}{0.017}
& \pmdg{4.89}{0.007}
& \pmdg{4.87}{0.01}
& \pmdg{5.38}{0.07}
& \pmdg{6.03}{0.06}
& \pmdg{5.22}{0.13}
\\ \hline
2\rule[-2.5mm]{0mm}{8mm}
& \pmdg{2.66}{0.009}
& \pmdg{2.66}{0.007}
& \pmdg{2.65}{0.007}
& \pmdg{3.67}{0.08}
& \pmdg{3.66}{0.04}
& \pmdg{3.58}{0.05}
\\ \hline
3\rule[-2.5mm]{0mm}{8mm}
& \pmdg{0.825}{0.003}
& \pmdg{0.826}{0.002}
& \pmdg{0.826}{0.002}
& \pmdg{1.44}{0.07}
& \pmdg{1.37}{0.03}
& \pmdg{1.39}{0.02}
\\ \hline
4\rule[-2.5mm]{0mm}{8mm}
& \pmdg{1.60}{0.005}
& \pmdg{1.60}{0.003}
& \pmdg{1.60}{0.003}
& \pmdg{1.20}{0.05}
& \pmdg{1.30}{0.01}
& \pmdg{1.12}{0.04}
\\ \hline
5\rule[-2.5mm]{0mm}{8mm}
& \pmdg{0.904}{0.003}
& \pmdg{0.900}{0.001}
& \pmdg{0.899}{0.002}
& \pmdg{0.833}{0.027}
& \pmdg{0.801}{0.008}
& \pmdg{0.764}{0.019}
\\ \hline
6\rule[-2.5mm]{0mm}{8mm}
& \pmdg{0.279}{0.001}
& \pmdg{0.278}{0.001}
& \pmdg{0.278}{0.001}
& \pmdg{0.314}{0.007}
& \pmdg{0.287}{0.004}
& \pmdg{0.299}{0.006}
\\ \hline
7\rule[-2.5mm]{0mm}{8mm}
& \pmdg{0.130}{0.001}
& \pmdg{0.131}{0.001}
& \pmdg{0.130}{0.001}
& \pmdg{0.119}{0.005}
& \pmdg{0.118}{0.002}
& \pmdg{0.110}{0.006}
\\ \hline
8\rule[-2.5mm]{0mm}{8mm}
& \pmdg{0.0981}{0.001}
& \pmdg{0.0980}{0.001}
& \pmdg{0.0981}{0.001}
& \pmdg{0.105}{0.002}
& \pmdg{0.096}{0.001}
& \pmdg{0.099}{0.004}
\\ \hline
9\rule[-2.5mm]{0mm}{8mm}
& \pmdg{0.0313}{0.0001}
& \pmdg{0.0310}{0.001}
& \pmdg{0.0313}{0.001}
& \pmdg{0.0396}{0.001}
& \pmdg{0.034}{0.001}
& \pmdg{0.0386}{0.001}
\\ \hline
\end{tabular}
\vspace{0.5cm}
Table 4: {\it
Comparison for $g(\xi) = (1-\xi)^5$
}
\end{center}
\clearpage
\begin{center}
\begin{tabular}[h]{|c|c|c|c|c|c|c|}
\cline{2-7}
\multicolumn{1}{c|}{\rule[-2.5mm]{0mm}{8mm}}
& \multicolumn{3}{|c|}{leading order}
& \multicolumn{3}{|c|}{next-to-leading order}
\\ \hline
bin\rule[-2.5mm]{0mm}{8mm}
& \makebox[2.2cm]{\tt DISASTER++}
& \makebox[2.2cm]{\tt MEPJET}
& \makebox[2.2cm]{\tt DISENT}
& \makebox[2.2cm]{\tt DISASTER++}
& \makebox[2.2cm]{\tt MEPJET}
& \makebox[2.2cm]{\tt DISENT}
\\ \hline\hline
1\rule[-2.5mm]{0mm}{8mm}
& \pmdg{46.1}{0.11}
& \pmdg{46.2}{0.07}
& \pmdg{46.2}{0.14}
& \pmdg{49.4}{0.67}
& \pmdg{58.8}{0.65}
& \pmdg{47.8}{1.2}
\\ \hline
2\rule[-2.5mm]{0mm}{8mm}
& \pmdg{23.8}{0.05}
& \pmdg{23.8}{0.09}
& \pmdg{23.8}{0.07}
& \pmdg{30.6}{0.33}
& \pmdg{31.4}{0.71}
& \pmdg{29.0}{0.54}
\\ \hline
3\rule[-2.5mm]{0mm}{8mm}
& \pmdg{7.28}{0.02}
& \pmdg{7.28}{0.02}
& \pmdg{7.29}{0.02}
& \pmdg{11.2}{0.21}
& \pmdg{11.0}{0.24}
& \pmdg{11.4}{0.14}
\\ \hline
4\rule[-2.5mm]{0mm}{8mm}
& \pmdg{42.4}{0.09}
& \pmdg{42.3}{0.06}
& \pmdg{42.3}{0.12}
& \pmdg{38.4}{0.30}
& \pmdg{41.9}{0.26}
& \pmdg{38.4}{0.31}
\\ \hline
5\rule[-2.5mm]{0mm}{8mm}
& \pmdg{23.9}{0.04}
& \pmdg{23.9}{0.03}
& \pmdg{23.8}{0.06}
& \pmdg{24.8}{0.46}
& \pmdg{24.2}{0.19}
& \pmdg{23.9}{0.16}
\\ \hline
6\rule[-2.5mm]{0mm}{8mm}
& \pmdg{7.31}{0.01}
& \pmdg{7.30}{0.01}
& \pmdg{7.27}{0.02}
& \pmdg{8.11}{0.19}
& \pmdg{8.04}{0.41}
& \pmdg{8.24}{0.05}
\\ \hline
7\rule[-2.5mm]{0mm}{8mm}
& \pmdg{20.3}{0.05}
& \pmdg{20.3}{0.08}
& \pmdg{20.3}{0.08}
& \pmdg{23.3}{0.64}
& \pmdg{25.1}{0.18}
& \pmdg{22.4}{0.24}
\\ \hline
8\rule[-2.5mm]{0mm}{8mm}
& \pmdg{15.4}{0.03}
& \pmdg{15.4}{0.02}
& \pmdg{15.4}{0.01}
& \pmdg{18.6}{0.36}
& \pmdg{18.3}{0.47}
& \pmdg{18.4}{0.15}
\\ \hline
9\rule[-2.5mm]{0mm}{8mm}
& \pmdg{4.87}{0.01}
& \pmdg{4.86}{0.01}
& \pmdg{4.87}{0.04}
& \pmdg{6.47}{0.08}
& \pmdg{6.38}{0.07}
& \pmdg{6.41}{0.05}
\\ \hline
\end{tabular}
\vspace{0.5cm}
Table 5: {\it
Comparison for $q(\xi) = (1-\xi)^2$
}
\end{center}
\begin{center}
\begin{tabular}[h]{|c|c|c|c|c|c|c|}
\cline{2-7}
\multicolumn{1}{c|}{\rule[-2.5mm]{0mm}{8mm}}
& \multicolumn{3}{|c|}{leading order}
& \multicolumn{3}{|c|}{next-to-leading order}
\\ \hline
bin\rule[-2.5mm]{0mm}{8mm}
& \makebox[2.2cm]{\tt DISASTER++}
& \makebox[2.2cm]{\tt MEPJET}
& \makebox[2.2cm]{\tt DISENT}
& \makebox[2.2cm]{\tt DISASTER++}
& \makebox[2.2cm]{\tt MEPJET}
& \makebox[2.2cm]{\tt DISENT}
\\ \hline\hline
1\rule[-2.5mm]{0mm}{8mm}
& \pmdg{6.24}{0.02}
& \pmdg{6.22}{0.01}
& \pmdg{6.21}{0.02}
& \pmdg{6.73}{0.13}
& \pmdg{8.94}{0.12}
& \pmdg{6.67}{0.24}
\\ \hline
2\rule[-2.5mm]{0mm}{8mm}
& \pmdg{3.59}{0.01}
& \pmdg{3.58}{0.01}
& \pmdg{3.57}{0.01}
& \pmdg{4.77}{0.06}
& \pmdg{5.24}{0.09}
& \pmdg{4.43}{0.11}
\\ \hline
3\rule[-2.5mm]{0mm}{8mm}
& \pmdg{1.18}{0.004}
& \pmdg{1.18}{0.004}
& \pmdg{1.18}{0.003}
& \pmdg{1.93}{0.04}
& \pmdg{1.89}{0.04}
& \pmdg{1.86}{0.03}
\\ \hline
4\rule[-2.5mm]{0mm}{8mm}
& \pmdg{2.65}{0.007}
& \pmdg{2.65}{0.003}
& \pmdg{2.65}{0.006}
& \pmdg{1.13}{0.03}
& \pmdg{1.66}{0.02}
& \pmdg{0.94}{0.07}
\\ \hline
5\rule[-2.5mm]{0mm}{8mm}
& \pmdg{1.62}{0.004}
& \pmdg{1.61}{0.002}
& \pmdg{1.61}{0.003}
& \pmdg{1.04}{0.04}
& \pmdg{1.09}{0.02}
& \pmdg{0.993}{0.03}
\\ \hline
6\rule[-2.5mm]{0mm}{8mm}
& \pmdg{0.535}{0.001}
& \pmdg{0.534}{0.001}
& \pmdg{0.533}{0.001}
& \pmdg{0.433}{0.018}
& \pmdg{0.412}{0.009}
& \pmdg{0.430}{0.010}
\\ \hline
7\rule[-2.5mm]{0mm}{8mm}
& \pmdg{0.452}{0.002}
& \pmdg{0.452}{0.001}
& \pmdg{0.451}{0.001}
& \pmdg{0.221}{0.026}
& \pmdg{0.292}{0.010}
& \pmdg{0.129}{0.02}
\\ \hline
8\rule[-2.5mm]{0mm}{8mm}
& \pmdg{0.398}{0.001}
& \pmdg{0.398}{0.001}
& \pmdg{0.397}{0.001}
& \pmdg{0.298}{0.01}
& \pmdg{0.271}{0.005}
& \pmdg{0.237}{0.01}
\\ \hline
9\rule[-2.5mm]{0mm}{8mm}
& \pmdg{0.136}{0.001}
& \pmdg{0.135}{0.001}
& \pmdg{0.135}{0.001}
& \pmdg{0.130}{0.003}
& \pmdg{0.109}{0.002}
& \pmdg{0.120}{0.004}
\\ \hline
\end{tabular}
\vspace{0.5cm}
Table 6: {\it
Comparison for $g(\xi) = (1-\xi)^2$
}
\end{center}
\clearpage
\begin{center}
\begin{tabular}[h]{|c|c|c|c|c|c|c|}
\cline{2-7}
\multicolumn{1}{c|}{\rule[-2.5mm]{0mm}{8mm}}
& \multicolumn{3}{|c|}{leading order}
& \multicolumn{3}{|c|}{next-to-leading order}
\\ \hline
bin\rule[-2.5mm]{0mm}{8mm}
& \makebox[2.2cm]{\tt DISASTER++}
& \makebox[2.2cm]{\tt MEPJET}
& \makebox[2.2cm]{\tt DISENT}
& \makebox[2.2cm]{\tt DISASTER++}
& \makebox[2.2cm]{\tt MEPJET}
& \makebox[2.2cm]{\tt DISENT}
\\ \hline\hline
1\rule[-2.5mm]{0mm}{8mm}
& \pmdg{50.6}{0.12}
& \pmdg{50.7}{0.13}
& \pmdg{50.7}{0.15}
& \pmdg{58.6}{1.29}
& \pmdg{72.9}{1.56}
& \pmdg{54.7}{2.1}
\\ \hline
2\rule[-2.5mm]{0mm}{8mm}
& \pmdg{27.1}{0.05}
& \pmdg{27.1}{0.16}
& \pmdg{27.0}{0.07}
& \pmdg{36.4}{0.57}
& \pmdg{40.0}{0.84}
& \pmdg{34.9}{1.0}
\\ \hline
3\rule[-2.5mm]{0mm}{8mm}
& \pmdg{8.51}{0.02}
& \pmdg{8.51}{0.02}
& \pmdg{8.52}{0.02}
& \pmdg{13.8}{0.35}
& \pmdg{13.3}{0.43}
& \pmdg{13.9}{0.2}
\\ \hline
4\rule[-2.5mm]{0mm}{8mm}
& \pmdg{49.8}{0.10}
& \pmdg{49.7}{0.05}
& \pmdg{49.6}{0.14}
& \pmdg{41.2}{0.55}
& \pmdg{47.2}{0.91}
& \pmdg{41.9}{0.38}
\\ \hline
5\rule[-2.5mm]{0mm}{8mm}
& \pmdg{29.0}{0.05}
& \pmdg{29.0}{0.03}
& \pmdg{28.8}{0.07}
& \pmdg{27.3}{0.52}
& \pmdg{28.2}{0.42}
& \pmdg{26.4}{0.19}
\\ \hline
6\rule[-2.5mm]{0mm}{8mm}
& \pmdg{9.09}{0.01}
& \pmdg{9.07}{0.01}
& \pmdg{9.04}{0.02}
& \pmdg{9.58}{0.06}
& \pmdg{9.16}{0.15}
& \pmdg{9.54}{0.06}
\\ \hline
7\rule[-2.5mm]{0mm}{8mm}
& \pmdg{30.6}{0.08}
& \pmdg{30.5}{0.04}
& \pmdg{30.5}{0.12}
& \pmdg{32.0}{0.34}
& \pmdg{36.3}{0.59}
& \pmdg{32.4}{0.52}
\\ \hline
8\rule[-2.5mm]{0mm}{8mm}
& \pmdg{24.3}{0.04}
& \pmdg{24.3}{0.03}
& \pmdg{24.3}{0.07}
& \pmdg{27.6}{0.56}
& \pmdg{28.4}{0.35}
& \pmdg{27.6}{0.21}
\\ \hline
9\rule[-2.5mm]{0mm}{8mm}
& \pmdg{7.88}{0.01}
& \pmdg{7.86}{0.01}
& \pmdg{7.87}{0.02}
& \pmdg{9.63}{0.21}
& \pmdg{9.50}{0.15}
& \pmdg{9.47}{0.06}
\\ \hline
\end{tabular}
\vspace{0.5cm}
Table 7: {\it
Comparison for $q(\xi) = (1-\xi)$
}
\end{center}
\begin{center}
\begin{tabular}[h]{|c|c|c|c|c|c|c|}
\cline{2-7}
\multicolumn{1}{c|}{\rule[-2.5mm]{0mm}{8mm}}
& \multicolumn{3}{|c|}{leading order}
& \multicolumn{3}{|c|}{next-to-leading order}
\\ \hline
bin\rule[-2.5mm]{0mm}{8mm}
& \makebox[2.2cm]{\tt DISASTER++}
& \makebox[2.2cm]{\tt MEPJET}
& \makebox[2.2cm]{\tt DISENT}
& \makebox[2.2cm]{\tt DISASTER++}
& \makebox[2.2cm]{\tt MEPJET}
& \makebox[2.2cm]{\tt DISENT}
\\ \hline\hline
1\rule[-2.5mm]{0mm}{8mm}
& \pmdg{6.84}{0.02}
& \pmdg{6.84}{0.01}
& \pmdg{6.82}{0.02}
& \pmdg{8.20}{0.25}
& \pmdg{11.6}{0.14}
& \pmdg{8.26}{0.45}
\\ \hline
2\rule[-2.5mm]{0mm}{8mm}
& \pmdg{4.09}{0.01}
& \pmdg{4.07}{0.01}
& \pmdg{4.07}{0.01}
& \pmdg{5.70}{0.11}
& \pmdg{6.69}{0.16}
& \pmdg{5.68}{0.17}
\\ \hline
3\rule[-2.5mm]{0mm}{8mm}
& \pmdg{1.39}{0.004}
& \pmdg{1.39}{0.005}
& \pmdg{1.39}{0.003}
& \pmdg{2.41}{0.07}
& \pmdg{2.33}{0.05}
& \pmdg{2.34}{0.05}
\\ \hline
4\rule[-2.5mm]{0mm}{8mm}
& \pmdg{3.19}{0.01}
& \pmdg{3.19}{0.01}
& \pmdg{3.19}{0.01}
& \pmdg{0.686}{0.09}
& \pmdg{1.65}{0.03}
& \pmdg{0.691}{0.10}
\\ \hline
5\rule[-2.5mm]{0mm}{8mm}
& \pmdg{2.06}{0.005}
& \pmdg{2.06}{0.002}
& \pmdg{2.05}{0.003}
& \pmdg{1.00}{0.08}
& \pmdg{1.14}{0.03}
& \pmdg{0.866}{0.05}
\\ \hline
6\rule[-2.5mm]{0mm}{8mm}
& \pmdg{0.711}{0.001}
& \pmdg{0.710}{0.001}
& \pmdg{0.709}{0.001}
& \pmdg{0.500}{0.006}
& \pmdg{0.471}{0.01}
& \pmdg{0.442}{0.017}
\\ \hline
7\rule[-2.5mm]{0mm}{8mm}
& \pmdg{0.712}{0.003}
& \pmdg{0.711}{0.001}
& \pmdg{0.710}{0.002}
& \pmdg{0.157}{0.026}
& \pmdg{0.373}{0.008}
& \pmdg{0.082}{0.038}
\\ \hline
8\rule[-2.5mm]{0mm}{8mm}
& \pmdg{0.692}{0.002}
& \pmdg{0.690}{0.001}
& \pmdg{0.690}{0.001}
& \pmdg{0.411}{0.020}
& \pmdg{0.408}{0.022}
& \pmdg{0.340}{0.023}
\\ \hline
9\rule[-2.5mm]{0mm}{8mm}
& \pmdg{0.245}{0.001}
& \pmdg{0.245}{0.001}
& \pmdg{0.245}{0.001}
& \pmdg{0.194}{0.012}
& \pmdg{0.172}{0.007}
& \pmdg{0.161}{0.008}
\\ \hline
\end{tabular}
\vspace{0.5cm}
Table 8: {\it
Comparison for $g(\xi) = (1-\xi)$
}
\end{center}
\end{appendix}
\clearpage
\newcommand{\bibitema}[1]{\bibitem{#1}}
|
1,314,259,995,014 | arxiv | \section{Introduction}
This work is concerned with Gaussian approximations to a Poisson noise model for linear
inverse problems. The Poisson model is popular for modeling count data, where the response variable
follows a Poisson distribution with a parameter that is the exponential of a linear combination of the
unknown parameters. The model is especially suitable
for low count data, where the standard Gaussian model is inadequate. It has found many successful
practical applications, including transmission tomography \cite{YavuzFessler:1997,ErdoganFessler:1999}.
One traditional approach to parameter estimation with the Poisson model is the maximum likelihood method
or penalized variants with a convex penalty. This leads to a convex optimization problem,
whose solution is then taken as an approximation to the true solution. This approach has been extensively
studied, and we refer interested readers to the survey \cite{HohageWerner:2016} for a comprehensive
account on important developments along this line. However, this approach gives only a point estimator,
and does not allow quantifying the associated uncertainties directly. In this work, we
aim at a full Bayesian treatment of the problem, where both the point estimator (mean) and the associated
uncertainties (covariance) are of interest \cite{KaipioSomersalo:2005,Stuart:2010}. We shall focus
on the case of a Gaussian prior, which forms the basis of many other important priors, e.g., sparsity
prior via scale mixture representation. Then following the Bayesian procedure, we arrive at a posterior
probability distribution, which however is analytically intractable due to the nonstandard form of the
likelihood function for the Poisson model. We will explain this more precisely in Section \ref{sec:Poisson}.
To explore the posterior state space, instead of applying popular general-purposed sampling techniques, e.g.,
Markov chain Monte Carlo (MCMC), we employ a variational Gaussian approximation (VGA). The VGA
is one extremely popular approximate inference technique in
machine learning \cite{WainwrightJordan:2008,challis2013gaussian}. Specifically, we seek an optimal {Gaussian}
approximation to the non-Gaussian posterior distribution with respect to the Kullback-Leibler divergence.
The approach leads to a large-scale optimization problem over the mean $\mathbf{\bar x}$ and covariance $\mathbf{C}$
(of the Gaussian approximation). In practice, it generally delivers an accurate approximation in an
efficient manner, and thus has received immense attention in recent years in many different areas \cite{hinton1993keeping,barber1998ensemble,
challis2013gaussian,archambeau2007gaussian}. By its very construction, it also gives a lower bound to the
model evidence, which facilitates its use in model selection. However, a systematic theoretical
understanding of the approach remains largely missing.
In this work, we shall study the analytical properties and develop an efficient algorithm for the VGA in the
context of Poisson data (with the log linear link function). We shall provide a detailed analysis of the
resulting optimization problem. The study sheds interesting new insights into the approach from the perspective
of regularization. Our main contributions are as follows. First, we derive explicit expressions for the
objective functional and its gradient, and establish its strict concavity and the well-posedness of the
optimization problem. Second, we develop an efficient numerical algorithm for finding the optimal Gaussian
approximation, and discuss its convergence properties. The algorithm is of alternating maximization (coordinate
ascent) nature, and it updates the mean $\mathbf{\bar x}$ and covariance $\mathbf{C}$ alternatingly by a globally
convergent Newton method and a fixed point iteration, respectively. We also discuss strategies for its efficient
implementation, by leveraging realistic structure of inverse
problems, e.g., low-rank nature of the forward map $\mathbf{A}$ and sparsity of the covariance $\mathbf{C}$,
to reduce the computational complexity. Third, we illustrate the use of the evidence lower bound for
hyperparameter selection within a hierarchical Bayesian framework, leading to a purely data-driven approach for
determining the regularization parameter, whose proper choice is notoriously challenging. We shall develop a monotonically convergent
algorithm for determining the hyperparameter in the Gaussian prior. Last, we illustrate the approach and the algorithms
with extensive numerical experiments for one- and two-dimensional examples.
Last, we discuss existing works on Poisson models. The majority of existing works aim at recovering
point estimators, either iteratively or by a variational framework \cite{HohageWerner:2016}. Recently, Bardsley and Luttman \cite{BardsleyLuttman:2016} described
a Metropolis-Hastings algorithm for exploring the posterior distribution (with rectified linear inverse link function),
where the proposal samples are drawn from the Laplace approximation (cf. Remark \ref{rmk:Laplace}). The
Poisson model \eqref{eqn:poisson} belongs to generalized linear models (GLMs), to which the VGA has been applied in
statistics and machine learning \cite{OrmerodWand:2012,KhanMohamedMurphy:2012,challis2013gaussian,RohdeWand:2016}. Ormerod and Wand
\cite{OrmerodWand:2012} suggested a variational approximation strategy for fitting GLMs suitable for grouped data.
Challis and Barber \cite{challis2013gaussian} systematically studied VGA for GLMs and various extensions.
The focus of these interesting works \cite{OrmerodWand:2012,KhanMohamedMurphy:2012,challis2013gaussian,
RohdeWand:2016} is on the development of the general VGA methodology and its applications to concrete problems, and these works do not study
analytical properties and computational techniques for the lower bound functional, which is the main goal
of this work.
The rest of the paper is organized as follows. In Section \ref{sec:Poisson}, we describe
the Poisson model, and formulate the posterior probability distribution. Then in Section \ref{sec:vb},
we develop the variational Gaussian approximation, and analyze its basic analytical properties.
In Section \ref{sec:algorithm}, we propose an efficient numerical algorithm for finding the
optimal Gaussian approximation, and in Section \ref{sec:hyper}, we apply the lower bound to
hyperparameter selection within a hierarchical Bayesian framework.
In Section \ref{sec:numer} we present numerical results for several examples. In two
appendices, we provide further discussions on the convergence
of the fixed point iteration \eqref{eqn:iter-C} and the differentiability of the regularized solution.
\section{Notation and problem setting}\label{sec:Poisson}
First we recall some standard notation in linear algebra. Throughout, (real-valued) vectors and matrices are
denoted by bold lower- and upper-case letters, respectively, and the vectors are
always column vectors. We will use the notation $(\cdot,\cdot)$ to denote the usual Euclidean inner
product. We shall slightly abuse the notation $(\cdot,\cdot)$ also for the inner product for
matrices. That is, for two matrices $\mathbf{X}, \mathbf{Y}\in\mathbb{R}^{n\times m}$, we define
\begin{equation*}
(\mathbf{X},\mathbf{Y})=\mathrm{tr}(\mathbf{XY}^t)=\mathrm{tr}(\mathbf{X}^t\mathbf{Y}),
\end{equation*}
where $\mathrm{tr}(\cdot)$ denotes taking the trace of a square matrix, and the superscript $t$ denotes the transpose
of a vector or matrix. This inner product induces the usual Frobenius norm for matrices. We shall use extensively
the cyclic property of the trace operator $\mathrm{tr}(\cdot)$: for three matrices $\mathbf{X,Y,Z}$ of appropriate size, there holds
\begin{equation*}
\mathrm{tr}(\mathbf{XYZ})=\mathrm{tr}(\mathbf{YZX})=\mathrm{tr}(\mathbf{ZXY}).
\end{equation*}
We shall also use the notation \texttt{diag}$(\cdot)$ for a vector and a square matrix, which gives a diagonal matrix
and a column vector from the diagonals of the matrix, respectively, in the same manner as the \texttt{diag} function
in \texttt{MATLAB}. The notation $\mathbb{N}=\{0,1,\ldots\}$ denotes the set of natural numbers.
Further, the notation $\circ$ denotes the Hadamard product of two matrices or vectors. Last, we
denote by $\mathcal{S}_m^+\subset\mathbb{R}^{m\times m}$ the set of symmetric positive definite matrices in $\mathbb{R}^{m
\times m}$, $\mathbf{I}_m$ the identity matrix in $\mathbb{R}^{m\times m}$, and by $|\cdot|$ and $\|\cdot\|$ the determinant
and the spectral norm, respectively, of a square matrix. Throughout, we view exponential,
logarithm and factorial of a vector as componentwise operation.
Next we recall the finite-dimensional Poisson data model. Let $\mathbf{x}\in\mathbb{R}^m$ be the unknown
signal, $\mathbf{a}_i\in\mathbb{R}^m$, $i = 1,\dots,n$, and $\mathbf{y}\in\mathbb{N}^n\subset \mathbb{R}^n$
be the data vector. We stack the column vectors $\mathbf{a}_i$ into a matrix $\mathbf{A}$
by $\mathbf{A} = [\mathbf{a}^t_i]\in\mathbb{R}^{n\times m}$. Given the matrix $\mathbf{A}$ and data $\mathbf{y}\in\mathbb{N}^n$,
the Poisson model takes the form:
\begin{equation*}
y_i\sim\text{Pois}(e^{(\mathbf{a}_i,\mathbf{x})}),\quad i=1,2,\ldots,n.
\end{equation*}
Thus, the likelihood function $p(y_i|\mathbf{x})$ for the data point $y_i$ is given by
\begin{equation}\label{eqn:pois}
p(y_i|\mathbf{x}) = \frac{\lambda_i^{y_i}e^{-\lambda_i}}{y_i!}, \quad \lambda_i = e^{(\mathbf{a}_i,\mathbf{x})}, \,\, i = 1,\ldots,n.
\end{equation}
It is worth noting that the exponential function enters into the Poisson parameter $\lambda$. This is commonly known as the log link
function or log-linear model in the statistical literature \cite{CameronTrivedi:1998}. There are several other models
for the (inverse) link functions, e.g.,
rectified-linear and softplus \cite{pillow2007likelihood}, each having its own pros and cons for
modeling count data. In this work, we shall focus on the log link function. Also this model can be viewed as a simplified
statistical model for transmission tomography \cite{YavuzFessler:1997,ErdoganFessler:1999}.
The likelihood function $p(y_i|\mathbf{x})$ can be equivalently written as
\begin{equation*}
p(y_i|\mathbf{x}) = e^{y_i(\mathbf{a}_i,\mathbf{x})-e^{(\mathbf{a}_i,\mathbf{x})}-\text{ln}(y_i!)}.
\end{equation*}
Under the independent identically distributed (i.i.d.)
assumption on the data points $y_i$, the likelihood function $p(\mathbf{y}|\mathbf{x})$ of
the data vector $\mathbf{y}$ is given by
\begin{equation}\label{eqn:poisson}
p(\mathbf{y}|\mathbf{x}) = \prod^n_{i=1}p(y_i|\mathbf{x})
= e^{(\mathbf{A}\mathbf{x},\mathbf{y})-(e^{\mathbf{A}\mathbf{x}},\mathbf{1}_n)-(\text{ln}(\mathbf{y}!),\mathbf{1}_n)},
\end{equation}
where $\mathbf{1}_n\in\mathbb{R}^n$ is the vector with all entries equal to unity, i.e., $\mathbf{1}_n = [1,\dots,1]^t\in\mathbb{R}^n$.
Further, we assume that the unknown $\mathbf{x}$ follows a Gaussian prior $p(\mathbf{x})$, i.e.,
\begin{equation*}
p(\mathbf{x})=\mathcal{N}(\mathbf{x};\bm{\mu}_0,\mathbf{C}_0):=(2\pi)^{-\frac{m}{2}}|\mathbf{C}_0|^{-\frac{1}{2}}e^{-\frac{1}{2}(\mathbf{x}-\bm \mu_0)^t\mathbf{C}_0^{-1}(\mathbf{x}-\bm \mu_0)},
\end{equation*}
where $\bm\mu_0\in\mathbb{R}^m$ and $\mathbf{C}_0\in\mathcal{S}_m^+$ denote the mean and covariance
of the Gaussian prior, respectively, and $\mathcal{N}$ denotes the normal distribution. In the
framework of variational regularization, the
corresponding penalty $\frac{1}{2}(\mathbf{x}-\bm \mu_0)^t\mathbf{C}_0^{-1}(\mathbf{x}-\bm\mu_0)$ often
imposes certain smoothness constraint. The
Gaussian prior $p(\mathbf{x})$ may depend on additional hyperparameters, cf. Section \ref{sec:hyper} for details. Then
by Bayes' formula, the posterior probability distribution $p(\mathbf{x} |\mathbf{y})$ is given by
\begin{equation}\label{eq:post}
p(\mathbf{x}|\mathbf{y})= Z^{-1}p(\mathbf{x},\mathbf{y}),
\end{equation}
where the joint distribution $p(\mathbf{x},\mathbf{y})$ is defined by
\begin{equation*}
p(\mathbf{x},\mathbf{y}) = (2\pi)^{-\frac{m}{2}}|\mathbf{C}_0|^{-\frac{1}{2}} e^{(\mathbf{A}\mathbf{x},\mathbf{y})-(e^{\mathbf{A}\mathbf{x}},\mathbf{1}_n)-(\text{ln}(\mathbf{y}!),
\mathbf{1}_n)-\frac{1}{2}(\mathbf{x}-\bm{\mu}_0)^t\mathbf{C}^{-1}_0(\mathbf{x}-\bm{\mu}_0)},
\end{equation*}
and the normalizing constant $Z(\mathbf{y})$, which depends only on the given
data $\mathbf{y}$, is given by
\begin{equation*}
Z(\mathbf{y})=p(\mathbf{y})=\int p(\mathbf{x},\mathbf{y}){\rm d}\mathbf{x}.
\end{equation*}
That is, the normalizing constant $Z$ is an integral living in a very high-dimensional space if the parameter
dimension $m$ is large. Thus it is computationally intractable, and so is the posterior distribution $p(\mathbf{x}
|\mathbf{y})$, since it also involves the constant $Z$. The quantity $Z$ is commonly known as model evidence in
the literature, and it underlies many model selection rules, e.g., Bayes factor \cite{kass1995bayes}.
Thus the reliable approximation of $Z(\mathbf{y})$ is important in certain tasks.
The posterior distribution $p(\mathbf{x}|\mathbf{y})$ given in \eqref{eq:post} is the Bayesian solution to the
Poisson model \eqref{eqn:pois} (under a Gaussian prior), and it contains all the information about the
inverse problem. In order to explore the posterior state
space, one typically employs Markov chain Monte Carlo methods, which, however, can be prohibitively expensive for
high-dimensional problems, apart from the well-known challenge in diagnosing the convergence of the Markov chain.
To overcome the challenge, over the last two decades, a large number of approximate inference methods
have been developed, including mean-field approximation \cite{WainwrightJordan:2008}, expectation propagation
\cite{minka2001expectation} and variational Gaussian approximation (VGA) \cite{OpperArchambeau:2009,challis2013gaussian}.
In all these approximations, we aim at finding a best approximate yet tractable distribution $q(\mathbf{x})$ within
a family of parametric/nonparametric probability distributions, by minimizing the error in a certain
probability metric, prominently the Kullback-Leibler divergence $\mathrm{D}_{\text{KL}}(q||p)$, cf. Section
\ref{ssec:KL} below.
In this work, we shall employ the VGA to obtain an optimal Gaussian
approximation $q(\mathbf{x})$ to the posterior
distribution $p(\mathbf{x}|\mathbf{y})$ in the Kullback-Leibler divergence ${\rm D}_{\rm KL}(q||p)$.
Fitting a Gaussian to an intractable distribution is a well-established norm for approximate Bayesian inference,
and it has demonstrated success in many practical applications
\cite{hinton1993keeping,barber1998ensemble,challis2013gaussian,archambeau2007gaussian}.
The popularity can be largely attributed to the fact that the Gaussian approximation is
computationally attractive, and yet delivers reasonable accuracy for a wide range of problems, due to the
good analytical properties and great flexibility of the Gaussian family.
However, analytical properties of approximate inference procedures are rarely
studied. In the context of Poisson mixed models,
the asymptotic normality of the estimator and its convergence rate were analyzed \cite{hall2011theory}.
In a general setting, some theoretical issues were studied in \cite{PinskiSimpson:2015,LuStuartWeber:2016}.
\section{Gaussian variational approximation}\label{sec:vb}
In this section, we recall the Kullback-Leibler divergence, derive explicit
expressions for the lower bound functional and its gradient, and discuss basic
analytic properties, e.g., concavity and existence.
\subsection{Kullback-Leibler divergence}\label{ssec:KL}
The Kullback-Leibler divergence is one of the most popular metrics for measuring the distance between
two probability distributions. The Kullback-Leibler (KL) divergence \cite{KullbackLeibler:1951} from
one probability distribution $p$ to another distribution $q$ is a functional defined by
\begin{equation}\label{eq:kl}
\text{D}_{\text{KL}}(q||p) = \int q(\mathbf{x})\text{ln}\frac{q(\textbf{x})}{p(\mathbf{x})}\text{d}\mathbf{x}.
\end{equation}
Clearly, KL divergence is not symmetric and thus not a metric in the mathematical sense.
Since the logarithm function $\ln x$ is concave and $q$ is
normalized, i.e., $\int q(\mathbf{x}){\rm d}\mathbf{x}=1$, by Jensen's inequality, we can
derive the nonnegativity of the KL divergence:
\begin{equation}\label{eqn:KLD-nonnegative}
\begin{aligned}
\text{D}_{\text{KL}}(q||p) &= \int q(\mathbf{x})\text{ln}\frac{q(\textbf{x})}{p(\mathbf{x})}\text{d}\mathbf{x} = - \int q(\mathbf{x})\text{ln}\frac{p(\mathbf{x})}{q(\textbf{x})}\text{d}\mathbf{x}\\
&\ge -\text{ln}\int q(\mathbf{x})\frac{p(\mathbf{x})}{q(\textbf{x})}\text{d}\mathbf{x} = - \text{ln}\int p(\mathbf{x})\text{d}\mathbf{x} = 0.
\end{aligned}
\end{equation}
Further, $\text{D}_{\text{KL}}(q||p) = 0$ if and only if $p=q$ almost everywhere.
Due to the asymmetry of the KL divergence, to find an approximation $q$ to the target distribution $p$, there are
two options, i.e., minimizing either $\text{D}_{\text{KL}}(q||p)$ or $\text{D}_{\text{KL}}(p||q)$.
These two options lead to different approximations. It was pointed out in \cite[Section 10.1.2]{Bishop:2006}
that minimizing $\text{D}_{\text{KL}}(p||q)$ tends to find the average of modes of
$p$, while minimizing $\text{D}_{\text{KL}}(q||p)$ tends to find one exact mode.
Traditionally, the former is used in expectation propagation, and the latter in
variational Bayes. In this work, we focus on the approach $\min\text{D}_{\text{KL}}(q||p)$,
which leads to the VGA to be described below.
\begin{remark}\label{rmk:Laplace}
In practice, the so-called Laplace approximation is quite popular \cite{ThierneyKadane:1986}. Specifically, let
$\hat{\mathbf{x}}$ be the maximum a posteriori {\rm(}MAP{\rm)} estimator, i.e.,
$\hat{\mathbf{x}}=\arg\min_{\mathbf{x}\in\mathbb{R}^m} g(\mathbf{x}),$ where $g(\mathbf{x}) = -\ln p(\mathbf{x}
|\mathbf{y})$ is the negative log posterior distribution. Consider the Taylor expansion of $g(\mathbf{x})$ at
the MAP estimator $\hat{\mathbf{x}}$:
\begin{equation*}
\begin{aligned}
g(\mathbf{x}) & \approx g(\hat{\mathbf{x}}) + (\nabla g(\hat{\mathbf{x}}),\mathbf{x} - \hat{\mathbf{x}}) + \tfrac{1}{2}(\mathbf{x}-\hat{\mathbf{x}})^t\mathbf{H}(\mathbf{x}-\hat{\mathbf{x}})\\
& =g(\hat{\mathbf{x}}) + \tfrac{1}{2}(\mathbf{x}-\hat{\mathbf{x}})^t\mathbf{H}(\mathbf{x}-\hat{\mathbf{x}}),
\end{aligned}
\end{equation*}
since $\nabla g(\hat{\mathbf{x}})$ vanishes.
The Hessian $\mathbf{H}$ of $g(\mathbf{x})$ is given by
\begin{equation*}
\mathbf{H}=\mathbf{A}^t\mathrm{diag}(e^{\mathbf{A}\mathbf{\hat x}})\mathbf{A}+\mathbf{C}^{-1}_0.
\end{equation*}
Thus, $\mathbf{\hat x}$ might serve as an approximate posterior mean, and the inverse Hessian
$\mathbf{H}^{-1}$ as an approximate posterior covariance. However, unlike the VGA discussed below, it lacks the optimality as
evidence lower bound {\rm(}within the Gaussian family{\rm)}, and thus may be suboptimal for model selection etc.
\end{remark}
\subsection{Variational Gaussian lower bound}
Now we derive the variational Gaussian lower bound.
By substituting $p(\mathbf{x})$ with the posterior distribution $p(\mathbf{x}|\mathbf{y})$ in \eqref{eq:kl}, we obtain
\begin{equation*}
\text{D}_{\text{KL}}(q(\mathbf{x})||p(\mathbf{x}|\mathbf{y})) = \int q(\mathbf{x})\text{ln}\frac{q(\textbf{x})}{p(\mathbf{x}|\mathbf{y})}\text{d}\mathbf{x}.
\end{equation*}
Since the posterior distribution $p(\mathbf{x}|\mathbf{y})$ depends on the unknown normalizing constant $Z(\mathbf{y})$,
the integral on the right hand side is not computable. Nonetheless,
given $\mathbf{y}$, $Z(\mathbf{y})$ is fixed. In view of the identity
\begin{equation*}
\begin{aligned}
\text{ln}Z &
= \int q(\mathbf{x})\text{ln}\frac{p(\mathbf{x},\mathbf{y})}{q(\mathbf{x})}\text{d}\mathbf{x} + \int q(\mathbf{x})\text{ln}\frac{q(\mathbf{x})}{p(\mathbf{x}|\mathbf{y})}\text{d}\mathbf{x},
\end{aligned}
\end{equation*}
instead of minimizing $\text{D}_{\text{KL}}(q(\mathbf{x})||p(\mathbf{x}|\mathbf{y}))$, we may equivalently maximize the functional
\begin{equation}\label{eq:lb}
F(q,\mathbf{y})=\int q(\mathbf{x})\text{ln}\frac{p(\mathbf{x},\mathbf{y})}{q(\mathbf{x})}\text{d}\mathbf{x}.
\end{equation}
By \eqref{eqn:KLD-nonnegative}, we have $\text{D}_{\text{KL}}(q(\mathbf{x})||p(\mathbf{x}|\mathbf{y}))\ge0$, and thus
$ \text{ln}Z \ge F(q,\mathbf{y})$. That is, $F(q,\mathbf{y})$ provides a lower bound on the model evidence $Z$, for any choice of the
distribution $q$. For any fixed $q$, $F(q,\mathbf{y})$ may be used as a substitute for the analytically intractable model
evidence $Z(\mathbf{y})$, and hence it is called an evidence lower bound (ELBO). Since the data $\mathbf{y}$ is fixed,
it will be suppressed from $F(q,\mathbf{y})$ below. In the VGA, we
restrict our choice of $q$ to Gaussian distributions. Meanwhile, a Gaussian distribution $q(\mathbf{x})$ is fully
characterized by its mean ${\mathbf{\bar x}}\in \mathbb{R}^m$ and covariance $\mathbf{C}\in\mathcal{S}^+_m\subset \mathbb{R}^{m\times m}$, i.e.,
\begin{equation*}
q(\mathbf{x}) = \mathcal{N}(\mathbf{x};
\mathbf{\bar{x}},\mathbf{C}).
\end{equation*}
Thus, $F(q)$ is actually a function of $\mathbf{\bar{x}}\in\mathbb{R}^m$ and $\mathbf{C}\in \mathcal{S}_m^+$, and will
be written as $F(\mathbf{\bar{x}},\mathbf{C})$ below. Then the approach seeks optimal variational parameters
$(\mathbf{\bar x},\mathbf{C})$ to maximize ELBO. This step turns a challenging
sampling problem into a computationally more tractable optimization problem.
The next result gives an explicit expression for the lower bound $F(\mathbf{\bar{x}},\mathbf{C})$.
\begin{proposition}\label{prop:lb}
For any fixed $\mathbf{y}, \bm{\mu}_0$ and $\mathbf{C}_0$, the lower bound $F(\mathbf{\bar x},\mathbf{C})$ is given by
\begin{equation}\label{eq:lbxc}
\begin{split}
F(\mathbf{\bar{x}},\mathbf{C}) &= (\mathbf{y},\mathbf{A}\mathbf{\bar{x}})- (\mathbf{1}_n,e^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}\mathrm{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)}) - \tfrac{1}{2}(\mathbf{\bar{x}}-\bm{\mu}_0)^t\mathbf{C}^{-1}_0(\mathbf{\bar{x}}-\bm{\mu}_0) - \tfrac{1}{2}\mathrm{tr}(\mathbf{C}^{-1}_0\mathbf{C})\\ &\hspace{1em}+ \tfrac{1}{2}\ln|\mathbf{C}| - \tfrac{1}{2}\ln|\mathbf{C_0}| + \tfrac{m}{2} - (\mathbf{1}_n,\ln(\mathbf{y}!)).
\end{split}
\end{equation}
\end{proposition}
\begin{proof}
By the definition of the functional $F(\mathbf{\bar x},\mathbf{C})$ and the joint distribution
$p(\mathbf{x},\mathbf{y})$, we have
\begin{equation*}
\begin{aligned}
F(\bar{\mathbf{x}},\mathbf{C})
&=\int\mathcal{N}(\mathbf{x};\mathbf{\bar{x}},\mathbf{C})\Big[\text{ln}|\mathbf{C}_0|^{-\frac{1}{2}}-\text{ln}|\mathbf{C}|^{-\frac{1}{2}}+(\mathbf{A}\mathbf{x},\mathbf{y})-(e^{\mathbf{A}\mathbf{x}},\mathbf{1}_n)-(\text{ln}(\mathbf{y}!),\mathbf{1}_n)\\ &\hspace{1em}-\tfrac{1}{2}(\mathbf{x}-\bm{\mu}_0)^t\mathbf{C}^{-1}_0(\mathbf{x}-\bm{\mu}_0)+\tfrac{1}{2}(\mathbf{x}-\mathbf{\bar{x}})^t\mathbf{C}^{-1}(\mathbf{x}-\mathbf{\bar{x}})\Big]\text{d}\mathbf{x}.
\end{aligned}
\end{equation*}
It suffices to evaluate the integrals termwise. Clearly, we have $
\int\mathcal{N}(\mathbf{x};\mathbf{\bar{x}},\mathbf{C})(\mathbf{A}\mathbf{x},\mathbf{y})
\text{d}\mathbf{x} = (\mathbf{A}\mathbf{\bar{x}},\mathbf{y}).$
Next, using moment generating function, we have
\begin{equation*}
\begin{aligned}
\int\mathcal{N}(\mathbf{x};\mathbf{\bar{x}},\mathbf{C})(e^{\mathbf{A}\mathbf{x}},\mathbf{1}_n)\text{d}\mathbf{x}
&= \sum_i\int\mathcal{N}(\mathbf{x};\mathbf{\bar{x}},\mathbf{C}) e^{(\mathbf{a}_i,\mathbf{x})}\text{d}\mathbf{x}\\
&= \sum_ie^{(\mathbf{a}_i,\mathbf{\bar{x}})+\frac{1}{2}\mathbf{a}^t_i\mathbf{C}\mathbf{a}_i}
= (\mathbf{1}_n,e^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}\text{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)}).
\end{aligned}
\end{equation*}
With the Cholesky decomposition $\mathbf{C}=\mathbf{L}\mathbf{L}^t$, for $\mathbf{z}\sim \mathcal{N}(\bm{0},
\mathbf{I}_m)$, $\mathbf{x}=\bm{\mu}+\mathbf{L}\mathbf{z}\sim\mathcal{N}(\mathbf{x};\bm{\mu},\mathbf{C})$.
This and the bias-variance decomposition yield ($\mathbb{E}_{q(\mathbf{x})}[\cdot]$ takes expectation with respect to the density $q(\mathbf{x})$)
\begin{equation*}
\mathbb{E}_{\mathcal{N}(\mathbf{x};\bm{\mu},\mathbf{C})}[\mathbf{x}^{t}\mathbf{A}\mathbf{x}] = \mathbb{E}_{\mathcal{N}(\mathbf{z};\mathbf{0},\mathbf{I}_m)}[(\bm{\mu}+\mathbf{L}\mathbf{z})^t\mathbf{A}(\bm{\mu}+\mathbf{L}\mathbf{z})]
= \bm{\mu}^t\mathbf{A}\bm{\mu} + \mathbb{E}_{\mathcal{N}(\mathbf{z};\mathbf{0},\mathbf{I}_m)}[\mathbf{z}^t\mathbf{L}^t\mathbf{A}\mathbf{L}\mathbf{z}].
\end{equation*}
By the cyclic property of trace, we have
$
\mathbb{E}_{\mathcal{N}(\mathbf{z};\mathbf{0},\mathbf{I}_m)}[\mathbf{z}^t\mathbf{L}^t\mathbf{A}\mathbf{L}\mathbf{z}]= \text{tr}(\mathbf{L}^t\mathbf{A}\mathbf{L})=\text{tr}(\mathbf{A}\mathbf{L}\mathbf{L}^t)
= \text{tr}(\mathbf{A}\mathbf{C}).
$
In particular, this gives
\begin{equation*}
\begin{aligned}
\mathbb{E}_{q(\mathbf{x})}[(\mathbf{x}-\bm{\mu}_0)^t\mathbf{C}^{-1}_0(\mathbf{x}-\bm{\mu}_0)]
= (\mathbf{\bar{x}}-\bm{\mu}_0)^t\mathbf{C}^{-1}_0(\mathbf{\bar{x}}-\bm{\mu}_0) +\text{tr}(\mathbf{C}^{-1}_0\mathbf{C}),
\end{aligned}
\end{equation*}
and
\begin{equation*}
\mathbb{E}_{q(\mathbf{x})}[(\mathbf{x}-\mathbf{\bar{x}})^t\mathbf{C}^{-1}
(\mathbf{x}-\mathbf{\bar{x}})] = m.
\end{equation*}
Collecting preceding identities completes the proof of the proposition.
\end{proof}
\begin{remark}\label{rmk:functional}
The terms in the functional $F(\mathbf{\bar{x}},\mathbf{C})$ in \eqref{eq:lbxc} admit interesting interpretation in
the lens of classical Tikhonov regularization {\rm(}see, e.g., \cite{EnglHankeNeubauer:1996,ito2014inverse,SchusterKaltenbacher:2012}{\rm)}.
To this end, it is instructive to rewrite it as
\begin{equation*}
\begin{split}
F(\mathbf{\bar{x}},\mathbf{C}) =& (\mathbf{y},\mathbf{A}\mathbf{\bar{x}})- (\mathbf{1}_n,e^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}\mathrm{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)}) - (\mathbf{1}_n,\ln(\mathbf{y}!))\\
& - \tfrac{1}{2}(\mathbf{\bar{x}}-\bm{\mu}_0)^t\mathbf{C}^{-1}_0(\mathbf{\bar{x}}-\bm{\mu}_0) \\
& - \tfrac{1}{2}\mathrm{tr}(\mathbf{C}^{-1}_0\mathbf{C})+ \tfrac{1}{2}\ln|\mathbf{C}| - \tfrac{1}{2}\ln|\mathbf{C_0}| + \tfrac{m}{2}.
\end{split}
\end{equation*}
The first line represents the fidelity or ``pseudo-likelihood'' function. It is worth noting that it actually involves
the covariance $\mathbf{C}$. In the absence of the covariance $\mathbf{C}$, it recovers the familiar log likelihood for
Poisson data, cf. Remark \ref{rmk:Laplace}. The second line imposes a quadratic penalty on the mean $\mathbf{\bar x}$.
This term recovers the familiar penalty in Tikhonov regularization {\rm(}except that it is imposed on $\mathbf{\bar x}${\rm)}.
Recall that the function $-\ln|\mathbf{C}|$ is strictly convex in $\mathbf{C}\in\mathcal{S}_m^+$
\cite[Lemma 6.2.2]{gartner2012approximation}. Thus, one may define the corresponding Bregman divergence $d(\mathbf{C},
\mathbf{C}_0)$. In view of the identities \cite{Dwyer:1967}
\begin{equation}\label{eqn:deriv-logdet}
\frac{\partial}{\partial\mathbf{C}}\mathrm{tr}(\mathbf{CC}_0^{-1})=\mathbf{C}_0^{-1}\quad \mbox{and} \quad \frac{\partial}{\partial\mathbf{C}}\ln|\mathbf{C}|=\mathbf{C}^{-1},
\end{equation}
simple computation gives the following expression for the divergence:
\begin{equation*}
d(\mathbf{C},\mathbf{C}_0)= \mathrm{tr}(\mathbf{C}^{-1}_0\mathbf{C})- \ln|\mathbf{C}_0^{-1}\mathbf{C}| - m \geq 0.
\end{equation*}
Statistically, it is the Kullback-Leibler divergence between two Gaussians
of identical mean. The divergence $d(\mathbf{C},\mathbf{C}_0)$ provides a distance measure between the prior covariance $\mathbf{C}_0$ and
the posterior one $\mathbf{C}$. Let $\{(\lambda_i,\mathbf{v}_i)\}_{i=1}^m$ be the pairs of generalized eigenvalues and eigenfunctions of
the pencil $(\mathbf{C},\mathbf{C}_0)$, i.e., $\mathbf{C}\mathbf{v}_i=\lambda_i\mathbf{C}_0\mathbf{v}_i$. Then it can be expressed as
\begin{equation*}
d(\mathbf{C},\mathbf{C}_0)= \sum_{i=1}^m (\lambda_i-\ln \lambda_i-1).
\end{equation*}
This identity directly indicates that $d(\mathbf{C},\mathbf{C}_0)\leq c$ implies $\|\mathbf{C}\|\leq c$
and $\|\mathbf{C}^{-1}\|\leq c$, where here and below $c$ denotes a generic constant which
may change at each occurrence.
Thus, the third line regularizes the posterior covariance $\mathbf{C}$ by requesting nearness to the prior one
$\mathbf{C}_0$ in Bregman divergence. It is interesting to observe that the Gaussian prior implicitly induces
a penalty on $\mathbf{C}$, although it is not directly enforced. In statistics, the Bregman divergence $d(\mathbf{C},
\mathbf{C}_0)$ is also known as Stein's loss \cite{JamesStein:1961}. In recent years, the Bregman divergence
$d(\mathbf{C},\mathbf{C}_0)$ has been employed in clustering, graphical models, sparse covariance estimate
and low-rank matrix recovery etc. \cite{KulisSustik:2009,RavikumarWainwrightPaskuttiYu:2011}.
\end{remark}
\subsection{Theoretical properties of the lower bound}
Now we study basic analytical properties, i.e., concavity, existence and uniqueness of maximizer, and
gradient of the functional $F(\mathbf{\bar x},\mathbf{C})$ defined in \eqref{eq:lbxc}, from the perspective
of optimization.
A first result shows the concavity of $F(\mathbf{\bar x},\mathbf{C})$. Let $X$ and $Y$
be two convex sets. Recall that a functional $f: X\times Y\to\mathbb{R}$ is said to be jointly concave, if and only if
\begin{equation*}
f(\lambda x_1+(1-\lambda)x_2,\lambda y_1+(1-\lambda)y_2) \ge \lambda f(x_1,y_1) + (1-\lambda)f(x_2,y_2)
\end{equation*}
for all $x_1,x_2\in X$, $y_1,y_2\in Y$ and $\lambda\in [0,1]$. Further, $f$ is called strictly jointly concave if
the inequality is strict for any $(x_1,y_1)\neq(x_2,y_2)$ and $\lambda\in (0,1)$. It is easy to see that
$\mathcal{S}_m^+$ is a convex set.
\begin{theorem}\label{thm:concavity}
For any $\mathbf{C}_0\in\mathcal{S}^+_m$, the functional $F(\mathbf{\bar{x}},\mathbf{C})$ is strictly
jointly concave with respect to $\mathbf{\bar{x}}\in\mathbb{R}^m$ and $\mathbf{C}\in\mathcal{S}^+_m$.
\end{theorem}
\begin{proof}
It suffices to consider the terms apart from the linear terms $(\mathbf{y},\mathbf{A}\mathbf{\bar{x}})$ and
$- \frac{1}{2}\text{tr}(\mathbf{C}^{-1}_0\mathbf{C})$ and the constant term $-\frac{1}{2}\text{ln}|\mathbf{C_0}|
+ \frac{m}{2} - (\mathbf{1}_n,\text{ln}(\mathbf{y}!))$. Since $\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}
\text{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)$ is linear in $\mathbf{\bar x}$ and $\mathbf{C}$, and exponentiation
preserves convexity, the term $- (\mathbf{1}_n,e^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}\text{diag}(\mathbf{A}
\mathbf{C}\mathbf{A}^t)})$ is also jointly concave.
Next, the term $-\frac{1}{2}(\mathbf{\bar{x}}-\bm{\mu}_0)^t\mathbf{C}^{-1}_0(\mathbf{\bar{x}}-\bm{\mu}_0)$ is
strictly concave for any $\mathbf{C}_0\in\mathcal{S}_m^+$. Last, the log-determinant $\ln|\mathbf{C}|$ is strictly concave over
$\mathcal{S}_m^+$ \cite[Lemma 6.2.2]{gartner2012approximation}. The assertion follows since strict concavity is preserved
under summation.
\end{proof}
Next, we show the well-posedness of the optimization problem in VGA.
\begin{theorem}\label{thm:existence}
There exists a unique pair
of $(\mathbf{\bar{x}},\mathbf{C})$ solving the optimization problem
\begin{equation}\label{eqn:opt}
\max_{\mathbf{\bar x}\in\mathbb{R}^m,\mathbf{C}\in \mathcal{S}_m^+}F(\mathbf{\bar{x}},\mathbf{C}).
\end{equation}
\end{theorem}
\begin{proof}
The proof follows by direct methods in calculus of variation, and we only briefly sketch it. Clearly,
there exists a maximizing sequence, denoted by $\{(\mathbf{\bar x}^k,\mathbf{C}^k)\}
\subset\mathbb{R}^m\times\mathcal{S}_m^+$, and
we may assume $F(\mathbf{\bar x}^k,\mathbf{C}^k) \geq c:=F(\bm\mu_0,\mathbf{C}_0)$. Thus, by
\eqref{eq:lbxc} in Proposition \ref{prop:lb} and the divergence $d(\mathbf{C},\mathbf{C}_0)$, we have
\begin{equation*}
(\mathbf{A}\mathbf{\bar x}^k,\mathbf{y}) - (\mathbf{\bar x}^k-\bm\mu_0)^t\mathbf{C}_0^{-1}(\mathbf{\bar x}^k-\bm\mu_0) - d(\mathbf{C}^k,\mathbf{C}_0) \geq c+ (e^{\mathbf{A\bar x}^k+\frac{1}{2}{\rm diag}(\mathbf{AC}^k\mathbf{A}^t)},\mathbf{1}_n)\geq c.
\end{equation*}
By the Cauchy-Schwarz inequality, we have $(\mathbf{\bar x}^k-\bm\mu_0)^t\mathbf{C}_0^{-1}
(\mathbf{\bar x}^k-\bm\mu_0) + d(\mathbf{C}^k,\mathbf{C}_0) \leq c.$ This immediately
implies a uniform bound on $\{(\mathbf{\bar x}^k,\mathbf{C}^k)\}$ and $\{(\mathbf{C}^k)^{-1}\}$.
Thus, there exists a convergent subsequence, relabeled as $\{(\mathbf{\bar x}^k,\mathbf{C}^k)\}$,
with a limit $(\mathbf{\bar x}^*,\mathbf{C}^*)\in\mathbb{R}^m\times\mathcal{S}_m^+$. Then
by the continuity of the functional $F$ in $(\mathbf{\bar x},\mathbf{C})$, we deduce
that $(\mathbf{\bar x}^*,\mathbf{C}^\ast)$ is a maximizer to $F(\mathbf{\bar x},\mathbf{C})$,
i.e., the existence of a maximizer. The uniqueness follows from
the strict joint-concavity of $F(\mathbf{\bar{x}},\mathbf{C})$, cf. Theorem \ref{thm:concavity}.
\end{proof}
Since $F$ is composed of smooth functions, clearly it is smooth. Next
we give the gradient formulae, which are useful for developing numerical algorithms below.
\begin{theorem}
The gradients of the functional $F(\mathbf{\bar{x}},\mathbf{C})$ with respect to $\mathbf{\bar{x}}$ and $\mathbf{C}$ are respectively given by
\begin{align*}
\frac{\partial F}{\partial \mathbf{\bar{x}}} &= \mathbf{A}^t\mathbf{y} - \mathbf{A}^te^{\mathbf{A}\mathbf{\bar{x}}
+\frac{1}{2}{\rm diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)} - \mathbf{C}^{-1}_0(\mathbf{\bar{x}}-\bm{\mu}_0),\\
\frac{\partial F}{\partial \mathbf{ C}} &= \tfrac{1}{2}[-\mathbf{A}^t{\rm diag}(e^{\mathbf{A}\mathbf{\bar{x}}
+\frac{1}{2}{\rm diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)})\mathbf{A} - \mathbf{C}^{-1}_0 + \mathbf{C}^{-1}].
\end{align*}
\end{theorem}
\begin{proof}
Let $\mathbf{d} = \mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}\text{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)$. Then by the chain rule
\begin{align*}
\frac{\partial}{\partial \bar{x}_i}(\mathbf{1}_n,e^\mathbf{d}) &= \frac{\partial}{\partial \bar{x}_i}\sum_{j=1}^n e^{d_j} = \sum_{j=1}^n \frac{\partial e^{d_j}}{\partial d_j}\frac{\partial d_j}{\partial \bar{x}_i}= \sum_{j=1}^ne^{d_j}(\mathbf{A})_{ji}.
\end{align*}
That is, we have $\frac{\partial}{\partial \mathbf{\bar{x}}}(\mathbf{1}_n,e^\mathbf{d})= \mathbf{A}^te^{\mathbf{d}}$,
showing the first formula. Next we derive the gradient with respect to the covariance $\mathbf{C}$. In view of \eqref{eqn:deriv-logdet},
it remains to differentiate the term $(\mathbf{1}_n,e^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}\mathrm{diag}
(\mathbf{A}\mathbf{C}\mathbf{A}^t)})$ with respect to $\mathbf{C}$. To this end, let $\mathbf{H}$ be a small
perturbation to $\mathbf{C}$. By Taylor expansion, and with the diagonal matrix $\mathbf{D}=\text{diag}
(e^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}\text{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)})$, we deduce
\begin{equation*}
\hspace{1em}(\mathbf{1}_n,e^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}\text{diag}(\mathbf{A}\mathbf{(C+H)}\mathbf{A}^t)}) - (\mathbf{1}_n,e^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}\text{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)})
=(\mathbf{D}, \tfrac{1}{2}\text{diag}(\mathbf{A}\mathbf{H}\mathbf{A}^t))+\mathcal{O}(\|\mathbf{H}\|^2).
\end{equation*}
Since the matrix $\mathbf{D}$ is diagonal, by the cyclic property of trace, we have
\begin{equation*}
\begin{aligned}
(\mathbf{D}, \tfrac{1}{2}\text{diag}(\mathbf{A}\mathbf{H}\mathbf{A}^t)) =(\mathbf{D},\tfrac{1}{2}(\mathbf{A}\mathbf{H}\mathbf{A}^t))
=\tfrac{1}{2}\text{tr}(\mathbf{DAH}^t\mathbf{A}^t)=\tfrac{1}{2}\text{tr}(\mathbf{A}^t\mathbf{DA}\mathbf{H}^t) =\tfrac{1}{2}(\mathbf{A^tDA},\mathbf{H}).
\end{aligned}
\end{equation*}
Now the definition of the gradient completes the proof.
\end{proof}
An immediate corollary is the following optimality system.
\begin{corollary}\label{cor:opt}
The necessary and sufficient optimality system of problem \eqref{eqn:opt} is given by
\begin{align*}
\mathbf{A}^t\mathbf{y} - \mathbf{A}^te^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}{\rm diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)} - \mathbf{C}^{-1}_0(\mathbf{\bar{x}}-\bm{\mu}_0)&=0,\\
\mathbf{C}^{-1}-\mathbf{A}^t{\rm diag}(e^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}{\rm diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)})\mathbf{A} - \mathbf{C}^{-1}_0&=0.
\end{align*}
\end{corollary}
\begin{remark}
Challis and Barber \cite{challis2013gaussian} showed that for log-concave site
posterior potentials, the variational lower bound is jointly concave in $\mathbf{\bar{x}}$
and the Cholesky factor $\mathbf{L}$ of the covariance $\mathbf{C}$.
This assertion holds also for the lower bound $F(\mathbf{\bar x},\mathbf{C})$ in \eqref{eq:lbxc},
i.e., joint concavity with respect to $(\mathbf{\bar x},\mathbf{L})$.
\end{remark}
\begin{remark}
Corollary \ref{cor:opt} indicates that the covariance $\mathbf{C}^*$ of the
optimal Gaussian approximation $q^*(\mathbf{ x})$ is of the following form:
\begin{equation*}
(\mathbf{C}^*)^{-1} = \mathbf{C}_0^{-1} + \mathbf{A}^t\mathbf{D}\mathbf{A},
\end{equation*}
for some diagonal matrix $\mathbf{D}$. Thus it is tempting to directly maximize with respect
to $\mathbf{D}$ instead of $\mathbf{C}$ in order to reduce the complexity of the algorithm, by reducing the
number of unknowns from $m^2$ to $m$. However, $F$ is generally not concave with respect
to $\mathbf{D}$; see \cite{KhanMohamedMurphy:2012} for a one-dimensional counterexample. The loss of concavity
might complicate the analysis and computation.
\end{remark}
\begin{remark}
In practice, the parameter $\mathbf{x}$ in the model \eqref{eqn:poisson} is often subject to physical constraints. Thus it is
natural to impose a box constraint on the mean $\mathbf{\bar x}$ in problem \eqref{eqn:opt}, e.g., $c_l\leq \bar x_i\leq c_u$,
$i=1,\ldots,m$, for some $c_l<c_u$. This can be easily
incorporated into the optimality system in Corollary \ref{cor:opt}, and the algorithms below
remain valid upon minor changes, e.g., including a pointwise projection operator in the update of $\mathbf{\bar x}$.
\end{remark}
\section{Numerical algorithm and its complexity analysis}\label{sec:algorithm}
Now we develop an efficient numerical algorithm, which is of alternating direction maximization
type, provide an analysis of its complexity, and discuss strategies for complexity reduction.
\subsection{Numerical algorithm}
In view of the strict concavity of $F(\mathbf{\bar x},\mathbf{C})$,
it suffices to solve the optimality system (cf. Corollary \ref{cor:opt}):
\begin{align}
\mathbf{A}^t\mathbf{y} - \mathbf{A}^te^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}{\rm diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)} - \mathbf{C}^{-1}_0(\mathbf{\bar{x}}-\bm{\mu}_0)&=0,\label{eqn:barx}\\
\mathbf{C}^{-1}-\mathbf{A}^t{\rm diag}(e^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}{\rm diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)})\mathbf{A} - \mathbf{C}^{-1}_0&=0.\label{eqn:C}
\end{align}
This consists of a coupled nonlinear system for $(\mathbf{\bar x},\mathbf{C})$.
We shall solve the system by alternatingly maximizing
$F(\mathbf{\bar x},\mathbf{C})$ with respect to $\mathbf{\bar x}$ and
$\mathbf{C}$, i.e., coordinate ascent. From the strict concavity in Theorem \ref{thm:concavity}, we
deduce that for a fixed $\mathbf{C}$, \eqref{eqn:barx} has a unique
solution $\mathbf{\bar x}$, and similarly, for a fixed $\mathbf{\bar x}$,
\eqref{eqn:C} has a unique solution $\mathbf{C}$. Below, we discuss the efficient numerical
solution of \eqref{eqn:barx}--\eqref{eqn:C}.
\subsubsection{Newton method for updating $\mathbf{\bar x}$}
To solve $\mathbf{\bar x}$ from \eqref{eqn:barx}, for a fixed $\mathbf{C}$, we employ a Newton
method. Let the nonlinear map $\mathbf{G}:\mathbb{R}^m\to\mathbb{R}^m$ be defined by
\begin{equation*}
\mathbf{G}(\mathbf{\bar x})= \mathbf{A}^te^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}{\rm diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)} + \mathbf{C}^{-1}_0(\mathbf{\bar{x}}-\bm{\mu}_0)-\mathbf{A}^t\mathbf{y}.
\end{equation*}
The Jacobian $\partial \mathbf{G}$ of the map $\mathbf{G}$ is given by
\begin{equation*}
\partial\mathbf{G}(\mathbf{\bar x})=\mathbf{A}^t\mathrm{diag}(e^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}{\rm diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)})\mathbf{A} + \mathbf{C}^{-1}_0 \geq \mathbf{C}_0^{-1},
\end{equation*}
where the partial ordering $\geq$ is in the sense of symmetric positive definite matrix, i.e., $\mathbf{X}\geq \mathbf{Y}$ if and
only if $\mathbf{X}-\mathbf{Y}$ is positive semidefinite. That is, the Jacobian $\partial\mathbf{G}
(\mathbf{\bar x})$ is uniformly invertible (since the prior covariance $\mathbf{C}_0$ is invertible). This concurs
with the strict concavity of the functional $F(\mathbf{\bar x},\mathbf{C})$ in $\mathbf{\bar x}$.
This motivates the use of the Newton method or its variants: for a nonlinear system with uniformly invertible
Jacobians, the Newton method converges globally \cite{Kelley:1995}. Specifically, given $\mathbf{\bar x}^0$, we iterate
\begin{equation}\label{eqn:iter-barx}
\partial\mathbf{G}(\mathbf{\bar x}^k) \delta \mathbf{\bar x} = -\mathbf{G}(\mathbf{\bar x}^k),\qquad
\mathbf{\bar x}^{k+1} = \mathbf{\bar x}^k + \delta\mathbf{\bar x}.
\end{equation}
The main cost of the Newton update \eqref{eqn:iter-barx} lies in solving the linear system involving
$\partial\mathbf{G}(\mathbf{\bar x}^k)$. Clearly, the Jacobian $\partial\mathbf{G}(\mathbf{\bar x}^k)$
is symmetric and positive definite, and thus the (preconditioned) conjugate gradient method is a natural
choice for solving the linear system. One may use $\mathbf{C}_0^{-1}$ (or the diagonal part
of the Jacobian $\mathbf{\partial G}(\mathbf{\bar x})$) as a preconditioner.
It is worth noting that inverting the Jacobian $\partial \mathbf{G}(\mathbf{\bar x})$ is identical
with one fixed point update of the covariance $\mathbf{C}$ below. In the presence of \textit{a priori}
structural information, this can be carried out efficiently even for very large-scale problems; see Section
\ref{ssec:complexity} below for further details. By the fast local convergence of the Newton method,
a few iterations suffice to reach the desired accuracy, which is fully confirmed by our numerical experiments.
\subsubsection{Fixed-point method for updating $\mathbf{C}$}
Next we turn to the solution of \eqref{eqn:C} for updating $\mathbf{C}$, with $\mathbf{\bar x}$ fixed. There
are several different strategies, and we shall describe two of them below. The first option is to employ
a Newton method. Let the nonlinear map $\mathbf{S}:\mathbb{R}^{m\times m}\to\mathbb{R}^{m\times m}$ be defined by
\begin{equation*}
\mathbf{S}(\mathbf{C}) = \mathbf{C}^{-1}-\mathbf{C}_0^{-1}-\mathbf{A}^t{\rm diag}(e^{\mathbf{A\bar x}+\frac{1}{2}{\rm diag}(\mathbf{ACA}^t)})\mathbf{A}.
\end{equation*}
The Jacobian $\partial\mathbf{S}$ of the map $\mathbf{S}$ is given by
\begin{equation*}
\partial\mathbf{S}(\mathbf{C})[\mathbf{H}] = -\mathbf{C}^{-1}\mathbf{H}\mathbf{C}^{-1}-\tfrac{1}{2}\mathbf{A}^t{\rm diag}(e^{\mathbf{A\bar x}+\frac{1}{2}{\rm diag}(\mathbf{ACA}^t)})\mathrm{diag}(\mathbf{AHA}^t )\mathbf{A}.
\end{equation*}
It can be verified that the map $\partial\mathbf{S}(\mathbf{C})$ is symmetric with a uniformly bounded
inverse (see the proof of Theorem \ref{thm:sensitivity-sol} in the appendix for details).
However, its explicit form seems not available due to the presence of the operator $\mathrm{diag}$.
Nonetheless, one can apply a (preconditioned) conjugate gradient method for updating $\mathbf{C}$.
The Newton iteration is guaranteed to converge globally.
The second option is to use a fixed-point iteration. This choice is very
attractive since it avoids solving huge linear systems. Specifically, given
an initial guess $\mathbf{C}^0$, we iterate by
\begin{equation}\label{eqn:iter-C}
\mathbf{D}^k = \mathrm{diag}(e^{\mathbf{A\bar x}+\frac{1}{2}\mathrm{diag}(\mathbf{AC}^k\mathbf{A}^t)}),\qquad
\mathbf{C}^{k+1} =(\mathbf{C}_0^{-1}+ \mathbf{A}^t\mathbf{D}^k\mathbf{A})^{-1}.
\end{equation}
Conceptually, it has the flavor of a classical fixed point scheme for solving algebraic
Riccati equations in Kalman filtering \cite{AndersonKleindorfer:1969}, and it has also been used in a slightly
different context of variational inference with Gaussian processes \cite{KhanMohamedMurphy:2012}. Numerically, each inner iteration
of \eqref{eqn:iter-C} involves computing the vector ${\rm diag}(\mathbf{AC}^k\mathbf{A}^t)$ (which should be regarded
as computing $\mathbf{a}_i\mathbf{C}^k\mathbf{a}_i^t$, $i=1,\ldots,n$, instead of forming the full matrix
$\mathbf{AC}^k\mathbf{A}^t$) and a matrix inversion.
Next we briefly discuss the convergence of \eqref{eqn:iter-C}. Clearly, for all iterates $\mathbf{C}^k$,
we have $\mathbf{C}^k\leq \mathbf{C}_0$. We claim $\lambda_{\max}(\mathbf{C}^k)\leq \lambda_{\max}(\mathbf{C}_0).$ To see this,
let $\mathbf{v}\in\mathbb{R}^m$ be a unit eigenvector corresponding to the largest eigenvalue $\lambda_{\max}(\mathbf{C}^k)$, i.e.,
$\mathbf{v}^t\mathbf{C}^k\mathbf{v}=\lambda_{\max}(\mathbf{C}^k)$. Then by the minmax principle
\begin{equation*}
\lambda_{\max}(\mathbf{C}^k)=\mathbf{v}^t\mathbf{C}^k\mathbf{v} \leq \mathbf{v}^t\mathbf{C}_0\mathbf{v} \leq \sup_{\mathbf{v}\in\mathbb{S}^{m-1}}\mathbf{v}^t\mathbf{C}_0\mathbf{v}=\lambda_{\max}(\mathbf{C}_0).
\end{equation*}
Thus, the sequence $\{\mathbf{C}^k \}_{k=1}^\infty$ generated by the iteration \eqref{eqn:iter-C}
is uniformly bounded in the spectral norm (and thus any norm due to the norm equivalence in a finite-dimensional space).
Hence, there exists a convergent subsequence, also relabeled as $\{\mathbf{C}^k \}$, such that $\mathbf{C}^k\to
\mathbf{C}^*$, for some $\mathbf{C}^*$. In practice, the iterates converge fairly steadily to the unique
solution to \eqref{eqn:C}, which however remains to be established. In Appendix \ref{app:iter-C}, we show a certain
``monotone'' type convergence of \eqref{eqn:iter-C} for the initial guess $\mathbf{C}^0=\mathbf{C}_0$.
\subsubsection{Variational Gaussian approximation algorithm}
With the preceding two inner solvers, we are ready to state the complete procedure in Algorithm \ref{alg:vb}.
One natural stopping criterion at Step 7 is to monitor ELBO. However, computing ELBO can be expensive and
cheap alternatives, e.g., relative change of the mean $\mathbf{\bar x}$, might be considered.
Note that Step 3 of Algorithm \ref{alg:vb}, i.e., randomized singular value decomposition (rSVD), has to be carried
out only once, and it constitutes a preprocessing step. Its crucial role will be discussed in Section \ref{ssec:complexity} below.
With exact inner updates $(\mathbf{\bar x}^k,\mathbf{C}^k)$, by
the alternating maximizing property, the sequence $\{F(\mathbf{\bar x}^k,
\mathbf{C}^k)\}$ is guaranteed to be monotonically increasing, i.e.,
\begin{equation*}
F(\mathbf{\bar x}^0,\mathbf{C}^0) \leq F(\mathbf{\bar x}^1,\mathbf{C}^0)\leq F(\mathbf{\bar x}^1,\mathbf{C}^1)\leq \cdots \leq F(\mathbf{\bar x}^k,\mathbf{C}^k)\leq \cdots,
\end{equation*}
with the inequality being strict until convergence is reached. Further, $F(\mathbf{\bar x}^k,\mathbf{C}^k)\leq \ln Z(\mathbf{y})$. Thus,
$\{F(\mathbf{\bar x}^k,\mathbf{C}^k)\}$ converges. Further, by \cite[Prop. 2.7.1]{Bertsekas:2016}, the coordinate ascent method converges if
the maximization with respect to each coordinate is uniquely attained. Clearly, Algorithm \ref{alg:vb} is
a coordinate ascent method for $F(\mathbf{\bar x},\mathbf{C})$, and $F(\mathbf{\bar x},\mathbf{C})$ satisfies the unique solvability condition.
Thus the sequence $\{(\mathbf{\bar x}^k,\mathbf{C}^k)\}$ generated by Algorithm \ref{alg:vb}
converges to the unique maximizer of $F(\mathbf{\bar x},\mathbf{C})$.
\begin{algorithm}[hbt!]
\centering
\caption{Variational Gaussian Approximation Algorithm\label{alg:vb}}
\begin{algorithmic}[1]
\STATE Input: $(\mathbf{A},\mathbf{y})$, specify the prior $(\bm{\mu}_0,\mathbf{C}_0)$, and the maximum number $K$ of iterations
\STATE Initialize $\mathbf{\bar x}=\mathbf{\bar x}^1$ and $\mathbf{C}=\mathbf{C}^1$;
\STATE SVD: $(\mathbf{U}, \mathbf{\Sigma}, \mathbf{V}) = \text{rSVD}(\mathbf{A})$;
\FOR{$k=1,2,\ldots,K$}
\STATE Update the mean $\mathbf{\bar x}^{k+1}$ by \eqref{eqn:iter-barx};
\STATE Update the covariance $\mathbf{C}^{k+1}$ by \eqref{eqn:iter-C};
\STATE Check the stopping criterion.
\ENDFOR
\STATE Output: $(\mathbf{\bar{x}},\mathbf{C})$
\end{algorithmic}
\end{algorithm}
\subsection{Complexity analysis and reduction}\label{ssec:complexity}
Now we analyze the computational complexity of Algorithm \ref{alg:vb}, and describe strategies
for complexity reduction, in order to arrive at a scalable implementation.
When evaluating the functional $F(\mathbf{\bar x},\mathbf{C})$ and its gradient, the constant terms can be
precomputed. Thus, it suffices to analyze the terms that will be updated.
Standard linear algebra \cite{GolubVanLoan:2013} gives the following operational complexity.
\begin{itemize}
\item The complexity of evaluating the objective functional $F(\mathbf{\bar{x}},\mathbf{C})$ is $\mathcal{O}(m^2n+m^3)$:
\begin{itemize}
\item the inner product $-(\mathbf{1}_n,e^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}\text{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)})\sim\mathcal{O}(m^2n)$
\item the log-determinant $\text{ln}|\mathbf{C}|\sim\mathcal{O}(m^3)$
\end{itemize}
\item The complexity of evaluating the gradient $\frac{\partial F}{\partial \mathbf{\bar{x}}} $ is $\mathcal{O}(m^2n)$:
\begin{itemize}
\item the matrix-vector product $\mathbf{A}^te^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}\text{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)}\sim\mathcal{O}(m^2n)$
\end{itemize}
\item The complexity of evaluating the gradient $\frac{\partial F}{\partial\mathbf{ C}}$ is $\mathcal{O}(m^2n+m^3)$:
\begin{itemize}
\item the matrix product $\mathbf{A}^t\text{diag}(e^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}\text{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)})\mathbf{A}\sim\mathcal{O}(m^2n)$
\item the matrix inversion $\mathbf{C}^{-1}\sim\mathcal{O}(m^3)$.
\end{itemize}
\end{itemize}
In summary, evaluating ELBO $F(\mathbf{\bar x},\mathbf{C})$ and its gradients
each involves $\mathcal{O}(nm^2+m^3)$ complexity, which is infeasible for large-scale
problems. The most expensive piece lies in matrix products/inversion, e.g., $(\mathbf{1}_n,e^{\mathbf{A}
\mathbf{\bar{x}}+\frac{1}{2}\text{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)})$, $\mathbf{A}^te^{\mathbf{A}
\mathbf{\bar{x}}+\frac{1}{2}\text{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)}$ and $\mathbf{A}^t\text{diag}
(e^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}\text{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)})\mathbf{A}$.
The log-determinant $\text{ln}|\mathbf{C}|$ can be approximated accurately
with $\mathcal{O}(m^2)$ operations by a stochastic algorithm \cite{ZhangLeithead:2007}.
In many practical inverse problems, there do exist structures: (i) $\mathbf{A}$ is
low rank, and (ii) $\mathbf{C}$ is sparse, which can be leveraged to reduce the per-iteration cost.
First, for many inverse problems, the matrix $\mathbf{A}$ is ill-conditioned, and the singular
values decay to zero. Thus, $\mathbf{A}$ naturally has a low-rank structure. The effective rank $r$
is determined by the decay rate of the singular values. In this work, we assume a known rank $r$.
The rSVD is a powerful technique for obtaining low-rank approximations \cite{halko2011finding}. For a
rank $r$ matrix, the rSVD can yield an accurate approximation with $\mathcal{O}(mn\ln r + (m + n)r^2)$
operations \cite[pp. 225]{halko2011finding}. We denote the rSVD approximation by $\mathbf{A}\approx
\mathbf{U}\mathbf{\Sigma}\mathbf{V}^t$, where the matrices $\mathbf{U}\in\mathbb{R}^{n\times r}$
and $\mathbf{V}\in\mathbb{R}^{m\times r}$ are column orthonormal, and $\mathbf{\Sigma}\in
\mathbb{R}^{r\times r}$ is diagonal with its entries ordered nonincreasingly.
Second, the covariance $\mathbf{C}$ is approximately sparse, and each row/column has at most $s$
nonzero entries. This reflects the fact that only (physically) neighboring elements
are highly correlated, and there is no long range correlation. This choice will be implemented in
the numerical experiments for 2D image deblurring. Naturally, one can also consider a more
flexible option by adaptively selecting the sparsity pattern. This can be achieved by penalizing the
off-diagonal entries of $\mathbf{C}$ by the $\ell^1$-norm, which allows automatically detecting
significant correlation \cite{RavikumarWainwrightPaskuttiYu:2011}. Other structures, e.g., low-rank
plus sparsity, offer potential alternatives. We leave these advanced
options to a future study.
Under these structural assumptions, the complexity of computing the terms $(\mathbf{1}_n,e^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}\text{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)})$, $ \mathbf{A}^te^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}\text{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)}$ and $\mathbf{A}^t\text{diag}(e^{\mathbf{A}\mathbf{\bar{x}}+\frac{1}{2}\text{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)})\mathbf{A}$
can be reduced to $\mathcal{O}(smn)$. Thus, the complexity of calculating $F$ and $\frac{\partial F}{\partial
\mathbf{\bar{x}}}$ is reduced to $\mathcal{O}(smn+m^2)$. For the matrix inversion in
\eqref{eqn:iter-C}, we exploit the low-rank structure of $\mathbf{A}$. Upon recalling the low-rank approximation
of $\mathbf{A}$ and the Sherman-Morrison-Woodbury formula \cite[pp. 65]{GolubVanLoan:2013}, i.e.,
\begin{equation*}
(\mathbf{\tilde{A}}+\mathbf{\tilde{U}}\mathbf{\tilde{V}})^{-1} = \mathbf{\tilde{A}}^{-1}-\mathbf{\tilde{A}}^{-1}\mathbf{\tilde{U}}(\mathbf{I}+\mathbf{\tilde{V}}
\mathbf{\tilde{A}}^{-1}\mathbf{\tilde{U}})^{-1}\mathbf{\tilde{V}}\mathbf{\tilde{A}}^{-1},
\end{equation*}
we deduce (with $\mathbf{D}=\text{diag}(e^{\mathbf{A}\mathbf{\bar{x}}+
\frac{1}{2}\text{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)})$)
\begin{equation}\label{eqn:update-C-woodbury}
\mathbf{C} = \mathbf{C}_0 -\mathbf{C}_0\mathbf{V}\mathbf{\Sigma}\mathbf{U}^t\mathbf{D}\mathbf{U}\mathbf{\Sigma}(\mathbf{I}
+\mathbf{V}^t\mathbf{C}_0\mathbf{V}\mathbf{\Sigma}\mathbf{U}^t\mathbf{D}\mathbf{U}\mathbf{\Sigma})^{-1}\mathbf{V}^t\mathbf{C}_0.
\end{equation}
Note that the inversion step only involves a matrix in $\mathbb{R}^{r\times r}$, and can be carried out
efficiently. The sparsity structure on $\mathbf{C}$ can be enforced by computing only the respective entries.
Then the update formula \eqref{eqn:update-C-woodbury} can be achieved
in $\mathcal{O}(smn+r^2n+r^2m)$ operations. In comparison with the $\mathcal{O}(m^3+nm^2)$ complexity
of the direct implementation, this represents a substantial complexity reduction.
\section{Hyperparameter choice with hierarchical model}\label{sec:hyper}
When encoding prior knowledge about the unknown $\mathbf{x}$ into the prior $p(\mathbf{x})$, it is often necessary
to tune its strength, a scalar parameter commonly known as hyperparameter.
It plays the role of the regularization parameter in variational regularization
\cite[Chapter 7]{ito2014inverse}, where its proper choice is notoriously challenging.
In the Gaussian prior $p(\mathbf{x})$, $\mathbf{C}_0=\alpha^{-1}\bar{\mathbf{C}}_0$, where
$\bar{\mathbf{C}}_0$ describes the interaction structure and
the scalar $\alpha$ determines the strength of the interaction which has to be specified.
In the Bayesian paradigm, one principled approach to handle hyperparameters is hierarchical modeling,
by assuming a hyperprior and treating them as a part of the
inference procedure. Specifically, we write the Gaussian prior $p(\mathbf{x}|\alpha)=\mathcal{N}(\mathbf{x}
|\mathbf{0},\alpha^{-1}\bar{\mathbf{C}}_0)$, and employ
a Gamma distribution $p(\alpha|a,b)=\text{Gamma}(\alpha|a,b)$ on $\alpha$, where $(a,b)$
are the parameters. The Gamma distribution is the conjugate prior for $\alpha$, and it is
analytically and computationally convenient. In practice, one may take $(a,b)$ close
to $(1,0)$ to mimic a noninformative prior. Then appealing to Bayes' formula again, one obtains a
posterior distribution (jointly over $(\mathbf{x},\alpha)$).
Conceptually, with the VGA, this construction determines the optimal parameter by maximizing ELBO as a function of
$\alpha$, i.e., model selection within a parametric family. Thus it can be viewed as a direct application
of ELBO in model selection.
One may explore the resulting joint posterior distribution in several ways \cite[Chapter 7]{ito2014inverse}.
In this work, we employ an EM type method to maximize the following (joint) lower bound
\begin{equation*}
\begin{aligned}
F(\mathbf{\bar{x}},\mathbf{C},\alpha) &= \int q(\mathbf{x})\text{ln}\frac{p(\mathbf{x},\mathbf{y}|\alpha)p(\alpha|a,b)}{q(\mathbf{x})}\text{d}\mathbf{x}\\
&= \int q(\mathbf{x})\text{ln}\frac{p(\mathbf{x},\mathbf{y}|\alpha)}{q(\mathbf{x})}\text{d}\mathbf{x} + \int q(\mathbf{x})\text{ln}p(\alpha|a,b)\text{d}\mathbf{x}\\
&= F_\alpha(\mathbf{\bar{x}},\mathbf{C}) + (a-1)\ln\alpha -\alpha b + \ln\frac{b^a}{\Gamma(a)},
\end{aligned}
\end{equation*}
where the subscript $\alpha$ indicates the dependence of ELBO
on $\alpha$. Then, using \eqref{eq:lbxc} and substituting $\mathbf{C}_0$ with $\alpha^{-1}\bar{\mathbf{C}}_0$, we have
\begin{equation}\label{eq:hyperbd}
\begin{split}
F(\mathbf{\bar{x}},\mathbf{C},\alpha) &= (\mathbf{y},\mathbf{A}\mathbf{\bar{x}})- (\mathbf{1}_n,e^{\mathbf{A}\mathbf{\bar{x}}+\tfrac{1}{2}\mathrm{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)}) - \tfrac{\alpha}{2}(\mathbf{\bar{x}}-\bm{\mu}_0)^t\bar{\mathbf{C}}_0^{-1}(\mathbf{\bar{x}}-\bm{\mu}_0) - \tfrac{\alpha}{2}\mathrm{tr}(\bar{\mathbf{C}}_0^{-1}\mathbf{C})\\
&\hspace{1em}+ \tfrac{1}{2}\ln|\mathbf{C}| + \tfrac{m}{2}\ln\alpha - \tfrac{1}{2}\ln|\bar{\mathbf{C}}_0|+ (a-1)\ln\alpha -\alpha b + \tfrac{m}{2} - (\mathbf{1}_n,\ln(\mathbf{y}!)) + \ln\frac{b^a}{\Gamma(a)}.
\end{split}
\end{equation}
This functional extends ELBO $F(\mathbf{\bar x},\mathbf{C})$ to estimate the
hyperparameter $\alpha$ simultaneously with $(\mathbf{\bar x},\mathbf{C})$ in a way
analogous to augmented Tikhonov regularization \cite{JinZou:2009}.
To maximize $F(\mathbf{\bar x},\mathbf{C},\alpha)$, we employ an EM algorithm \cite[Chapter 9.3]{Bishop:2006}. In
the E-step, we fix $\alpha$, and maximize $F(\mathbf{\bar{x}},\mathbf{C},\alpha)$ for a new Gaussian approximation
$\mathcal{N}(\mathbf{x}|\mathbf{\bar{x}},\mathbf{C})$ by Algorithm \ref{alg:vb}, with the unique maximizer
denoted by $(\mathbf{\bar x}_\alpha,\mathbf{C}_\alpha)$. Then in the M-step, we fix $(\mathbf{\bar{x}},\mathbf{C})$
and update $\alpha$ by
\begin{equation}\label{eq:alpha}
\alpha = \frac{m+2(a-1)}{(\mathbf{\bar{x}}_\alpha-\bm{\mu}_0)^t\mathbf{\bar C}_0^{-1}(\mathbf{\bar{x}}_\alpha-\bm{\mu}_0)+\text{tr}(\bar{\mathbf{C}}_0^{-1}\mathbf{C}_\alpha)+2b}.
\end{equation}
This follows from the condition $\frac{\partial F}{\partial \alpha}=0$. These
discussions lead to the procedure in Algorithm \ref{alg:hyper}. A natural
stopping criterion at line 5 is the change of $\alpha$. Below we analyze the convergence of
Algorithm \ref{alg:hyper}.
\begin{remark}
Scaled by $\alpha$, the first two terms in the denominator of the iteration \eqref{eq:alpha} are given by
\begin{equation*}
\alpha(\mathbf{\bar{x}}_\alpha-\bm{\mu}_0)^t\mathbf{\bar C}_0^{-1}(\mathbf{\bar{x}}_\alpha-\bm{\mu}_0)+\alpha{\rm tr}(\bar{\mathbf{C}}_0^{-1}\mathbf{C}_\alpha)
=\mathbb{E}_{q(\mathbf{x})}[\|\mathbf{x}-\bm{\mu}_0\|^2_{\mathbf{C}_0^{-1}}],
\end{equation*}
i.e., up to an additive constant, twice the expectation of the negative logarithm of the Gaussian prior $p(\mathbf{x})$ with respect to the Gaussian posterior approximation
$q(\mathbf{x})$. Formally, the fixed point iteration \eqref{eq:alpha} can be viewed as an extension of that for
a balancing principle for Tikhonov regularization in \cite{JinZou:2009,ItoJinTakeuchi:2011} to a probabilistic
context.
\end{remark}
\begin{algorithm}[hbt!]
\centering
\caption{Hierarchical variational Gaussian approximation\label{alg:hyper}}
\begin{algorithmic}[1]
\STATE Input $(\mathbf{A},\mathbf{y})$, and initialize $\alpha^1$
\FOR{$k=1,2,\ldots$}
\STATE E-step: Update $(\mathbf{\bar{x}}^k,\mathbf{C}^k)$ by Algorithm \ref{alg:vb}:
\begin{equation*}
(\mathbf{\bar{x}}^k,\mathbf{C}^k)=\arg\max_{(\mathbf{\bar x},\mathbf{C})\in\mathbb{R}^m\times\mathcal{S}_m^+} F_{\alpha^k}(\mathbf{\bar{x}},\mathbf{C});
\end{equation*}
\STATE M-step: Update $\alpha$ by \eqref{eq:alpha}.
\STATE Check the stopping criterion;
\ENDFOR
\STATE Output: $(\mathbf{\bar{x}},\mathbf{C})$
\end{algorithmic}
\end{algorithm}
In order to analyze the convergence of Algorithm \ref{alg:hyper}, we write the functional $F_\alpha(\mathbf{\bar x},\mathbf{C})$ as
\begin{equation*}
F_\alpha(\mathbf{\bar x},\mathbf{C}) = \phi(\mathbf{\bar x},\mathbf{C}) + \alpha \psi(\mathbf{\bar x},\mathbf{C}),
\end{equation*}
where
\begin{equation*}
\begin{aligned}
\phi(\mathbf{\bar{x}},\mathbf{C})&= (\mathbf{y},\mathbf{A}\mathbf{\bar{x}})- (\mathbf{1}_n,e^{\mathbf{A}\mathbf{\bar{x}}+\tfrac{1}{2}\mathrm{diag}(\mathbf{A}\mathbf{C}\mathbf{A}^t)})+\tfrac{1}{2}\ln|\mathbf{C}|- \tfrac{1}{2}\ln|\bar{\mathbf{C}}_0| - (\mathbf{1}_n,\ln(\mathbf{y}!)),\\
\psi(\mathbf{\bar{x}},\mathbf{C})&=- \tfrac{1}{2}(\mathbf{\bar{x}}-\bm{\mu}_0)^t\bar{\mathbf{C}}_0^{-1}(\mathbf{\bar{x}}-\bm{\mu}_0)- \tfrac{1}{2}\mathrm{tr}(\bar{\mathbf{C}}_0^{-1}\mathbf{C})\le 0.
\end{aligned}
\end{equation*}
Thus the functional $F_\alpha(\mathbf{\bar x},\mathbf{C})$ resembles classical Tikhonov regularization.
By Theorem \ref{thm:existence}, for any $\alpha>0$, there exists a unique maximizer $(\mathbf{\bar x}_\alpha,\mathbf{C}_\alpha)$ to $F_\alpha$,
and the value function $\psi(\mathbf{\bar x}_\alpha,\mathbf{C}_\alpha)$ is continuous in $\alpha$, cf. Lemma \ref{lem:cont}
below. In Appendix \ref{app:sensitivity}, we show that the maximizer $(\mathbf{\bar x}_\alpha,\mathbf{C}_\alpha)$ is actually differentiable in $\alpha$.
\begin{lemma}\label{lem:bdd}
For any $\alpha>0$, the maximizer $(\mathbf{\bar x}_\alpha,\mathbf{C}_\alpha)$ is bounded, with the bound depending only on $\alpha$.
\end{lemma}
\begin{proof}
Taking inner product between \eqref{eqn:barx} and $\mathbf{\bar x}_\alpha$, we deduce
\begin{equation*}
(\mathbf{C}_0^{-1}\mathbf{\bar x}_\alpha,\mathbf{\bar x}_\alpha) + (e^{\mathbf{A\bar x}_\alpha+\frac{1}{2}{\rm diag}(\mathbf{ACA}^t)},\mathbf{A}\mathbf{\bar x}_\alpha)=(\mathbf{A}^t\mathbf{y},\mathbf{\bar x}_\alpha).
\end{equation*}
It can be verified directly that the function $f(t)=te^t$ is bounded from below by $-e^{-1}$ for $t\in\mathbb{R}$.
Meanwhile, by \eqref{eqn:C}, $\mathbf{C}\leq \mathbf{C}_0$, and thus
\begin{equation*}
(e^{\mathbf{A\bar x}_\alpha+\frac{1}{2}{\rm diag}(\mathbf{ACA}^t)},\mathbf{A}\mathbf{\bar x}_\alpha) \geq -e^{-1}\sum_ie^{\frac{1}{2}{\rm diag}(\mathbf{ACA}^t)_i} \geq -e^{-1}\sum_ie^{\frac{1}{2}{\rm diag}(\mathbf{AC}_0\mathbf{A}^t)_i}=-ce^{-1}.
\end{equation*}
This and the Cauchy-Schwarz inequality give $\|\mathbf{\bar x}_\alpha\|\leq c\alpha^{-1}$,
with $c$ depending only on $\mathbf{y}$. Next, by \eqref{eqn:C}, we have
\begin{equation*}
0\leq e^{(\mathbf{A\bar x})_i+\frac{1}{2}{\rm diag}(\mathbf{ACA}^t)_i}\leq e^{(\mathbf{A\bar x})_i+\frac{1}{2}{\rm diag}(\mathbf{AC}_0\mathbf{A}^t)_i}\leq c,
\end{equation*}
and consequently appealing to \eqref{eqn:C} again yields
$(\mathbf{C}_0^{-1}+c\mathbf{A}^t\mathbf{A})^{-1}\leq \mathbf{C}\leq \mathbf{C}_0$, completing the proof.
\end{proof}
\begin{lemma}\label{lem:cont}
The functional value $\psi(\mathbf{\bar{x}}_\alpha,\mathbf{C}_\alpha)$
is continuous at any $\alpha>0$.
\end{lemma}
\begin{proof}
Let $\{\alpha^k\}\subset\mathbb{R}^+$ be a sequence convergent to $\alpha$. By Theorem \ref{thm:existence}, for each $\alpha^k$,
there exists a unique maximizer $(\mathbf{\bar x}^k,\mathbf{C}^k)$ to $F_{\alpha^k}(\mathbf{\bar x},\mathbf{C})$.
By Lemma \ref{lem:bdd}, the sequence $\{(\mathbf{\bar x}^k,\mathbf{C}^k)\}$
is uniformly bounded, and there exists a convergent subsequence, relabeled as $\{(\mathbf{\bar x}^k,\mathbf{C}^k)\} $, with a
limit $(\mathbf{\bar x}^*,\mathbf{C}^*)$. By the continuity of the functionals $\phi(\mathbf{\bar x},\mathbf{C})$ and
$\psi(\mathbf{\bar x},\mathbf{C})$, we have for any $(\mathbf{\bar x},\mathbf{C})\in\mathbb{R}^m\times\mathcal{S}_m^+$
\begin{equation*}
\begin{aligned}
F_\alpha(\mathbf{\bar x}^*,\mathbf{C}^*)& = \lim_{k\to\infty}(\phi(\mathbf{\bar x}^k,\mathbf{C}^k)+\alpha^k \psi(\mathbf{\bar x}^k,\mathbf{C}^k))
\geq \lim_{k\to\infty}(\phi(\mathbf{\bar x},\mathbf{C})+\alpha^k \psi(\mathbf{\bar x},\mathbf{C}))\\
& = \phi(\mathbf{\bar x},\mathbf{C})+\alpha \psi(\mathbf{\bar x},\mathbf{C})=F_\alpha(\mathbf{\bar x},\mathbf{C}).
\end{aligned}
\end{equation*}
That is, $(\mathbf{\bar x}^*,\mathbf{C}^*)$ is a maximizer of $F_\alpha(\mathbf{\bar x},\mathbf{C})$.
The uniqueness of the maximizer to $F_\alpha(\mathbf{\bar x},\mathbf{C})$ and a standard subsequence
argument imply that the whole sequence converges. The desired continuity now follows by the
continuity of $\psi(\mathbf{\bar x},\mathbf{C})$ in $(\mathbf{\bar x},\mathbf{C})$.
\end{proof}
Next we give an important monotonicity relation for $\psi(\mathbf{\bar x}_\alpha,\mathbf{C}_\alpha)$
in $\alpha$, in a manner similar to classical Tikhonov regularization \cite{ItoJinTakeuchi:2011}.
In Appendix \ref{app:sensitivity}, we show that it is actually strictly monotone.
\begin{lemma}\label{lem:mon}
The functional $\psi(\mathbf{\bar{x}}_\alpha,\mathbf{C}_\alpha)$ is monotonically increasing in $\alpha$.
\end{lemma}
\begin{proof}
This result follows by a standard comparison principle.
For any $\alpha_1,\alpha_2$, by the maximizing property of $(\mathbf{C}_{\alpha_1},\mathbf{\bar x}_{\alpha_1})$
and $(\mathbf{C}_{\alpha_2},\mathbf{\bar x}_{\alpha_2})$, we have
\begin{align*}
F_{\alpha_1}(\mathbf{\bar{x}}_{\alpha_1},\mathbf{C}_{\alpha_1}) \ge F_{\alpha_1}(\mathbf{\bar{x}}_{\alpha_2},\mathbf{C}_{\alpha_2})\quad \mbox{and}\quad
F_{\alpha_2}(\mathbf{\bar{x}}_{\alpha_2},\mathbf{C}_{\alpha_2}) \ge F_{\alpha_2}(\mathbf{\bar{x}}_{\alpha_1},\mathbf{C}_{\alpha_1}).
\end{align*}
Summing up these two inequalities and collecting terms yield
\begin{align*}
(\alpha_1-\alpha_2)[\psi(\mathbf{\bar{x}}_{\alpha_1},\mathbf{C}_{\alpha_1})-\psi(\mathbf{\bar{x}}_{\alpha_2},\mathbf{C}_{\alpha_2})] \ge 0.
\end{align*}
Then the desired monotonicity relation follows.
\end{proof}
\begin{theorem}\label{thm:mono}
For any initial guess $\alpha^1>0$,
the sequence $\{\alpha^k\}$ generated by Algorithm \ref{alg:hyper} is monotonically convergent
to some $\alpha^*\geq0$, and if the limit $\alpha^*>0$, then it satisfies
the fixed point equation \eqref{eq:alpha}.
\end{theorem}
\begin{proof}
By the fixed point iteration \eqref{eq:alpha}, we have (with $c=\frac{m}{2}+a-1$)
\begin{equation*}
\begin{split}
\alpha^{k+1}-\alpha^{k} &= \frac{c}{-\psi(\mathbf{\bar{x}}_{\alpha^{k}},\mathbf{C}_{\alpha^{k}})+b} - \frac{c}{-\psi(\mathbf{\bar{x}}_{\alpha^{k-1}},\mathbf{C}_{\alpha^{k-1}})+b}\\ &=\frac{c[\psi(\mathbf{\bar{x}}_{\alpha^{k}},\mathbf{C}_{\alpha^{k}})-\psi(\mathbf{\bar{x}}_{\alpha^{k-1}},\mathbf{C}_{\alpha^{k-1}})]}{(-\psi(\mathbf{\bar{x}}_{\alpha^{k}},\mathbf{C}_{\alpha^{k}})+b)(-\psi(\mathbf{\bar{x}}_{\alpha^{k-1}},\mathbf{C}_{\alpha^{k-1}})+b)}.
\end{split}
\end{equation*}
Since $\psi\leq 0$, the denominator is positive.
By Lemma \ref{lem:mon}, $\alpha^{k+1}-\alpha^{k}$
and $\alpha^k-\alpha^{k-1}$ have the same sign, and thus
$\{\alpha^k\}$ is monotone. Further, for all $\alpha^k$, we have
$0 \le \alpha^k \le \frac{m+2(a-1)}{2b},$
i.e., $\{\alpha^k\}$ is uniformly bounded. Thus
$\{\alpha^k\}$ is convergent. By Lemma \ref{lem:cont}, $\psi(\mathbf{\bar{x}}_\alpha,\mathbf{C}_\alpha)$ is
continuous in $\alpha$ for $\alpha>0$, and $\alpha^*$ satisfies \eqref{eq:alpha}.
\end{proof}
\begin{remark}
The proof of Theorem \ref{thm:mono} provides a constructive approach to the existence of a solution to \eqref{eq:alpha}.
The uniqueness of the solution $\alpha^*$ to \eqref{eq:alpha} is generally not ensured. However, in practice, it
seems to have only two fixed points: one is in the neighborhood of $+\infty$, which is uninteresting, and the other
is the desired one.
\end{remark}
\section{Numerical experiments and discussions}\label{sec:numer}
Now we present numerical results to examine algorithmic features (Sections \ref{ssec:iter}--\ref{ssec:mcmc},
with the example \texttt{phillips}) and to illustrate the VGA (Section \ref{ssec:recon}). All one-dimensional examples are taken from
the public-domain \texttt{MATLAB} package \texttt{Regutools}\footnote{\url{http://www.imm.dtu.dk/~pcha/Regutools/}, last
accessed on April 15, 2017}, and the discrete problems are of size $100\times 100$. We refer to the prior with a zero mean $\bm \mu_0=\mathbf{0}$
and the covariance $\alpha^{-1}\mathbf{I}_m$ and $\alpha^{-1}\mathbf{L}_1^{-1}\mathbf{L}_1^{-t}$ (with $\mathbf{L}_1$
being the 1D first-order forward difference matrix) as the $L^2$- and $H^1$-prior, respectively, and let
$\bar {\mathbf{C}}_{0}=\mathbf{I}_m$, and $\bar{\mathbf C}_1=\mathbf{L}_1^{-1}\mathbf{L}_1^{-t}$. Unless otherwise
specified, the parameter $\alpha$ is determined in a trial-and-error manner, and in Algorithm
\ref{alg:vb}, the Newton update $\delta\mathbf{\bar x}$ in \eqref{eqn:iter-barx} is computed by
the \texttt{MATLAB} built-in function \texttt{pcg} with a default tolerance, the prior covariance $\mathbf{C}^{-1}_0$
as the preconditioner and a maximum $10$ PCG iterations.
\subsection{Convergence behavior of inner and outer iterations of Algorithm \ref{alg:vb}}\label{ssec:iter}
First, we examine the convergence behavior of inner iterations for updating $\mathbf{\bar x}$ and $\mathbf{C}$,
i.e., \eqref{eqn:iter-barx} and \eqref{eqn:iter-C}, for the example \texttt{phillips} with the $L^2$-prior
$\mathbf{C}_0=1.0\times 10^{-1}\mathbf{\bar C}_0$ and $H^1$-prior $\mathbf{C}_0=2.5\times10^{-3}\mathbf{\bar C}_1$.
To study the convergence, we fix $\mathbf{C}$ at $\mathbf{C}^1=\mathbf{I}$ for $\mathbf{\bar x}$
and present the $\ell^2$-norm of the update $\delta\mathbf{\bar x}$ (initialized with $\mathbf{\bar x}^0=\mathbf{0}$),
and similarly fix $\mathbf{\bar x}$ at the converged iterate $\mathbf{\bar x}^1$ for $\mathbf{C}$ and present
the spectral norm of the change $\delta\mathbf{C}$. For both \eqref{eqn:iter-barx} and \eqref{eqn:iter-C},
these initial guesses are quite far away from the solutions, and thus the choice allows showing their global
convergence behavior. The convergence is fairly rapid and steady for both inner iterations, cf. Fig. \ref{fig:inner}.
For example, for a tolerance $10^{-5}$, the Newton method \eqref{eqn:iter-barx} converges after about 10 iterations,
and the fixed point method \eqref{eqn:iter-C} converges after $4$ iterations,
respectively. The global as well as local superlinear convergence of the Newton method \eqref{eqn:iter-barx} are
clearly observed, confirming the discussions in Section \ref{sec:algorithm}. The convergence behavior of
the inner iterations is similar for both priors. In practice, it is unnecessary to solve the inner iterates to a
very high accuracy, and it suffices to apply a few inner updates within each outer iteration. Since the iteration
\eqref{eqn:iter-C} often converges faster than \eqref{eqn:iter-barx}, we take five Newton
updates and one fixed point update per outer iteration for the numerical experiments below.
\begin{figure}[htb!]
\centering
\begin{tabular}{cc}
\includegraphics[scale=0.45]{inner_L2_v2} & \includegraphics[scale=0.45]{inner_H1_v2}\\
(a) $L^2$-prior & (b) $H^1$-prior
\end{tabular}
\caption{The convergence of the inner iterations of Algorithm \ref{alg:vb} for \texttt{phillips}.\label{fig:inner}}
\end{figure}
\begin{figure}[hbt!]
\centering
\begin{tabular}{cc}
\includegraphics[scale=0.45]{outer_L2_v2} & \includegraphics[scale=0.45]{outer_H1_v2}\\
(a) $L^2$ prior & (b) $H^1$-prior
\end{tabular}
\caption{The convergence of outer iterations of Algorithm \ref{alg:vb} for \texttt{phillips}.}\label{fig:outer}
\end{figure}
\begin{figure}[hbt!]
\centering
\begin{tabular}{cc}
\includegraphics[scale=0.45]{Bd_Out_L2_v2} & \includegraphics[scale=0.45]{Bd_Out_H1_v2}\\
(a) $L^2$-prior & (b) $H^1$-prior
\end{tabular}
\caption{The convergence of the lower bound $F(\mathbf{\bar x},\mathbf{C})$ for \texttt{phillips}. \label{fig:1dconv}}
\end{figure}
To examine the convergence of outer iterations, we show the errors of the mean $\mathbf{\bar x}$ and
covariance $\mathbf{C}$ and the lower bound $F(\mathbf{\bar x},\mathbf{C})$ in Figs. \ref{fig:outer}
and \ref{fig:1dconv}, respectively. Algorithm \ref{alg:vb} is terminated when the change of the lower
bound falls below $10^{-10}$. For the $L^2$-prior,
Algorithm \ref{alg:vb} converges after $5$ iterations and the last increments $\delta\mathbf{\bar x}$ and
$\delta\mathbf{C}$ are of order $10^{-8}$ and $10^{-9}$, respectively. This observation holds also
for the $H^1$-prior, cf. Figs. \ref{fig:outer}(b) and \ref{fig:1dconv}(b). Thus, both inner and outer
iterations converge rapidly and steadily, and Algorithm \ref{alg:vb} is very efficient.
\subsection{Low-rank approximation of $\mathbf{A}$ and sparsity of $\mathbf{C}$}\label{ssec:low-rank}
The discussions in Section \ref{ssec:complexity} show that the structure on $\mathbf{A}$ and $\mathbf{C}$ can
be leveraged to reduce the complexity of Algorithm \ref{alg:vb}. Now we evaluate their
influence on the accuracy of the VGA.
First, we examine the influence of low-rank approximation to $\mathbf{A}$. Since the kernel function of
the example \texttt{phillips} is smooth, the inverse problem is mildly ill-posed and the singular values
$\sigma_k$ decay algebraically, cf. Fig. \ref{fig:low-rank}(a). A low-rank matrix $\mathbf{A}_r$
of rank $r\approx10$ can already approximate $\mathbf{A}$ well. To study its influence
on the VGA, we denote by $(\mathbf{\bar x}_r,\mathbf{C}_r)$ and $(\mathbf{\bar x}^*,\mathbf{C}^*)$ the VGA
for $\mathbf{A}_r$ and $\mathbf{A}$, respectively. The errors $e_{\mathbf{\bar x}} =
\|\mathbf{\bar x}_r-\mathbf{\bar x}^*\|$ and $e_{\mathbf{C}}=\|\mathbf{C}_r-\mathbf{C}^*\|$ for
different ranks $r$ are shown in Figs. \ref{fig:low-rank} (b) and (c) for
the $L^2$- and $H^1$-prior, respectively. Too small a rank $r$ of
the approximation $\mathbf{A}_r$ can lead to pronounced errors in both the mean
$\mathbf{\bar x}$ and the covariance $\mathbf{C}$, whereas for a rank of $r= 10$, the errors already fall below
one percent. Interestingly, the decay of the error $e_{\mathbf{\bar x}}$ is much faster than that
of the singular values $\sigma_k$, and the error $e_\mathbf{C}$ decays slower than $e_{\mathbf{\bar x}}$.
The fast decay of the errors $e_{\mathbf{\bar x}}$ and $e_\mathbf{C}$ indicates the robustness of the VGA,
which justifies using low-rank approximations in Algorithm \ref{alg:vb}.
\begin{figure}[h]
\centering
\setlength{\tabcolsep}{0mm}
\begin{tabular}{ccc}
\includegraphics[scale=0.35]{svd_A_v2}&\includegraphics[scale=0.35]{svd_L2_v2}&\includegraphics[scale=0.35]{svd_H1_v2}\\
(a) singular values $\sigma_k$& (b) $L^2$-prior & (c) $H^1$-prior
\end{tabular}
\caption{(a) singular values and (b)--(c): the errors of the mean and covariance for \texttt{phillips}. \label{fig:low-rank}}
\end{figure}
Next we examine the influence of the sparsity assumption on the covariance $\mathbf{C}$, which is used to reduce
the complexity of Algorithm \ref{alg:vb}. Due to the coupling between $\mathbf{\bar x}$ and $\mathbf{C}$,
cf. \eqref{eqn:barx}--\eqref{eqn:C}, the sparsity assumption on $\mathbf{C}$ affects the accuracy of both
$\mathbf{\bar x}$ and $\mathbf{C}$. To illustrate this, we take different sparsity levels $s$ on $\mathbf{C}$
in Algorithm \ref{alg:vb}, i.e., at most $s$ nonzero entries around the
diagonal of $\mathbf{C}$. Surprisingly, a diagonal $\mathbf{C}$ already gives an acceptable approximation measured by
the errors $e_{\mathbf{\bar x}}=\|\mathbf{\bar x}_s-\mathbf{\bar x}^*\|_2$ and $e_\mathbf{C}=\|\mathbf{C}_s-\mathbf{C}^*\|_2$,
where $(\mathbf{\bar x}_s,\mathbf{C}_s)$ is the VGA with a sparsity level $s$. The
errors $e_{\mathbf{\bar x}}$ and $e_\mathbf{C}$ decrease with the sparsity level $s$, cf. Table \ref{tab:err-sparsity}.
Thus the sparsity assumption on $\mathbf{C}$ can reduce significantly the complexity while retaining the accuracy.
\begin{table}[htbp]
\caption{The errors $e_{\mathbf{\bar x}}$ and $e_\mathbf{C}$
v.s. the sparsity level $s$ of $\mathbf{C}$ for
\texttt{phillips}. \label{tab:err-sparsity}}
\label{tab:C_sparse}
\centering
\begin{tabular}{c|cc|cc}
\hline
prior & \multicolumn{2}{c|}{$L^2$ prior} & \multicolumn{2}{c}{$H^1$ prior}\\
$s$ & $e_{\mathbf{\bar x}}$ & $e_{\mathbf{C}}$ & $e_{\mathbf{\bar x}}$ & $e_{\mathbf{C}}$\\
\hline
$1$ & 6.38e-2 & 9.20e-2 & 1.92e-2 & 7.06e-2 \\
$3$ & 5.62e-2 & 8.10e-2 & 1.27e-2 & 5.42e-2\\
$5$ & 4.88e-2 & 7.02e-2 & 1.00e-2 & 4.29e-2\\
\hline
\end{tabular}
\end{table}
\subsection{Hierarchical parameter choice}\label{ssec:param}
Now we examine the convergence of Algorithm \ref{alg:hyper} for choosing the parameter $\alpha$ in
the prior $p(\mathbf{x})$. By Theorem \ref{thm:mono}, the sequence $\{\alpha^k\}$ generated by Algorithm
\ref{alg:hyper} is monotone. We illustrate this by two initial guesses, i.e., $\alpha^1=0.1$ and
$\alpha^1=10$. Both sequences of iterates generated by Algorithm \ref{alg:hyper} converge monotonically to the limit
$\alpha^* = 0.7778$, and the convergence of Algorithm \ref{alg:hyper} is fairly steady, cf. Fig.
\ref{fig:conv_a}(a). Further, Algorithm \ref{alg:hyper} indeed maximizes the joint lower bound
\eqref{eq:hyperbd} with its maximum attained at $\alpha^*=0.7778$, cf. Fig. \ref{fig:conv_a}(b). Though
not shown, the lower bound $F_{\alpha}(\mathbf{\bar x},\mathbf{C})$ is also increasing during the
iteration. Thus, the hierarchical approach is
indeed performing model selection by maximizing ELBO.
\begin{figure}[hbt!]
\centering
\begin{tabular}{cc}
\includegraphics[scale=0.45]{a_cg_conv_v2} & \includegraphics[scale=0.45]{a_hbd_v2}\\
(a) convergence of $\alpha$ & (b) joint lower bound
\end{tabular}
\caption{(a) The convergence of Algorithm \ref{alg:hyper} initialized with $0.1$ and $10$, both
convergent to $\alpha^*=0.7778$; (b)
the joint lower bound versus $\alpha$, for \texttt{phillips}
with $L^2$-prior.\label{fig:conv_a}}
\end{figure}
To illustrate the quality of the automatically chosen parameter $\alpha$, we take
six realizations of the Poisson data $\mathbf{y}$ and compare the mean $\mathbf{\bar x}$ of the
VGA with the optimal regularized solutions, where $\alpha$
is tuned so that the error is smallest (and thus it is infeasible in practice).
The means $\mathbf{\bar x}$ by Algorithm \ref{alg:hyper} are comparable with the optimal ones,
cf. Fig. \ref{fig:hypertest}, and thus the hierarchical approach can yield reasonable approximations.
The parameter $\alpha$ by the hierarchical approach is slightly smaller than the optimal one, cf. Table
\ref{tab:alpha}, and hence the corresponding
reconstruction tends to be slightly more oscillatory than the optimal one. The
value of the parameter $\alpha$ by the hierarchical approach is
relatively independent of the realization, whose precise mechanism is to be ascertained.
\begin{figure}[hbt!]
\centering
\setlength{\tabcolsep}{0mm}
\begin{tabular}{ccc}
\includegraphics[scale=0.35]{h01_v2} & \includegraphics[scale=0.35]{h02_v2} & \includegraphics[scale=0.35]{h03_v2}\\
\includegraphics[scale=0.35]{h04_v2} & \includegraphics[scale=0.35]{h05_v2} & \includegraphics[scale=0.35]{h06_v2}\\
\end{tabular}
\caption{The mean $\mathbf{\bar x}$ of the Gaussian approximation by Algorithm \ref{alg:hyper} (Alg2) and the ``optimal" solution (opt) for
6 realizations of Poisson data for \texttt{phillips} with the $L^2$-prior.\label{fig:hypertest}}
\end{figure}
\begin{table}[hbt!]
\centering
\caption{The values of the hyperparameter $\alpha$ for the results in Fig. \ref{fig:hypertest}.\label{tab:alpha}}
\begin{tabular}{c|cccccc}
\hline
case & 1 & 2 & 3 & 4 & 5 & 6\\
\hline
opt & 2.64 & 3.35 & 2.59 & 1.35 & 9.31 & 4.04\\
Alg 2 & 0.78 & 0.76 & 0.76 & 0.77 & 0.73 & 0.74\\
\hline
\end{tabular}
\end{table}
\subsection{VGA versus MCMC}\label{ssec:mcmc}
Despite the widespread use of variational type techniques in practice, the accuracy of the approximations
is rarely theoretically studied. This has long been a challenging issue for approximate Bayesian inference,
including the VGA. In this part, we conduct an experiment to numerically validate the VGA against the
results by Markov chain Monte Carlo (MCMC). To this end, we employ the standard Metropolis-Hastings algorithm,
with the Gaussian approximation from the VGA as the proposal distribution (i.e., independence sampler). In other
words, we correct the samples drawn from VGA by a Metropolis-Hastings step. The length of the MCMC chain is $2\times 10^5$, and
the last $1\times 10^5$ samples are used for computing the summarizing statistics. The acceptance rate
in the Metropolis-Hastings algorithm is $96.06\%$. This might be attributed to the fact that the VGA approximates
the posterior distribution fairly accurately, and thus nearly all the proposals are accepted. The numerical results are
presented in Fig. \ref{fig:HPD}, where the mean and the $90\%$ highest posterior density (HPD) credible set are
shown. It is observed that the mean and HPD regions by MCMC and VGA are very close to each other, cf.
Figs. \ref{fig:HPD} and \ref{fig:mean_comp}, thereby validating the accuracy of the VGA. The $\ell^2$ error
between the mean by MCMC and VGA is $9.80\times 10^{-3}$, and the error between corresponding
covariance in spectral norm is $6.40\times 10^{-3}$. Just as expected, graphically the means and covariances
are indistinguishable, cf. Fig. \ref{fig:mean_comp}.
\begin{figure}[hbt!]
\centering
\begin{tabular}{cc}
\includegraphics[scale=0.35]{HPD_Post_revised} & \includegraphics[scale=0.35]{HPD_VGA_revised}\\
(a) MCMC & (b) VGA
\end{tabular}
\caption{The mean and $90\%$ HPD by (a) MCMC and (b) VGA for \texttt{phillips} with $\mathbf{C}_0=1.00\times 10^{-1}\mathbf{\bar C}_0$.\label{fig:HPD}}
\end{figure}
\begin{figure}[hbt!]
\centering
\begin{tabular}{ccc}
\includegraphics[width=0.33\textwidth]{mean_comp_revised} & \includegraphics[width=0.33\textwidth]{cov_post_revised} & \includegraphics[width=0.33\textwidth]{cov_vga_revised}\\
(a) mean & (b) MCMC & (c) VGA
\end{tabular}
\caption{(a) The mean by MCMC and VGA versus the exact solution, and the covariance by (b) MCMC and (c) VGA
for \texttt{phillips} with $\mathbf{C}_0=1.00\times 10^{-1}\mathbf{\bar C}_0$.\label{fig:mean_comp}}
\end{figure}
\subsection{Numerical reconstructions}\label{ssec:recon}
Last, we present VGAs for one- and two-dimensional examples. The numerical results for
the following four 1D examples, i.e., \texttt{phillips}, \texttt{foxgood}, \texttt{gravity} and \texttt{heat}, for both
$L^2$- and $H^1$-priors, are presented in Figs. \ref{fig:ph_L2}-\ref{fig:he_L2}. For the example \texttt{phillips}
with either prior, the mean $\mathbf{\bar{x}}$ by Algorithm \ref{alg:vb} agrees very
well with the true solution $\mathbf{x}^\dagger$. However, near the boundary, the mean $\mathbf{\bar x}$ is less accurate.
This might be attributed to the fact that in these regions, the Poisson count is relatively small, and it may be
insufficient for an accurate recovery. For the $L^2$-prior, the optimal $\mathbf{C}$ is diagonal
dominant, and decays rapidly away from the diagonal, cf. Fig. \ref{fig:ph_L2}(b). For the $H^1$-prior,
$\mathbf{C}$ remains largely diagonally dominant, but the off-diagonal entries decay a bit slower. Thus, it is
valid to assume that $\mathbf{C}$ is dominated by local interactions as in Section \ref{ssec:complexity}.
These observations remain largely valid for the other 1D examples, despite that they are much more ill-posed.
\begin{figure}[hbt!]
\centering
\begin{tabular}{cc}
\includegraphics[scale=0.25]{ph_L2_x_v2} \includegraphics[scale=0.25]{ph_L2_C_v2}&\includegraphics[scale=0.25]{ph_H1_x_v2} \includegraphics[scale=0.25]{ph_H1_C_v2}\\
(a) $\mathbf{C}_0=1.00\times 10^{-1}\mathbf{\bar C}_0$ & (b) $\mathbf{C}_0=2.5\times 10^{-3}\mathbf{\bar C}_1$
\end{tabular}
\caption{The Gaussian approximation for \texttt{phillips}.\label{fig:ph_L2}}
\end{figure}
\begin{figure}[hbt!]
\centering
\begin{tabular}{cc}
\includegraphics[scale=0.25]{fg_L2_x_v2}\includegraphics[scale=0.25]{fg_L2_C_v2} & \includegraphics[scale=0.25]{fg_H1_x_v2}\includegraphics[scale=0.25]{fg_H1_C_v2}\\
(a) $\mathbf{C}_0=1.12\times 10^{1}\mathbf{\bar C}_0$ & (b) $\mathbf{C}_0=9.8\times 10^{-3}\mathbf{\bar C}_1$
\end{tabular}
\caption{The Gaussian approximation for \texttt{foxgood}.\label{fig:fg_L2}}
\end{figure}
\begin{figure}[hbt!]
\centering
\begin{tabular}{cc}
\includegraphics[scale=0.25]{gr_L2_x_v2}\includegraphics[scale=0.25]{gr_L2_C_v2}&\includegraphics[scale=0.25]{gr_H1_x_v2}\includegraphics[scale=0.25]{gr_H1_C_v2} \\
(a) $\mathbf{C}_0=1\times 10^{-1}\mathbf{\bar C}_0$ & (b) $\mathbf{C}_0=1.5\times 10^{-3}\mathbf{\bar C}_1$
\end{tabular}
\caption{The Gaussian approximation for \texttt{gravity}.\label{fig:gr_L2}}
\end{figure}
\begin{figure}[hbt!]
\centering
\begin{tabular}{cc}
\includegraphics[scale=0.25]{he_L2_x_v2}\includegraphics[scale=0.25]{he_L2_C_v2}&\includegraphics[scale=0.25]{he_H1_x_v2}\includegraphics[scale=0.25]{he_H1_C_v2}\\
(a) $\mathbf{C}_0=3.2\times 10^{-1}\mathbf{\bar C}_0$ & (b) $\mathbf{C}_0=1\times 10^{0}\mathbf{\bar C}_1$
\end{tabular}
\caption{The Gaussian approximation for \texttt{heat}.\label{fig:he_L2}}
\end{figure}
Last, we test Algorithm \ref{alg:vb} on a 2D image of size $128\times 128$. In this example, the matrix $\mathbf{A}\in \mathbb{R}^{16384\times 16384}$
is a (discrete) Gaussian blurring kernel with a blurring width $99$, variance $1.5$ and a circular boundary condition.
Since the blurring width is large, the matrix $\mathbf{A}$ is indeed low-rank, and we employ a rSVD approximation
of rank $2000$, where the rank is determined by inspecting the singular value spectrum. The true solution $\mathbf{x}^\dag$
consists of two Gaussian blobs, cf. Fig. \ref{fig:2d}(a), and thus we employ a smooth prior with $\mathbf{C}_0 = 6.00\times
10^{-2}\mathbf{L}^{-1} \mathbf{L}^{-t}$, where $\mathbf{L}=\mathbf{I}\otimes\mathbf{L}_1+\mathbf{L}_1\otimes\mathbf{I}$ is
the 2D first-order finite difference matrix. Since the problem size is very large, we restrict $\mathbf{C}$
to be a sparse matrix such that every pixel interacts only with at most four neighboring pixels. This allows
reducing the computational cost greatly. The mean
$\mathbf{\bar x}$ is nearly identical with the true solution $\mathbf{x}^\dag$, and the error is very small, cf. Fig. \ref{fig:2d}.
The structural similarity index between the mean $\mathbf{\bar x}$ and the exact solution $\mathbf{x}^\dag$ is 0.812.
We also compare the VGA solution with the MAP estimator. The $\ell_2$ error of the mean of the VGA is $9.7205$,
which is slightly smaller than that of the MAP estimator ($9.7355$). To indicate the
uncertainty around the mean $\mathbf{\bar x}$, we show in Fig. \ref{fig:2d}(f) the diagonal entries of
$\mathbf{C}$ (i.e., the variance at each pixel). The variances are relatively large at pixels where the mean $\mathbf{\bar x}$ is less accurate.
In summary, the VGA can provide a reliable point estimator together with
useful covariance estimates.
\begin{figure}[hbt!]
\centering
\begin{tabular}{ccc}
\includegraphics[width=0.33\textwidth]{2d_cg_exact} & \includegraphics[width=0.33\textwidth]{2d_cg_poisson_revised} & \includegraphics[width=0.33\textwidth]{2d_cg_map_revised} \\
(a) true solution $\mathbf{x}^\dag$ & (b) Poisson sample $\mathbf{y}$ & (c) MAP $\mathbf{x}_{\text{MAP}}$ \\
\includegraphics[width=0.33\textwidth]{2d_cg_recon}& \includegraphics[width=0.33\textwidth]{2d_cg_error} & \includegraphics[width=0.33\textwidth]{2d_C_diag}\\
(d) mean $\mathbf{\bar x}$ & (e) error $\mathbf{x}^\dag-\mathbf{\bar x}$ & (f) variance $\mathrm{diag}(\mathbf{C})$
\end{tabular}
\caption{The Gaussian approximation for image deblurring.}\label{fig:2d}
\end{figure}
\section{Conclusions}
In this work, we have presented a study of the variational Gaussian approximation to the Poisson data (under the
log linear link function) with respect to the Kullback-Leibler divergence. We derived explicit expressions for
the lower bound functional and its gradient, and proved its strict concavity and existence and uniqueness of an
optimal Gaussian approximation. Then we developed an efficient algorithm for maximizing the functional, discussed
its convergence properties, and described practical strategies for reducing the complexity per iteration. Further,
we analyzed hierarchical modeling for automatically determining the hyperparameter using the variational Gaussian
approximation, and proposed a monotonically convergent algorithm for the joint estimation. These discussions were
supported by extensive numerical experiments.
There are several avenues for further study. First, one of the fundamental issues is the quality
of the Gaussian approximation relative to the true posterior distribution. In general this
issue has been long-standing, and it also remains to be analyzed for the Poisson model. Second,
the variational Gaussian approximation can be viewed as a nonstandard regularization
scheme, by also penalizing the covariance. This naturally motivates the study on its regularizing
property from the perspective of classical regularization theory, e.g., consistency and
convergence rates. Third, the approach generally gives a very
reasonable approximation. This suggests its use as a preconditioner for sampling techniques, e.g., the variational
approximation as the proposal distribution (i.e., independence sampler) in the standard
Metropolis-Hastings type algorithm or as the base distribution for importance sampling. It is
expected to significantly speed up the convergence of these sampling procedures, which is
confirmed by the preliminary experiments. We plan to study these aspects in future works.
\section*{Acknowledgements}
The work of B. Jin is supported by UK EPSRC grant EP/M025160/1, and that
of C. Zhang by a departmental studentship.
|
1,314,259,995,015 | arxiv | \section{Introduction}
People express their thoughts more conveniently on social media than during in-person (often analytical) sessions with experts. As per the National Institute of Mental Health report of 2020 \footnote{https://www.nami.org/mhstats}, 52.9 million adults in the USA suffer from mental illness. "The Health at a Glance Europe 2020" report\footnote{https://health.ec.europa.eu/system/files/2020-12/2020\_healthatglance\_rep\_en\_0.pdf} noted that the COVID-19 pandemic and the subsequent economic crisis caused a growing burden on the mental well-being of the citizens, with evidence of higher rates of stress, anxiety and depression. Previous studies support social media's powerful role in measuring the public's social well-being~\cite{robinson2019measuring}. To this end, we obtain Reddit social media posts demonstrating mental health issues for mental health analysis.
In this research work, we narrow down the problem of \textit{mental health analysis} to \textit{the identification of reasons behind users' intent in their social media posts}. The sequence to sequence (Seq2Seq) models are applied to solve the problem of causal categorization over CAMS dataset\footnote{https://github.com/drmuskangarg/CAMS}. The ground-truth of CAMS dataset contains two-fold annotations (i) \textit{causal category} and (ii) \textit{interpretations}. The textual segments of \textit{interpretation} support decision making for identifying causal categories. However, there exists a major challenge of responsibility and explainability for multi-class causal analysis while applying fine-tuned Seq2Seq models. In this context, we find explanations for inconsistency among resulting accuracy of different classes/ categories. Another key contribution is to find distance among \textit{inferences} and \textit{explanations} to obtain semantic similarity over distributional word representation: (i) \textit{cosine similarity} and (ii) \textit{word mover distance}. \\
\textbf{Definition 1: Inferences -}
The inferences are set of interpreted textual segments by trained human-annotators which appears as ground-truth information in CAMS dataset.\\
\textbf{Definition 2: Explanations -}
The results obtained as the set of top-keywords using explainable AI approaches for multi-class causal categorization of Reddit posts is termed as explanations.\\
We further discuss a potential instance to define this problem of explainable causal analysis in this section. Consider a given sample $A$ where a user $U$ post $A$: ``\textit{Five years now and still no job. I am done with my life.}" The user $U$ is upset about his financial problems/ career due to \textit{unemployment}. We consider this text as the user-generated social media data which demonstrates mental health issues. The intent of a user is \textit{`to end life'} and a key challenge is to find the reason behind this intent. This cause-and-effect relationship aids the causal categorization. The category for sample $A$ is identified as `\textit{Jobs and careers}' because the reason is associated with unemployment. There are five causal categories in annotated CAMS dataset, namely, \textit{(i) bias or abuse, (ii) jobs and careers, (iii) medication, (iv) relationships, and (v) alienation}.
In this research work, we use the CAMS dataset for explanations on multi-class causal categorization. We have made three major contributions in this work. First, we fine-tune deep learning models for multi-class causal categorization. Second, we obtain explainable text for causal categorization using \textit{Local Interpretable Model-Agnostic Explanations (LIME)} and \textit{IG}. Third, two semantic similarity measures: \textit{cosine similarity} and \textit{word mover distance} assist the validation of resulting explainable snippets with annotated inferences. Our experimental results explain the inconsistency among the accuracy of different classes and validate the consistency of inferences made by model and human annotators, thereby defining the need of discourses and pragmatics for this problem of causal analysis.
\section{Background}
Our task is defined as a domain-specific problem to find \textit{reasons behind the intent of a user on social media}. After extensive literature surveys, we observe minimal work on this problem. A domain-specific dataset is available for public use to examine the inferences (reasons) and causal categories (multi-class classification) task for mental health data as CAMS dataset~\cite{muskan2022lrec}. The existing solution of a task of causal analysis is given as the use of machine learning and neural models for multi-class categorization of causal categories. The resulting values of f-measure vary for different classes and raise a new research question: \textit{To what extent causal categorization is responsible?} We choose to resolve this problem by finding and validating the explainable texts.
To find the explanations for causal categorization, we explore existing explainable AI methods for natural language processing~\cite{madsen2021post}. Some well-established surveys and tutorials categorize explainable approaches into \textit{local vs global}, \textit{post hoc vs self explaining} and \textit{model agnostic vs model specific}~\cite{danilevsky2020survey}. We choose to observe local explanations with given input features for post-hoc interpretability methods which require less information. To this end, we identify two explainability approaches which are suitable for this study: (i) LIME and (ii) IG.
LIME samples nearby observations and uses model estimates to fit the logistic regression~\cite{ribeiro2016should}. The parameters of logistic regression represent the importance measure and larger the parameters, greater effect will have on the output. The IG is an attempt to assign an attribution value to each input feature which measures the extent to which an input contributes to the final prediction~\cite{sundararajan2017axiomatic}. A recent study is carried out to set a benchmark over three representative NLP tasks (sentiment analysis, textual similarity and reading comprehension) for interpretability of both neural models and saliency methods~\cite{wang2022fine} thereby emphasizing the need of LIME and IG for downstream NLP tasks.
The explainable methods give output in the form of important words/ text segments which serve as the most important input features. As we have available human annotated inferences for causal categorization in the form of text, we use these inferences as ground truth information (text-reference) and resulting explanations (RE) (text-observation). Thus, we use two semantic similarity measures to evaluate the performance of explainable methods for causal categorization: Cosine similarity and Word Mover's distance (WMD). Cosine similarity~\cite{salton1988term} calculates similarity between two words, sentences, paragraphs, pieces of text etc.\ and evolves from the squared Euclidean distance measure which is used to measure how similar the documents are irrespective of their size. Word Mover's Distance (WMD) outperforms Bag-of-words and TF-IDF in terms of document classification error rates~\cite{kusner2015word}.
\section{Framework}
In this section, we give a brief overview of the proposed framework for Figure~1 which represents the workflow for explainable causal analysis of mental health on social media data. We bifurcate our framework into three phases:
\begin{itemize}
\item Causal Categorization: The use of neural models for causal categorization of Reddit posts depicting mental illness.
\item Explanations: Finding explanations in the form of text-observations and obtaining top-keywords.
\item Evaluations: Validate the resulting text-observations by comparing them with the human annotations available in the CAMS dataset.
\end{itemize}
Consider a given set of self-reported short-text documents as $D$ where $D=\{d_1, d_2, \dots , d_n\}$. In \textit{Phase 1: causal categorization}, we segregate $D$ into training, validation and test sets, give the training set as an input, and fine-tune the multi-class classification model for our task. The model predictions are given as an input to \textit{Phase 2: Finding explanations} along with Reddit posts to obtain explanations. We further obtain these resulting explanations and human-annotated inferences present in the CAMS dataset for \textit{Phase 3: Evaluations} to test and validate the resulting explanations. Furthermore, we discuss the three phases of our proposed framework in this section.
\begin{figure}
\begin{center}
\includegraphics[scale=0.9]{figures/iconip_figure.drawio.pdf}
\caption{Overview of the proposed framework for explainable causal analysis of mental health on social media data. The framework is divided into three phases -- Phase 1: Causal categorization, Phase 2: Finding explanations, Phase 3: Semantic similarity.}
\label{fig:fig1}
\end{center}
\end{figure}
\subsection{Phase 1: Causal categorization}
To solve the problem of causal categorization, we employ four learning based multi-class classifiers~\cite{chen2018deep}. We exploit following deep learning models and fine-tuned them for prediction:
\begin{itemize}
\item \textbf{LSTM:}
Long Short-Term Memory (LSTM) is a popular advanced recurrent neural network architecture for modeling sequential data which allows the information to persist and is trained by taking the sequence of the embedding feature vector.
\item \textbf{BiLSTM:} A Bidirectional LSTM trains two hidden layers on the input sequence. The additional layer reverses the direction of information flow which means that the input sequence flows backward in an additional LSTM layer.
\item \textbf{CNN:} The CNN model efficiently extracts higher level features of the text using convolutional layers and max-pooling layers.
\item \textbf{CNN-LSTM:} A Hybrid CNN-LSTM Model uses CNN layers for feature extraction on input text combined with LSTMs to support sequence prediction.
\end{itemize}
\subsection{Phase 2: Finding explanations}
We obtain local explanations by using following two post-hoc interpretability models:
\begin{itemize}
\item \textbf{LIME:} It is a popular model-agnostic explainable method~\cite{ribeiro2016should} which provides local explanations for predictions of black-box models. LIME is also known as a post-hoc method. For a given model $\dot{F}$ and a given data sample $\alpha$, the method generates a fake dataset $\alpha_1, \alpha_2, \alpha_3, \dots, \alpha_n$ and uses the black box model, $\dot{F}$ to obtain the target class or value for each sample. Subsequently, a white box model, $\bar{G}$ is trained with the generated data set along with the generated target labels. The aim is to train a white-box model for the original data sample and areas close to it even if the model does not perform as well globally. The closeness can be estimated using an appropriate similarity or distance metric. LIME then explains the original example using the white-box model and weights generated by it. The prediction accuracy of the white-box model, $\bar{G}$ gives an estimate of how close it mimics the black-box model, $\dot{F}$ and whether its explanations can be trusted. \\
\item \textbf{Integrated Gradient:} The second method employed for explainability in this work is Integrated Gradients~\cite{sundararajan2017axiomatic}, a gradient-based explanation method. It is a model specific method that uses gradients (for example, using a deep neural network) to assess the importance of a feature on the model’s output. It employs the knowledge associated with the internal model for calculating the gradients of the model’s layers. It computes an attribution score corresponding to each feature by considering the integral of the gradients calculated along a straight path from a baseline instance $u{}'$ to the input instance $u$.
\end{itemize}
\subsection{Phase 3: Evaluations with semantic similarity}
The \textit{human-annotated inferences} in the CAMS dataset, which represent the causal explanations in the posts, are validated by a senior clinical psychologist and serve as a ground truth for our predicted explanations. There are two types of similarity measures for identifying document similarity: (i) syntactic similarity and (ii) semantic similarity. We omit exact string matching algorithms due to the varying number of words in each chunk and the inconsistency among lengths of inferences and resulting keywords. The semantic similarity among texts validates the effectiveness of classifiers. We employ the two most widely used semantic similarity measures:
\begin{itemize}
\item \textbf{Cosine similarity:} It is a widely used metric in information retrieval which models text as vector of terms~\cite{salton1988term}. The similarity of two input sentences (documents) can be derived by calculating cosine values of term vectors for the given input using the following equation. The similarity between two vectors of given input documents ($Doc_1 \, Doc_2$) can be defined as:
\begin{equation}
\label{eq1}
Sim(Doc_1, Doc_2) = \frac{Doc_1 \cdot Doc_2}{|| Doc_1|| \, ||Doc_2||} = \frac{\sum_{i=1}^{n}A_i \cdot B_i} {\sqrt{\sum_{i=1}^{n}A _i ^{2}} \sqrt{\sum_{i=1}^{n}B_i ^{2}}}
\end{equation}
where $A_i$ and $B_i$ represent the components of vectors $Doc_1$ and $Doc_2$, respectively.\\
\item \textbf{Word Mover's Distance(WMD):} It is a novel distance metric~\cite{kusner2015word} that is used to measure the dissimilarity between two text documents. The method is different from the conventional models that work on syntactic similarity rather than semantic similarity. The method employs word embedding like Glove and Word2Vec to learn semantically meaningful representations of sentences. It computes distance between two documents A and B as the minimum cumulative distance that the embedded words of document A need to travel to reach the embedded words of document B.
WMD is computed using the cost-matrix, where $x_i$ and $x_j$ are the embeddings of words $i$ and $j$. The cost matrix $CM\in \mathbb{R}^{m\times m}$ is the distance of embeddings, such that $CM_{ij}= ||x_i - x_j ||^{2}$ as referred to in Eq.~\ref{eq2}. The distance between two documents $Doc_1$ and $Doc_2$ is the optimum value of the following
problem:
\begin{equation}
\label{eq2}
\displaystyle{\minimize_{P \in \mathbb{R}^{m \times m}} \, \sum_{ij}CM_{ij}\, P_{ij}}
\end{equation}
such that $P_{ij} \geq 0$
Intuitively, $P_{ij}$ represents the amount of word $i$ that is transported to word $j$. WMD is defined as the minimum total distance to convert one document to another document.
\end{itemize}
\section{Experiments and Evaluation}
This section covers the dataset description, experimental setup, results and performance evaluation of the proposed study.
\subsection{CAMS Dataset}
The CAMS dataset consists of 5051 instances ((i) 1896 from the SDCNL dataset and (ii) 3155 Reddit posts which are available
with subreddit r/depression using Python Reddit API
Wrapper (PRAW)\footnote{https://praw.readthedocs.io/en/stable/}) to categorize the \textit{direct causes} of mental disorders through mentions by users in their posts. Annotation is carried out manually by annotators who are proficient in the language. They work independently for each post and follow the given guidelines. Each annotator takes one hour to annotate about $15$--$25$ Reddit posts. The annotated files are verified by a clinical psychologist and a rehabilitation counselor. Furthermore, the validation of three annotated files is carried out by a Fleiss' Kappa inter-observer agreement study. The trained annotators have $61.28\%$ agreement for annotations of the CAMS dataset. Despite the increased subjectivity of the task, the trained annotators \textit{substantially agree} with their judgements.
\subsection{Experimental Setup}
Considering the CAMS dataset, we divide it into training, validation and testing sets consisting of $1699$, $117$ and $370$ instances, respectively. After preprocessing of the given documents $D$ (Reddit posts), we employ four deep learning methods to predict the causal category, namely, LSTM, BiLSTM, CNN and CNN-LSTM. At the initial layer of the neural network, we use GloVe, a distributional word embedding with dimension vectors of $100$. The GloVe embedding extracts semantics by using information available in neighbouring spaces.
For the experimental study, we consider a batch size of $128$, trained on $20$ epochs, with $265$ maximum length of tokens. We use the Adam optimizer for all models with one or more dropout layers and an optimal learning rate. We fine-tune the CNN-LSTM model with a learning rate of $0.0005$ and set a learning rate of $1.46\times 10^{-3}$ for all other classifiers.
\subsection{Experimental Results}
We perform experiments over the given dataset and obtain results as shown in Figure 2. To illustrate the effectiveness of our models, we give explanations for self-reported text of each causal-category. The given input is a \textit{self-reported text} of the CAMS dataset. The human-annotations are two-fold: (i) human-annotated interpretations (inferences) and (ii) causal category. We further perform explainable causal categorization to compare and contrast the \textit{inferences} with resulting top-keywords (\textit{explanations}). We observe minimal connection among words for Cause 0: No reasons followed by Cause 3: medications. However, the other causal categories seem to have high similarity among inferences and explanations.
\begin{figure}[t]
\begin{center}
\includegraphics[scale=0.99]{figures/iconip_fig0.pdf}
\caption{Experimental results for explainable causal categorization for six different categories.}
\label{fig2}
\end{center}
\end{figure}
\subsubsection{Error Analysis}
Other than the examples given in Figure 2, the medical terms mentioned in inferences and explanations may vary. For instance, prescriptions like \textit{propranalol}, name of diseases, heart problems, specific type of cancer and other antidepressants. This variation induces mismatch in semantic similarity among inferences and explanations for class 3.
\subsection{Performance Evaluation}
We use performance evaluation measures of the confusion matrix to evaluate the results of multi-class categorization. We analyze the results for each causal category and find overall accuracy of the model. Furthermore, we use two evaluation metrics of finding semantic similarity to evaluate explanations obtained by LIME and IG.
\subsubsection{Causal categorization:}
We categorize the text into one of the six categories as mentioned in the experimental results section and present the resulting values for multi-class classifiers in Table~\ref{tab1}. We observe an inconsistency in the results among different classes but consistency in the variation among classes for different classifiers. To this end, we observe the lowest F1 scores for causal category 1: \textit{Bias or Abuse}. The demonstration indicates errors among predictions for \textit{Alienation}/ \textit{Relationship} as they overlap with \textit{Bias or Abuse}. The complex interactions illustrated the perceivable overlap between
\textit{Bias or Abuse}
and
\textit{Relationship} in the following example:
\begin{quote}
\centering
\textit{My friends are ignoring me and I am feeling bad about it. I have lost all my friends and don't want to live anymore.}
\end{quote}
The given example is associated with \textit{biasing} and \textit{friendship},
in a case where someone feels ostracized by their friends.
The emphasis on \textit{friends} tips the balance in favor of the class \textit{Relationship}. However, the major challenge is to train the model in such a way that it understands the inferences and then chooses the most emphasized \textit{causal category} using optimization techniques.
We view this challenge as an open research direction.
\begin{table}[tbp]
\begin{center}
\caption{Performance evaluation of multi-class classifiers for causal categorization of mental illness on social media data where F1:C0, F1:C1, F1:C2, F1:C3, F1:C4 and F1:C5 define the F1-score for 6 categories: cause 0: `No reason', cause 1: `Bias or abuse', cause 2: `jobs and careers', cause 3: `medication', cause 4: `relationships', and cause 5: `alienation', respectively.}
\label{tab1}
\centering
\begin{tabular}{c|cccccc|c}
\hline
\textbf{Classifier} &\textbf{F1:C0} & \textbf{F1:C1} & \textbf{F1:C2} &\textbf{F1:C3} &\textbf{F1:C4} &\textbf{F1:C5} &\textbf{Accuracy}\\
\hline
LSTM & 0.55 & 0.30 & 0.36 & 0.45 & 0.55 & 0.25 & 0.4514 \\
BiLSTM & 0.59 & 0.25 & 0.53 & 0.44 & 0.58 & 0.43 & 0.5054 \\
CNN & 0.57 & 0.26 & 0.53 & 0.54 & 0.58 & 0.35 & 0.4919 \\
CNN-LSTM & 0.57 & 0.17 & 0.38 & 0.46 & 0.48 & 0.52 & 0.4784\\
\hline
\end{tabular}
\end{center}
\end{table}
There are two possible mitigation techniques to solve this problem: (i) Inconsistency among causal explanations/ inappropriate human-annotated inferences in the dataset, (ii) in-depth analysis of arguments and instances in self-reported text using discourse analysis. In this research work, we hypothesise that if there exists the inconsistency among F1 scores of different classes, there exists an inconsistency among corresponding causal explanations as well. We find causal explanations and validate the results with human-annotated inferences. To this end, we choose to handle the first mitigation approach, thereby, enlisting new frontiers.
\subsubsection{Explainability:}
In this section, we present the evaluation of resulting top-keywords using the LIME and IG methods. We use \textit{word mover distance} and \textit{cosine similarity} over distributional word representations of both inferences and resulting keywords. As observed in Table~\ref{tab2}, the explanations of \textit{Class 0: `No reason'} have maximum distance from human-annotated inferences for all methods. The reason is well-justified with the fact that Reddit posts having no reason behind the intent of a user may or may not choose random words from the entire text. These random words do not describe any reason and thus, are the farthest from human-generated inferences. Low values for all other classes signify the presence of patterns among explanations for other classes. We find \textit{class 2: jobs and careers}, and \textit{class 4: relationships} as the semantically most similar explanations achieved by deep learning methods.
\begin{table}[htbp]
\begin{center}
\caption{Values obtained for semantic similarity among resulting top-keywords and human-annotated inferences using \textit{Word Mover Distance}: More distance indicates less similarity among two different texts. }\label{tab2}
\centering
\begin{tabular}{p{7pc}|p{3pc}p{3pc}p{3pc}p{3pc}p{3pc}p{3pc}}
\hline
\textbf{Method used} &\textbf{Class0} & \textbf{Class1} & \textbf{Class2} &\textbf{Class3} &\textbf{Class4} &\textbf{Class5} \\
\hline
LSTM+LIME & 1.029 & 0.854 & 0.857 & 0.896 & \textbf{0.838} & 0.889 \\
LSTM+IG & 1.097 & 0.890 & 0.870 & 0.926 & \textbf{0.867} & 0.906 \\
BiLSTM+LIME & 1.029 & 0.880 & 0.865 & 0.886 & \textbf{0.852} & 0.876 \\
BiLSTM+IG & 1.117 & 0.900 & 0.898 & 0.919 & \textbf{0.870} & 0.908 \\
CNN+LIME & 1.042 & 0.820 & 0.831 & \textbf{0.817} & 0.823 & 0.843 \\
CNN+IG & 1.123 & 0.907 & 0.882 & 0.912 & \textbf{0.880} & 0.913 \\
CNN-LSTM+LIME & 1.018 & 0.843 & \textbf{0.831} & 0.848 & 0.851 & 0.863 \\
CNN-LSTM +IG & 1.117 & 0.913 & \textbf{0.869} & 0.918 & 0.874 & 0.890 \\
\hline
\end{tabular}
\end{center}
\end{table}
We further analyse the results for cosine similarity as shown in Table~\ref{tab3}. We give input as a string, tokenize the text, use GloVe word embeddings to obtain word vectors, and find the mean of word vectors (obtained for each token). Experimental results demonstrate \textit{class 2: jobs and careers}, and \textit{class 4: relationships} as the most similar explanations to the human-annotated inferences. \textit{Class 3: Medication}, being associated with medical terms, is expected to be semantically least similar as we would need a domain-specific distributional word representation for evaluation in this category. Thus, class 3 and class 0 illustrate low scores as compared to other classes.
\begin{table}[htbp]
\begin{center}
\caption{Values obtained for semantic similarity among resulting top-keywords and human-annotated inferences using \textit{Cosine Similarity}: The distance lies between 0 and 1}\label{tab3}
\centering
\begin{tabular}{p{7pc}|p{3pc}p{3pc}p{3pc}p{3pc}p{3pc}p{3pc}}
\hline
\textbf{Method used} &\textbf{Class0} & \textbf{Class1} & \textbf{Class2} &\textbf{Class3} &\textbf{Class4} &\textbf{Class5} \\
\hline
LSTM+LIME & 0.787 & 0.825 & \textbf{0.889} & 0.751 & 0.881 & 0.854 \\
LSTM+IG & 0.723 & 0.779 & \textbf{0.870} & 0.701 & 0.869 & 0.813 \\
BiLSTM+LIME & 0.784 & 0.821 & \textbf{0.881} & 0.751 & 0.867 & 0.857 \\
BiLSTM+IG & 0.716 & 0.773 & \textbf{0.866} & 0.709 & 0.865 & 0.814 \\
CNN+LIME & 0.776 & 0.835 & \textbf{0.898} & 0.822 & 0.894 & 0.861 \\
CNN+IG & 0.729 & 0.765 & \textbf{0.863} & 0.689 & \textbf{0.863} & 0.818 \\
CNN-LSTM+LIME & 0.781 & 0.831 & \textbf{0.878} & 0.811 & 0.868 & 0.852 \\
CNN-LSTM+IG &0.728 & 0.789 & 0.851 &0.690 &\textbf{0.870} &0.815 \\
\hline
\end{tabular}
\end{center}
\end{table}
\subsection{Ethical Considerations}
NLP researchers are responsible for transparency about computational research with sensitive data accessed during model design and deployment. We understand the significance of ethical issues while dealing with a delicate subject of mental health analysis. We use the publicly available dataset and do not plan to disclose any sensitive information about the stakeholders (social media users) thereby preserving the privacy of a user~\cite{conway2014ethical}.
We use publicly available pre-trained base models for our demonstration to avoid any ethical conflicts. We assure that we adhere to all ethical guidelines to solve this task. Development of fair AI technologies in mental healthcare supports unbiased clinical decision-making~\cite{uban2021explainability}. Our research work is fair and there is no intentional bias as we consider explainable causal categories for mental health on CAMS dataset.
\section{Conclusion and Future Scope}
We find the explanations for causal categorization of mental health in social media posts by using the LIME and IG methods, followed by performance evaluation by using human-annotated inferences in the CAMS dataset. We conclude our work with three key takeaways: (i) smaller variations among resulting values of all classes for causal explanations as compared to F1 scores in causal categorization validate the human-annotated interpretations for causal categorization; (ii) the results for \textit{Class 0: No reason} and \textit{Class 3: Medication} are least explainable due to randomization and the need of domain-specific analysis, respectively; (iii) the performance evaluation of explanations obtained using explainable NLP is possible with semantic similarity methods if human-annotated interpretations are predefined.
One of the path-breaking works on causal explanation in social media obtained explanations in the form of text~\cite{son2018causal}. The authors mentioned the complexity of this problem and made an attempt to resolve this issue by using discourses. However, the experiments were performed over a limited amount of Facebook data (\textit{often referred to as the Causal Explanation Analysis (CEA) dataset}) to classify the texts containing causal explanations and thereby extract causal explanations. Furthermore, causal explanation detection is performed on the CEA dataset by capturing the salient semantics of discourses contained in their keywords with a bottom graph-based word-level salient network~\cite{zuo2020towards}. In this context, we choose to propose domain-specific discourse relation embeddings~\cite{son2021discourse} as a potential future research direction of causal analysis.
\bibliographystyle{splncs04}
|
1,314,259,995,016 | arxiv | \section{Introduction}\label{sec1}
In some communication applications like military communications, it is required not only that the adversary should not decode the message,
but also that it should not detect the presence of communication. Such scenarios are called covert communications.
The fundamental limits of covert communications have been characterized mainly for point-to-point scenarios such as additive white Gaussian noise (AWGN) channel \cite{Bash:2013, Ligong:2016,Bloch:2016}, discrete memoryless channel \cite{Ligong:2016,Bloch:2016}, channels with uncertainty \cite{Che:2014,Liu:2018,Shahzad:2017,Lee:2017}, and channels with multiple antennas \cite{Abdelaziz:2017}. For the standard AWGN channel \cite{Bash:2013,Ligong:2016,Bloch:2016} where a warden utilizes $l$ channel uses to judge the presence of the communication, it was shown that the received power at the warden should be $\Theta(1/\sqrt{l})$ to satisfy the covertness, which in turn restricts the transmit power. Accordingly, the number of information bits that can be communicated covertly over AWGN channels with $l$ channel uses scales with $\sqrt{l}$ (called square root law).
Recently, the study on the fundamental limits of covert communications has been extended to various multi-user scenarios, including broadcast channels \cite{Vincent:2019}, multiple access channels \cite{Keerthi:2016}, interference channels \cite{Cho:2020}, and some multi-hop networks \cite{Wu:2017}, \cite{Sheikholeslami:2018}. As a more general setup, the throughput scaling of the covert communication over a wireless ad hoc network was studied in \cite{Cho:2019} where $n$ nodes of fixed locations want to communicate each other covertly against a set of non-colluding wardens. The authors \cite{Cho:2019} proposed multi-hop (MH) \cite{Gupta:2000} and hierarchical cooperation (HC) \cite{Ozgur:2007}-based schemes, which are modified by introducing a preservation region \cite{Jeon:2011} around each warden. By preventing the transmission of the nodes inside the preservation regions, the nodes outside the preservation regions can increase the transmission power while satisfying the covertness constraint.
In this paper, we study the effect of node mobility on the throughput scaling of the covert communication over the wireless ad hoc network. For the case without a covertness constraint, it is known that the mobility of nodes does not increase the capacity scaling when the network area is fixed, i.e., the capacity scaling is linear in $n$ for both cases with and without the mobility of nodes \cite{David:2002,Ozgur:2007}. Hence, it would be an interesting question whether the mobility can increase the throughput scaling in the presence of the covertness constraint. To that end, we assume that $n$ mobile nodes want to communicate with each other in a fixed area while keeping the presence of the communication secret from each of $\Theta(n^s)$ wardens ($s>0$). The locations of wardens can be fixed or vary over time. A practical scenario of our model would be the military situation where several soldiers are invading the enemy's area while the soldiers are keeping the presence of the communication secret from each enemy.
Interestingly, we show that the mobility of nodes greatly improves the throughput scaling of the covert communication over the wireless ad hoc network, compared to the case of fixed node location \cite{Cho:2019}. In particular, for $s<1$, while the linear throughput scaling is not possible unless the path loss exponent $\alpha$ is very close to 2 for the case of fixed node location, it is possible with node mobility when the number of channel uses that each warden uses to judge is not too large compared to $n$. For the achievability, we propose a two-hop based scheme where a source node transmits its message to the nearest node, and the node who took over the message forwards it to the intended destination node once the two nodes become close. In our scheme, we also set preservation regions around each warden so the transmissions are not allowed inside those regions. The area of preservation regions is chosen to make the fraction of nodes inside the regions negligible. Note that in our scheme, the communication from a source to its destination consists of two-hop small-range transmissions by exploiting the node mobility. In contrast, the proposed schemes in \cite{Cho:2019} for the case of fixed node location involve long-range transmission (HC-based scheme) or multiple hops (MH-based scheme). We note that the long-range transmission of the HC scheme does not degrade the performance in the absence of the covertness constraint as the received power is sufficiently large, but it does degrade under the covertness constraint as the network becomes power-limited.
We note that our scheme operates similarly as the two-hop based scheme \cite{David:2002}, which was proposed for a wireless ad hoc network without a covertness constraint, but there are some technicalities different from \cite{David:2002} due to the presence of the covertness constraint. First, the received power at each warden is precisely evaluated to determine the allowable transmit power at the senders. Next, as the transmit power is severely constrained due to the covertness constraint, the distance between a sender-receiver pair affects the order of the point-to-point communication rate between them. By taking this fact into account, we carefully analyze the communication rate based on the distribution of the distance between a sender-receiver pair.
The remainder of this paper is structured as follows. We introduce the network model and formulate the problem in Section~\ref{sec2}, and present the main results of this paper in Section~\ref{sec3}. To prove the main results, we first derive sufficient and necessary conditions for the covertness constraint in Section~\ref{sec4} and then prove the achievability and the converse parts in Sections~\ref{sec5} and \ref{sec6}, respectively. Finally, Section~\ref{sec7} concludes the paper with some further works.
\section{Problem Statement}\label{sec2}
\subsection{Network Model}\label{sec2A}
In a unit disk, $n$ nodes are uniformly and independently distributed in each discrete time unit $t$.\footnote{Here each time $t$ consists of several channel uses so that the communication rate of $\log (1+\sinr)$ between two communication parties is assumed to be achievable for each time unit, where $\sinr$ denotes the signal to interference plus noise ratio.} The random process governing the location of each node is assumed to be strict-sense stationary (SSS) and ergodic.
Each node is a source and a destination simultaneously and the $n$ source-destination pairs are randomly determined. In the same area, there are $n_w=\Theta(n^s)$ non-colluding wardens for $s>0$. We consider both cases where the wardens have mobility or not. For the case of no mobility, the wardens are uniformly and independently distributed and their locations are fixed across the time. For the other case, the location of the wardens can change in a SSS and ergodic manner. Each of the $n$ sources wants to communicate with its destination while keeping the presence of the communication secret from each warden. The covertness constraint is described in detail in the next subsection. The network is illustrated in Fig.~\ref{fig1}.
The received signal at node $j$ at time $t$ is given as
\begin{align}
Y_j[t] = \sum_{k=1}^{n}H_{jk}[t]X_k[t]+N_j[t],\label{eq:3}
\end{align}
where $X_k[t]$ is the transmitted signal by node $k$, $N_j[t]\sim\cC\cN(0,N_0)$ is the circular symmetric Gaussian noise with zero mean and variance $N_0$, and $H_{jk}[t]$ is the channel gain from node $k$ to node $j$ given by
\begin{align}
H_{jk}[t] = {\sqrt{G}\over{(d_{jk}[t])^{\alpha/2}}}\exp(j\theta_{jk}[t]). \label{eq:1}
\end{align}
Here, $d_{jk}[t]$ is the distance between nodes $k$ and $j$, $\alpha>2$ is the path loss exponent, $\theta_{jk}[t]$ is uniformly and independently distributed phase, and $G$ is given as
\begin{align}
G = \left( {\lambda\over{4\pi}}\right)^2G_sG_r\label{eq:2}
\end{align}
from Friis' formula,
where $G_s$ and $G_r$ are the antenna gains at the sender and the receiver, respectively, and $\lambda$ is the carrier wavelength. Each node has the same average power constraint of $P$. The channel state information is available only at the receivers and the delay toleration of data packets from source to destination is assumed to be sufficiently large. We suppose that all the sender-receiver pairs share a sufficiently long secret key.
The received signal at warden $w$ at time $t$ is
\begin{align}
Z_w[t] = \sum_{k=1}^{n}H'_{wk}[t]X_k[t]+N'_w[t],\label{eq:4}
\end{align}
where $N'_w[t]\sim\cC\cN(0,N_0)$ is the circular symmetric Gaussian noise with variance $N_0$ and $H'_{wk}$ is the channel gain from node $k$ to warden $w$ defined in a similar manner as \eqref{eq:1}.
\begin{figure}
\centering
\includegraphics[width=0.9\columnwidth]{HY1.png}
\caption{In a unit disk, $n$ mobile nodes want to communicate with each other while keeping the presence of the communication secret from each of $\Theta(n^s)$ wardens. }\label{fig1}
\end{figure}
\subsection{Covertness Constraint}\label{sec2B}
Each warden tests the received signal over $l$ channel uses to detect whether the nodes are communicating or not. The window of $l$ channel uses can be arbitrarily chosen over the whole communication.\footnote{The $l$ channel uses do not need to be consecutive in our analysis and the results continue to hold if the warden utilizes any arbitrary set of $l$ channel uses.} The communication is said to be covert if it is hard for the warden to determine whether the nodes are communicating (hypothesis $H_1$) or not (hypothesis $H_0$). The optimal hypothesis test of warden $w$ satisfies
\begin{align}
p(H_0|H_1)+p(H_1|H_0)&=1-V(Q_{Z_w^l} \| Q_{N'_{w}}^{\times l})\label{eq:5.2} \\
&\overset{(a)}\geq 1-\sqrt{D(Q_{Z_w^l} \| Q_{N'_{w}}^{\times l})}, \label{eq:5.3}
\end{align}
where $V(\cdot\|\cdot)$ is the total variational distance, $D(\cdot\|\cdot)$ is the Kullback-Leibler divergence, $Q_{Z_w^l}$ (resp. $Q_{N'_{w}}^{\times l}$) is the distribution of the received signal at warden $w$ over $l$ channel uses when the communication occurs (resp. does not occur). Here $N'_w\sim\cC\cN(0,N_0)$, and $Q_{N'_{w}}^{\times l}$ is the $l$-fold product of $Q_{N'_w}$. Inequality $(a)$ is by Pinsker's inequality, e.g.,~\cite{LehmannRomano:TSH}. Thus, if $D(Q_{Z_w^l} \| Q_{N'_{w}}^{\times l})$ is small, the optimal hypothesis test of warden $w$ is similar to a blind test, which satisfies $p(H_0|H_1)+p(H_1|H_0)=1$. Hence, we set the covertness constraint as
\begin{align}
D(Q_{Z_w^l} \| Q_{N'_{w}}^{\times l})\leq\delta \mbox{ for } w=1,2,...,n_w .\label{eq:5}
\end{align}
for some $\delta>0$.
\subsection{Long-Term Throughput}\label{sec2C}
We note that the throughput from a source to its destination varies over time since the nodes have mobility. Suppose node $j$ communicates with its destination node $k$ at time $t$ with the rate of $R_{jk}(n,s,t)$, while satisfying the covertness constraint \eqref{eq:5} for all the wardens. The long-term throughput $\lambda(n,s)$ is said to be feasible if
\begin{align}
\mathop{\mathrm{lim}}_{T\rightarrow\infty} {1\over T}{\sum_{t=1}^{T}R_{jk}(n,s,t)} \geq \lambda(n,s)\label{eq:6}
\end{align}
for all source-destination pairs $(j,k)$. The goal of this paper is to characterize the scaling of the maximally achievable aggregate throughput.
\section{Main results}\label{sec3}
In this section, we state our main results. First, achievable aggregate throughputs are presented in Theorems \ref{thm1} and \ref{thm2}, which are proved in Section \ref{sec5}. For the converse, a trivial upper bound on the aggregate throughput is given in Theorem \ref{thm_ub} and some nontrivial upper bounds under an assumption are given in Theorems \ref{thm3} and \ref{thm4}, which are proved in Section \ref{sec6}.
\begin{theorem}\label{thm1}
For $0< s<1$, the following aggregate throughput is achievable with covertness constraint $\delta$ and testing channel length $l$ for any $\epsilon>0$:
\begin{align}
T(n,s)=\Theta\left(n^{1-\epsilon}\cdot \min \left(\left({n^{(1/2-s/2)(\alpha-2)}}\over \sqrt{l}\right)^{2/\alpha}, 1 \right) \right),\label{eq:7}
\end{align}
with high probability (probability going to $1$ as $n$ goes to infinity).
\end{theorem}
\begin{theorem}\label{thm2}
For $s\geq1$, the following aggregate throughput is achievable with covertness constraint $\delta$ and testing channel length $l$ for any $\epsilon>0$:
\begin{align}
T(n,s) = \Theta\left(n^{1-\epsilon}\cdot \left({n^{{{\alpha}(1/2-s/2)}}\over\sqrt{l}}\right)^{2/\alpha}\right),\label{eq:9}
\end{align}
with high probability.
\end{theorem}
It is known that the aggregate throughput of $\Theta(n^{1-\epsilon})$ for any $\epsilon>0$ is achievable for a wireless ad hoc network without any covertness constraint \cite{David:2002}. In the presence of the covertness constraint, our achievable aggregate throughput becomes $\Theta(n^{1-\epsilon})$ when the number of nodes is smaller than the number of wardens and the testing channel length $l$ is sufficiently small compared to the number of nodes. This makes sense since the transmission power would be restricted more severely as more wardens observe more channel outputs.
Let us briefly describe our proposed scheme and provide a sketch of the proof. The details are in Section \ref{sec5}. Our scheme operates in two phases similarly as the scheme in \cite{David:2002}. In each time slot $t$, a certain fraction of $n$ nodes operate as senders and the others as potential receivers. Each sender communicates with the nearest receiver (sender-receiver pair). The senders and the receivers play the roles of sources and relays, respectively, in phase 1 (odd times), and the roles of relays and destinations, respectively, in phase 2 (even times). In phase 1, each node selected as a sender transmits its own source data packet to the receiver. In phase 2, each node selected as a sender selects and transmits the data packet intended for the receiver among the stored data packets. In this process, a sender does not transmit if it is inside a certain area around any warden, which we call a preservation region. The area of preservation regions is chosen to make the fraction of nodes inside preservation regions negligible.
Now, the throughput scalings in Theorems \ref{thm1} and \ref{thm2} are roughly derived as follows (a rigorous proof is in Section \ref{sec5}):
\begin{align}
T(n,s)& \approx{1\over2}\cdot{\mathop{\mathrm{E}}} \left[\sum_k \log (1+\sinr(r_k)) \right]\label{eq:9.01}\\
&= {1\over2}\cdot {\mathop{\mathrm{E}}} \left[\sum_{k: \sinr(r_k)=\Omega(1)} \log (1+\sinr(r_k)) \right. \cr
&~~~+ \left. \sum_{k: \sinr(r_k)=o(1)} \log (1+\sinr(r_k))\right] \\
&\overset{(a)}\gtrsim{1\over2}\cdot{\mathop{\mathrm{E}}} \left[ \sum_{k: \sinr(r_k)=\Omega(1)} \log (1+\sinr(r_k)) \right] \\
&\overset{(b)}\gtrsim n\cdot p(\sinr(r)=\Omega(1)) \Theta(1)\\
&\overset{(c)}\approx{n \cdot p(r \leq r_m) \cdot \Theta(1)}\label{eq:9.1}\\
&\overset{(d)}\approx {n \cdot \min(n r_m^2,1) \cdot \Theta(1),}\label{eq:9.2}
\end{align}
where the expectations are with respect to the random locations of nodes, $r_k$ is the distance between the $k$-th sender-receiver pair, $r$ is the distance between a randomly chosen sender-receiver pair, and $r_m$ is the distance between the sender-receiver pair when $\sinr(r_m)=\Theta(1)$. Here, we have the factor of $\frac{1}{2}$ since our scheme operates in two phases. We can show that $(a)$ is tight in the sense of scaling by noting that the probability that a sender-receiver pair has a distance of order $r$ is proportional to $r^2$ and $\sinr$ is proportional to $r^{-\alpha}$, and $(b)$ is tight up to the logarithmic order. Also, $(c)$ follows from the property that $\sinr(r)$ is a decreasing function in $r$, and $(d)$ is because the probability that a sender-receiver pair has a smaller distance than $r_m$ is proportional to the product of $r_m^2$ and $n$ since the corresponding receiver to a sender is the nearest receiver to the sender among other receivers, and because the probability does not exceed 1. The throughput scalings in Theorems \ref{thm1} and \ref{thm2} are obtained by deriving $r_m$ for each case of $0<s<1$ and $s\geq 1$, while Theorem \ref{thm2} has no minimum term because $n r_m^2$ cannot exceed $1$ in that case.
Next, the following is a trivial upper bound on the aggregate throughput scaling, which is the upper bound without the covertness constraint.
\begin{theorem}\label{thm_ub}
For $s\geq 0$, the aggregate throughput with covertness constraint $\delta$ and testing channel length $l$ is upper-bounded as follows for any $\epsilon>0$:
\begin{align}
T(n,s)=O\left(n^{1+\epsilon} \right),\label{eq:ub}
\end{align}
with high probability.
\end{theorem}
Hence, for $0<s<1$, if the testing channel length $l$ is sufficiently small so that the minimum in \eqref{eq:7} becomes one, the aggregate throughput scaling in Theorem \ref{thm1} is tight.
The difficulty in proving a non-trivial upper bound comes from the fact that the distances between the senders and the wardens, which are related to the upper bound on the transmit power from the covertness constraint, and the distances between the senders and the receivers, which affect the transmission rate, independently vary over time. The optimal transmit power control in such a scenario seems to be a challenging problem. As an alternative, for the upper bound, we assume that each node distant from every warden to a certain extent uses the same power at each channel use.
\begin{theorem}\label{thm3}
For $0<s<1$, the aggregate throughput with covertness constraint $\delta$ and testing channel length $l$ is upper-bounded as follows for any $\epsilon>0$ under the assumption that each node not contained in the regions of radius $\Theta(n^{-(\frac{s}{2}+\epsilon')})$ around each warden for an arbitrarily small $\epsilon'>0$ uses the same power at each channel use:
\begin{align}
T(n,s)=O\left(n^{1+\epsilon}\cdot \min \left(\left({n^{(1/2-s/2)(\alpha-2)}}\over \sqrt{l}\right)^{2/\alpha}, 1 \right) \right),\label{eq:10}
\end{align}
with high probability.
\end{theorem}
\begin{theorem}\label{thm4}
For $s\geq1$, the aggregate throughput with covertness constraint $\delta$ and testing channel length $l$ is upper-bounded as follows for any $\epsilon>0$ under the assumption that each node not contained in the regions of radius $\Theta(n^{-(\frac{s}{2}+\epsilon')})$ around each warden for an arbitrarily small $\epsilon'>0$ uses the same power at each channel use:
\begin{align}
T(n,s)=O\left(n^{1+\epsilon}\cdot \left({1\over\sqrt{l}}\right)^{2/\alpha}\right),\label{eq:12}
\end{align}
with high probability.
\end{theorem}
We note that the aggregate throughput scaling in Theorem \ref{thm1} for $0<s<1$ is tight under the assumption that each node not contained in the regions of radius $\Theta(n^{-(\frac{s}{2}+\epsilon')})$ around each warden for an arbitrarily small $\epsilon'>0$ uses the same power at each channel use. The radius of $\Theta(n^{-(\frac{s}{2}+\epsilon')})$ is set the same as that of the preservation region introduced in our achievability scheme. Hence, for $0<s<1$, our proposed scheme is scaling-optimal if the nodes outside the preservation regions are not allowed to change the transmit power over time. However, there exists a gap between the lower and upper bounds in Theorems \ref{thm2} and \ref{thm4} for $s\geq 1$. Such a non-tightness comes from the gap between the pessimistic and optimistic derivations of the distance between a sender and the nearest warden.
\begin{remark}\label{remark_1}
Let us compare our results to the case without node mobility \cite{Cho:2019}, which proposed HC \cite{Ozgur:2007} and MH \cite{Gupta:2000} based schemes.
Interestingly, our throughput scaling with node mobility is strictly higher than that without mobility. The HC scheme contains long-range MIMO transmissions and hence the received SNR is much smaller compared to short-range transmissions for the same transmit power determined from the covertness constraint, which degrades the throughput scaling and results in a throughput gap that increases as $\alpha$ increases. On the other hand, the MH scheme consists of small-range transmissions, but it requires multiple hops for a source message to finally reach its destination, which degrades the throughput scaling and results in the throughput gap of $\Omega(n^{1/2})$. Our scheme performs better as the communication from a source to its destination consists of two-hop small-range transmissions, by exploiting the node mobility.
\end{remark}
\section{Sufficient and Necessary Conditions of Covertness}\label{sec4}
In this section, we derive sufficient and necessary conditions for the covertness constraint, which are used for the derivations of lower and upper bounds on the aggregate throughput scaling, respectively.
\subsection{Sufficient Condition of Covertness}\label{sec4A}
In the proposed scheme in Section \ref{sec5B}, each node uses i.i.d. complex Gaussian codebook. For such a code, the Kullback-Leibler divergence \eqref{eq:5} is upper-bounded as:
\begin{align}
D(Q_{Z_w^l} \| Q_{N'_{w}}^{\times l}) &\overset{(a)}=\sum_{u=1}^{l}D(Q_{Z_{w,u}} \| Q_{N'_{w}}) \\
&= \sum_{u=1}^{l}D(\cC\cN(0,\rho_{w,u}+N_0) \| \cC\cN(0,N_0))\label{eq:13.1} \\
&\leq l\cdot D(\cC\cN(0,\rho_{wm}+N_0) \| \cC\cN(0,N_0)) \label{eq:13}\\
&\overset{(b)}= l \cdot \left({\rho_{wm}\over N_0}-\log{{N_0+\rho_{wm}}\over N_0}\right)\label{eq:14} \\
&\overset{(c)}\leq l \cdot \left({\rho_{wm}\over N_0}-\left({\rho_{wm}\over N_0}-{\rho_{wm}^2\over{2 N_0^2}}\right)\right)\label{eq:15} \\
&= {{l\cdot \rho_{wm}^2}\over{2 N_0^2}},\label{eq:16}
\end{align}
where $Z_{w,u}$ is the received signal of warden $w$ at channel use $u$, $\rho_{w,u}$ is the power of $Z_{w,u}$, and $\rho_{wm}$ is defined by $\max(\rho_{w,1}, ...,\rho_{w,l} )$. Here, $(a)$ is due to the use of the i.i.d. complex Gaussian codebook, $(b)$ is proved in {\cite[Equation (80)]{Ligong:2016}}, and $(c)$ follows from $\log(1+a) \geq a -{a^2\over 2}$ for $a>0$.
From \eqref{eq:16}, the covertness constraint \eqref{eq:5} is satisfied if the following inequality holds
\begin{align}
\rho_{wm} &\leq \sqrt{2} N_0 \sqrt{\delta \over l} .\label{eq:18}
\end{align}
\subsection{Necessary Condition of Covertness}\label{sec4B}
The Kullback-Leibler divergence \eqref{eq:5} can be lower-bounded as:
\begin{align}
D&(Q_{Z_w^l} \|Q_{N'_{w}}^{\times l}) = -\mathop{\mathrm{h}} (Z_w^l)+{\mathop{\mathrm{E}}}_{{Z_w^l}}\left[\log{1 \over Q_{N'_w}^{\times l}(Z_w^l)}\right]\label{eq:19} \\
&= \sum_{u=1}^{l}\left( -\mathop{\mathrm{h}} (Z_{w,u}|Z_w^{u-1})+{\mathop{\mathrm{E}}}_{{Z_{w,u}}}\left[\log{1 \over Q_{N'_{w}}(Z_{w,u})}\right]\right)\label{eq:20}\\
&\geq \sum_{u=1}^{l}\left( -\mathop{\mathrm{h}} (Z_{w,u})+{\mathop{\mathrm{E}}}_{{Z_{w,u}}}\left[\log{1 \over Q_{N'_{w}}(Z_{w,u})}\right] \right)\label{eq:21} \\
&=\sum_{u=1}^{l} D(Q_{Z_{w,u}} \|Q_{N'_{w}}) \label{eq:22}\\
&\overset{(a)}\geq l \cdot D(Q_{\bar Z_{w}} \|Q_{N'_{w}}),\label{eq:23}
\end{align}
where $Z_w^{u-1}$ is the received signal of warden $w$ up to channel use $u-1$ and $Q_{\bar Z_{w}} = {1\over l}\sum_{u=1}^{l} Q_{Z_{w,u}}$ is the average distribution of received signal at warden $w$ over $l$ channel uses. The inequality $(a)$ is due to the convexity of Kullback-Leibler divergence.
By \eqref{eq:23}, if the covertness constraint \eqref{eq:5} is satisfied, then it implies
\begin{align}
D(Q_{\bar Z_{w}} \|Q_{N'_{w}}) \leq {\delta \over l}.\label{eq:24}
\end{align}
Furthermore, the marginalized covertness constraint \eqref{eq:24} can be lower-bounded as:
\begin{align}
D(Q_{\bar Z_{w}} &\|Q_{N'_{w}}) = -\mathop{\mathrm{h}}({\bar Z_w})+{\mathop{\mathrm{E}}}_{\bar Z_w}\left[\log{1 \over Q_{N'_{w}}(\bar Z_{w})}\right]\label{eq:25} \\
&= -\mathop{\mathrm{h}}({\bar Z_w}) + {\mathop{\mathrm{E}}}_{\bar Z_w}\left[\log{\left(\pi N_0 \exp\left({|\bar Z_w|^2 \over N_0}\right)\right)}\right] \label{eq:26}\\
&= -\mathop{\mathrm{h}}({\bar Z_w}) + \log{\pi N_0} + {\mathop{\mathrm{E}}}_{\bar Z_w}\left[{|\bar Z_w|^2 \over N_0}\right] \label{eq:27}\\
&\overset{(a)}= -\mathop{\mathrm{h}}({\bar Z_w}) + \log{\pi N_0} + {{\bar \rho_w + N_0 }\over N_0} \label{eq:28}\\
\begin{split}\label{eq:29}
&\overset{(b)}\geq -\log{(\pi e(\bar\rho_w+N_0))}+\log{\pi N_0}\\
&+{{\bar \rho_w + N_0 }\over N_0}
\end{split}
\\
&={{\bar \rho_w }\over N_0} -\log{{\bar\rho_w+N_0}\over N_0},\label{eq:30}
\end{align}
where $\bar\rho_w = {1\over l}\sum_{u=1}^{l} \rho_{w,u} $ is the average received power at warden $w$ over $l$ channel uses. Here, $(a)$ is because ${\mathop{\mathrm{E}}}_{\bar Z_w}[{|\bar Z_w|^2 }]$ is the same as ${\bar \rho_w + N_0 }$ since $\bar Z_w$ has the average distribution of $Z_{w,1},...,Z_{w,l}$, and $(b)$ holds since the differential entropy is maximized by the Gaussian distribution when the second moment is fixed. Since ${\bar \rho_w }$ goes to zero as $l$ goes to infinity, by Taylor expansion, \eqref{eq:30} becomes
\begin{align}
D(Q_{\bar Z_{w}} \|Q_{N'_{w}}) \geq {{\bar \rho_w^2 }\over 2N_0^2} + o({\bar \rho_w^2 }).\label{eq:31}
\end{align}
Consequently, by \eqref{eq:24} and \eqref{eq:31}, if the covertness constraint \eqref{eq:5} is satisfied, then it implies
\begin{align}
{\bar \rho_w } \leq \sqrt{2}N_0\sqrt{\delta \over l}+o(l^{-1/2}).\label{eq:32}
\end{align}
\section{Achievability}\label{sec5}
In this section, we prove Theorems \ref{thm1} and \ref{thm2}. We first introduce preservation regions in Section \ref{sec5A} and explain our two-hop scheme in Section \ref{sec5B}. Then, the achievable aggregate throughput scalings are derived in Sections \ref{sec5C} and \ref{sec5D}. We note that our proof does not rely on whether the wardens have mobility or not.
\subsection{Preservation Region}\label{sec5A}
In Section \ref{sec4A}, we show that a sufficient condition for the covertness constraint is to make the received power at each warden less than some threshold. To increase the transmission power while keeping the received power at the wardens small, we introduce a preservation region of certain radius around each warden, in which the senders do not transmit. Let $r_p$ denote the radius of the preservation regions. As we increase $r_p$, the nodes outside the preservation regions can transmit with a higher power, but the area $\epsilon(n_w, r_p)=\Theta(n_wr_p^2)$ of all the preservation regions also increases. Hence, we set $r_p= \Theta(n^{-({s\over2}+\epsilon)})$ for an arbitrarily small $\epsilon>0$, which is the maximum radius while satisfying $\epsilon(n_w, r_p)\rightarrow 0$.
In the scenarios \cite{Cho:2019}, \cite{Jeon:2011} where the node locations are fixed, such an introduction of preservation regions results in outage, i.e., $\epsilon(n_w, r_p)$ fraction of nodes do not participate in the whole communication at all. However, for our scenario where the nodes have mobility, each node is in outage only for $\epsilon(n_w, r_p)$ fraction of time and hence there are no nodes in outage.
\subsection{Two-Hop Scheme}\label{sec5B}
In the following, we explain our two-hop scheme. The scheme is modified from the scheme in \cite{David:2002} by considering the covertness constraint and preservation regions.
\begin{itemize}
\item For each time $t$, divide $n$ nodes into $\theta n$ senders and $(1-\theta)n$ receivers, where $0<\theta<1$ is fixed for the whole communication time.
\item The sender-receiver pairs are determined in a way that each sender is paired with the nearest receiver for each time $t$. This pairing rule is called sender-centric and is illustrated in Fig.~\ref{fig2}.
\item Each sender not in a preservation region transmits to its receiver using an i.i.d. complex Gaussian codebook with zero mean and variance of $P_{\rm{tx}}$, which is determined in Section \ref{sec5C} to satisfy the covertness constraint. The senders in a preservation region do not transmit.
\item The overall communication is divided into two phases: phase 1 is activated in odd times and phase 2 is activated in even times. The data packets that the senders transmit to their receivers in each phase are as follows.
\begin{enumerate}
\item Phase 1: The senders and the receivers play the roles of sources and relays, respectively. As shown in Fig. \ref{fig3}, each sender transmits its own source data packets (that have not been transmitted before) to its receiver. If the receiver is the final destination, this corresponds to direct transmission. Otherwise, the receiver keeps this packet and relays to the final destination in phase 2.
\item Phase 2: The senders and the receivers play the roles of relays and destinations, respectively. As shown in Fig. \ref{fig4}, each sender selects and transmits the data packet destined for the receiver among the stored data packets. Direct transmission is also possible if the sender is the source node of the receiver node.
\end{enumerate}
\end{itemize}
\begin{figure}
\centering
\includegraphics[width=0.9\columnwidth]{HY2.png}
\caption{Each sender transmits data packets to the nearest receiver when the sender is not in a (shaded) preservation region.}\label{fig2}
\end{figure}
We note that this two-hop scheme exploits multi-user diversity by letting some nodes operate as relays. In the absence of the covertness constraint, it was shown in \cite{David:2002} that such a scheme can achieve the maximum throughput $\Theta(n)$ because the source can transmit data packets independently of the distance to the destination by transmitting to a relay close to the source. In \cite{David:2002}, it is shown that the aggregate throughput of the direct transmission scheme, where each source transmits data packets only when the destination is sufficiently close, cannot achieve $\Theta(n)$ because it does not use the multi-user diversity in transmitting data packets.
Although the network operation of our scheme seems to be similar to that in \cite{David:2002}, there are some technicalities different from \cite{David:2002} due to the presence of the covertness constraint. First, the received power at each warden is precisely evaluated to determine the allowable transmit power at the senders in Section~\ref{sec5C}. As the transmit power is severely constrained due to the covertness constraint, the distance between a sender-receiver pair affects the order of the point-to-point communication rate between them. By taking this fact into account, we carefully analyze the communication rate based on the distribution of the distance between a sender-receiver pair in Section \ref{sec5D}.
Note that at the beginning of the communication, the senders might have no data packet destined for the receiver in phase~2. However, as the communication process proceeds and phase~1 takes place several times, every node gradually collects and stores data packets of all the other nodes. In the following proof, we assume such a steady state and hence assume that each sender has a data packet destined for the receiver in phase~2.
\begin{figure}
\centering
\includegraphics[width=0.9\columnwidth]{HY3.png}
\caption{In phase 1, the senders and the receivers play the roles of sources and relays, respectively. Each sender, not in a (shaded) preservation region, transmits its own source data packets to its receiver. If the receiver is the final destination, this corresponds to direct transmission. }\label{fig3}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=0.9\columnwidth]{HY4.png}
\caption{In phase 2, the senders and the receivers play the roles of relays and destinations, respectively. Each sender, not in a (shaded) preservation region, selects and transmits the data packet destined for the receiver among the stored data packets. Direct transmission is also possible if the sender is the source node of the receiver node. }\label{fig4}
\end{figure}
\begin{remark}\label{remark_3}
The sender-receiver pairs can be alternatively determined in a way that each receiver is paired with the nearest sender for each time $t$ (receiver-centric). It can be checked that Theorems \ref{thm1} and \ref{thm2} continue to hold for the scheme operating according to the receiver-centric basis.
\end{remark}
\subsection{Allowable Transmission Power}\label{sec5C}
In this subsection, we derive an allowable transmission power. We start by stating how many nodes are inside a certain area with high probability.
\begin{lemma}\label{lem1}
Suppose that $n$ nodes are uniformly and independently distributed in a unit area. Then, the number of nodes in a region of area $A(n)$ is between $((1-\delta)nA(n),(1+\delta)nA(n))$ with a probability larger than $1-{{1-A(n)}\over{\delta^2 n A(n)}}$ for any $\delta>0$.
\end{lemma}
\begin{proof}
We note that the number of nodes in a region of area $A(n)$ corresponds to a sum of i.i.d. Bernoulli random variables \cite{Ozgur:2007}. The probability that a node is in the region is $A(n)$. Hence, the number of nodes in the region can be expressed as $\sum_{i=1}^{n}B_i$ where $B_i$'s are i.i.d. Bernoulli random variables with $p(B_i=1)=A(n)$. Then,
\begin{align}
&p(|\mbox{number of nodes in }A(n)-nA(n)|>\delta nA(n)) \label{eq:36}\\
&=p\left(\left|\sum_{i=1}^{n}B_i-nA(n)\right|>\delta nA(n)\right)\label{eq:38} \\
\begin{split}\label{eq:39}
&=p\left(\left|\sum_{i=1}^{n}B_i-\mathop{\mathrm{E}}\left(\sum_{i=1}^{n}B_i\right)\right| \right. \\
&\ \ \ \ \ \ \ \ \ \ \ \ \ \left. >\frac{{\delta\sqrt{ nA(n)}}}{\sqrt{1-A(n)}} \sqrt{\mathop{\mathrm{Var}} \left(\sum_{i=1}^{n}B_i\right)}\right)
\end{split}\\
&\overset{(a)}<{{1-A(n)}\over{\delta^2 nA(n)}},\label{eq:40}
\end{align}
for any $\delta>0$, where $\mathop{\mathrm{E}}(\sum_{i=1}^{n}B_i)=nA(n)$, and $\mathop{\mathrm{Var}} (\sum_{i=1}^{n}B_i)=nA(n)(1-A(n))$ because $\mathop{\mathrm{Var}}(B_i)=A(n)(1-A(n))$ and $B_1, ..., B_n$ are i.i.d. random variables. The inequality $(a)$ is by Chebyshev's inequality.
\end{proof}
The following corollary is a direct consequence of Lemma~\ref{lem1}.
\begin{corollary}\label{cor1}
Suppose that $n$ nodes are uniformly and independently distributed in a unit area. Then, the number of nodes in the region of area $A(n)=\omega(1/n)$ is between $((1-\delta)nA(n),(1+\delta)nA(n))$ with high probability for any $\delta>0$.
\end{corollary}
Now, we show an allowable transmission power from the sufficient condition for the covertness constraint in Section \ref{sec4A} by deriving an upper bound on the received power at a warden. To derive the upper bound, we first consider a set of disjoint rings, centered at the warden, that covers the whole network as shown in Figs. \ref{fig5} and \ref{fig6}. Then we add up the received power at the warden that the senders in each ring contribute to. In this procedure, we bound the number of senders in each ring by using Corollary \ref{cor1}. Since the corollary is applicable only when the area is $\omega(1/n)$, the width of the smallest ring depends on whether the area of a preservation region is $\omega(1/n)$ or not (corresponding to whether $0<s<1$ or $s\geq 1$), which results in different bounds. The following lemmas present allowable transmission powers satisfying the covertness constraint for $0<s<1$ and $s\geq 1$.
\begin{lemma}\label{lem2}
Let each node transmit using complex Gaussian codebook with zero mean and variance of $P_{\rm{tx}}$. If $0<s<1$, then $P_{\rm{tx}}=\Theta(l^{-{1\over2}}{n^{-({s\over2}(\alpha-2)+1)-\epsilon}})$ {for any $\epsilon>0$} satisfies the covertness constraint with high probability.
\end{lemma}
\begin{proof}
Let $s_i(u)$ and $c_w(u)$ be the locations of sender $i$ and warden $w$ at the $u$-th channel use the warden observes, respectively. As shown in Fig. \ref{fig5}, we consider a set of disjoint rings with width $n^{-1/2}$, centered at the warden, that covers the whole network except the preservation region around warden $w$. Let $R_{1\beta}$ denote the $(\beta+1)$-th smallest ring. Then, the received power at warden $w$ at channel use $u$, $\rho_{w,u}$, is upper-bounded as:
\begin{align}
&\rho_{w,u}\overset{(a)}\leq \sum_{i:|s_i(u)-c_w(u)|\geq r_p}P_{\rm{tx}}(i,u)\cdot {G\over{|s_i(u)-c_w(u)|^\alpha}}\label{eq:41}\\
&\overset{(b)}\leq\sum_{\beta=0}^{\kappa_1(n,u)}\sum_{i\in \cR_{1\beta}}P_{\rm{tx}}(i,u)\cdot {G\over(r_p+\beta n^{-1/2})^\alpha}\label{eq:41.1}\\
&\overset{(c)}\leq\sum_{\beta=0}^{\kappa_1(n,u)}P_{\rm{tx}}(1+\epsilon'')\cdot {G\over(r_p+\beta n^{-1/2})^\alpha}\cdot |\cR_{1\beta}|\label{eq:42}\\
&\overset{(d)}\leq\sum_{\beta=0}^{\kappa_1(n,u)}P_{\rm{tx}}(1+\epsilon'')\cdot {G\over(r_p+\beta n^{-1/2})^\alpha}\cdot (1+\delta)\theta nA(R_{1\beta})\label{eq:42.1}\\
\begin{split}\label{eq:43}
&\overset{(e)}\leq\sum_{\beta=0}^{\kappa_1(n,u)}P_{\rm{tx}}(1+\epsilon'')\cdot {G\over(r_p+\beta n^{-1/2})^\alpha}\\
& \ \ \ \ \cdot (1+\delta)\theta n 2\pi(r_p+(\beta+1)n^{-1/2})n^{-1/2}
\end{split}\\
&\overset{(f)}\leq\int_{r_p-n^{-1/2}}^{{1/\sqrt{\pi}}+|c_w(u)|} \!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\! P_{\rm{tx}}(1+\epsilon'')\cdot{G\over x^\alpha}\cdot (1+\delta)\theta n2\pi(x+n^{-1/2})dx\label{eq:44}\\
&\leq K_1 P_{\rm{tx}} n r_p^{2-\alpha},\label{eq:45}
\end{align}
with high probability {for any $\epsilon''>0$ and $\delta>0$}, where $P_{\rm{tx}}(i,u)$ is the transmission power of sender $i$ at channel use $u$, $\kappa_1(n,u)={\lfloor{{1/\sqrt{\pi}+|c_w(u)|-r_p}\over n^{-1/2}}\rfloor}$ is the number of rings needed to cover the whole network, $\cR_{1\beta}$ is the set of the senders in $R_{1\beta}$, $A(R_{1\beta})$ is the area of $R_{1\beta}$, and {$K_1$ is a positive constant independent with $n$.} Here, $(a)$ is since the senders in the preservation regions do not transmit, $(b)$ is by assuming that the senders in each ring are at the boundary close to warden $w$, $(c)$ is by using the weak law of large numbers (WLLN), $(d)$ is from Corollary \ref{cor1}, $(e)$ is by upper bounding $A(R_{1\beta})$, and $(f)$ is due to the Riemann sum. Since \eqref{eq:45} holds for arbitrary channel use $u$, $\rho_{wm}$ is upper-bounded as:
\begin{align}
\rho_{wm} \leq K_1 P_{\rm{tx}} n r_p^{2-\alpha}.\label{eq:46}
\end{align}
By \eqref{eq:18} and \eqref{eq:46}, the covertness constraint is satisfied if $K_1 P_{\rm{tx}} n r_p^{2-\alpha} \leq \sqrt{2} N_0 \sqrt{\delta \over l}$, or $P_{\rm{tx}}\leq K'_1 l^{-{1\over2}}n^{-1} r_p^{\alpha-2}$ for a constant $K'_1$ independent with $n$. Since $r_p=\Theta(n^{-(s/2+\epsilon')})$ for {any $\epsilon'>0$}, we conclude that $P_{\rm{tx}}=\Theta(l^{-{1\over2}}{n^{-({s\over2}(\alpha-2)+1)-\epsilon}})$ for {any $\epsilon>0$} satisfies the covertness constraint.
\end{proof}
\begin{figure}
\centering
\includegraphics[width=0.9\columnwidth]{HY5.png}
\caption{A set of disjoint rings to derive an upper bound on the received power at warden $w$ for $0<s<1$. Here, $R_{1\beta}$ denotes the $(\beta+1)$-th smallest ring and each ring has width $n^{-1/2}$.} \label{fig5}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=0.9\columnwidth]{HY6.png}
\caption{A set of disjoint rings to derive an upper bound on the received power at warden $w$ for $s\geq 1$. Here, $R_{2\beta}$ denotes the $(\beta+1)$-th smallest ring and each ring except the smallest has width $n^{-1/2}$. The smallest ring has width $n^{-1/2}\log n-r_p$.}\label{fig6}
\end{figure}
\begin{lemma}\label{lem3}
Let each node transmit using complex Gaussian codebook with zero mean and variance of $P_{\rm{tx}}$. If $s\geq1$, then $P_{\rm{tx}}=\Theta(l^{-{1\over2}}{n^{-{s\alpha\over2}-\epsilon}})$ {for any $\epsilon>0$} satisfies the covertness constraint with high probability.
\end{lemma}
\begin{proof}
Let $s_i(u)$ and $c_w(u)$ be the locations of sender $i$ and warden $w$ at the $u$-th channel use the warden observes, respectively. The proof is similar to that of Lemma \ref{lem2}, but now we consider a set of disjoint rings where the smallest ring has width $n^{-1/2}\log n-r_p$ to apply Corollary \ref{cor1}, while the width of the other rings remains as $n^{-1/2}$ as shown in Fig. \ref{fig6}. Let $R_{2\beta}$ denote the $(\beta+1)$-th smallest ring. Then, $\rho_{w,u}$ is upper-bounded as:
\begin{align}
&\rho_{w,u}\leq \sum_{i:|s_i(u)-c_w(u)|\geq r_p}\!\!\!\!\!P_{\rm{tx}}(i,u)\cdot {G\over{|s_i(u)-c_w(u)|^\alpha}}&& \label{eq:47}\\
\begin{split}\label{eq:48}
&\overset{(a)}\leq\sum_{i\in \cR_{20}}P_{\rm{tx}}(i,u)\cdot {G\over r_p^{\alpha}}+\sum_{\beta=1}^{\kappa_2(n,u)}\sum_{i\in \cR_{2\beta}}P_{\rm{tx}}(i,u)\\
&\cdot {G\over(n^{-1/2}\log n+(\beta-1) n^{-1/2})^\alpha}
\end{split}\\
\begin{split}\label{eq:48.1}
&\leq P_{\rm{tx}}(1+\epsilon'')\cdot {G\over r_p^{\alpha}}\cdot |\cR_{20}|+\sum_{\beta=1}^{\kappa_2(n,u)}P_{\rm{tx}}(1+\epsilon'') \\
&\cdot {G\over(n^{-1/2}\log n+(\beta-1) n^{-1/2})^\alpha}\cdot |\cR_{2\beta}|
\end{split}\\
\begin{split}\label{eq:48.2}
&\leq P_{\rm{tx}}(1+\epsilon'')\cdot {G\over r_p^{\alpha}}\cdot (1+\delta)\theta nA(R_{20})+\sum_{\beta=1}^{\kappa_2(n,u)}P_{\rm{tx}}(1+\epsilon'') \\
&\cdot {G\over(n^{-1/2}\log n+(\beta-1) n^{-1/2})^\alpha}\cdot (1+\delta)\theta nA(R_{2\beta})
\end{split}\\
\begin{split}\label{eq:49}
&\leq P_{\rm{tx}}(1+\epsilon'')\cdot {G\over r_p^{\alpha}}\cdot (1+\delta)\theta n2\pi(n^{-1/2}\log n)^2\\
&+\sum_{\beta=1}^{\kappa_2(n,u)} P_{\rm{tx}}(1+\epsilon'')\cdot {G\over(n^{-1/2}\log n+(\beta-1) n^{-1/2})^\alpha}\\
&\cdot (1+\delta)\theta n 2\pi(n^{-1/2}\log n+\beta n^{-1/2})n^{-1/2}
\end{split}\\
\begin{split}\label{eq:50}
&\leq P_{\rm{tx}}(1+\epsilon'')\cdot {G\over r_p^{\alpha}}\cdot (1+\delta)\theta n2\pi(n^{-1/2}\log n)^2 \\
&+\int_{n^{-1/2}(\log n -1)}^{{1/\sqrt{\pi}}+|c_w(u)|} \!\!\!\!\!\!\!\!\!\! P_{\rm{tx}}(1+\epsilon'')\cdot{G\over x^\alpha}\cdot (1+\delta)\theta n2\pi(x+n^{-1/2})dx
\end{split}\\
&\overset{(b)}\leq K_2 P_{\rm{tx}} (\log n)^2 r_p^{-\alpha},\label{eq:51}
\end{align}
with high probability {for any $\epsilon''>0$ and $\delta>0$,} where $P_{\rm{tx}}(i,u)$ is the transmission power of sender $i$ at channel use $u$, $\kappa_2(n,u)={\lfloor{{1/\sqrt{\pi}+|c_w(u)|-n^{-1/2}\log n}\over n^{-1/2}}\rfloor}+1$ is the number of rings needed to cover the whole network, $\cR_{2\beta}$ is the set of the senders in $R_{2\beta}$, $A(R_{2\beta})$ is the area of $R_{2\beta}$, {and $K_2$ is a positive constant independent with $n$.} Here, $(a)$ is by separating $R_{20}$ region from the other regions and $(b)$ is since the first term of \eqref{eq:50} is dominant as $n$ goes to infinity. Because \eqref{eq:51} holds for arbitrary channel use $u$, $\rho_{wm}$ is upper-bounded as:
\begin{align}
\rho_{wm} \leq K_2 P_{\rm{tx}} (\log n)^2 r_p^{-\alpha}.\label{eq:52}
\end{align}
By \eqref{eq:18} and \eqref{eq:52}, the covertness constraint is satisfied if $P_{\rm{tx}}\leq K'_2 l^{-{1\over2}} (\log n)^{-2} r_p^{\alpha}$ where $K'_2$ is a constant independent with $n$. Since $r_p=\Theta(n^{-(s/2+\epsilon')})$ for {any $\epsilon'>0$}, we conclude that $P_{\rm{tx}}=\Theta(l^{-{1\over2}}{n^{-{s\alpha\over2}-\epsilon}})$ for {any $\epsilon>0$} satisfies the covertness constraint.
\end{proof}
\subsection{Aggregate Throughput}\label{sec5D}
In this subsection, we derive the achievable aggregate throughput in Theorems \ref{thm1} and \ref{thm2}. We first derive a feasible long-term throughput for each sender-receiver pair, which we call pairwise throughput in short. A pairwise throughput $R_{\rm{pair}}(n,s)$ is said to be feasible if
\begin{align}
\mathop{\mathrm{lim}}_{T\rightarrow\infty} {1\over T}{\sum_{t=1}^{T}R_{j\rm{th-pair}}(n,s,t)} \geq R_{\rm{pair}}(n,s), ~\forall j \label{eq:55}
\end{align}
where $R_{j\rm{th-pair}}(n,s,t)$ is the throughput of the $j$th sender-receiver pair at time $t$. The following lemma shows a relationship between the aggregate throughput and the pairwise throughput.
\begin{lemma}\label{lem4}
Let $R_{\rm{pair}}(n,s)$ be a feasible pairwise throughput. Then an aggregate throughput of $T(n,s)=\Theta(n) \cdot R_{\rm{pair}}(n,s)$ is achievable with high probability.
\end{lemma}
\begin{proof}
As mentioned in Section \ref{sec5B}, we assume the steady state and hence assume that each sender has a data packet destined for its receiver in phase 2. Since the senders are uniformly and randomly chosen for each phase 1 and phase 2 and we are considering the throughputs in the long-term sense, the aggregate throughput equals half (because phase 2 occupies half of the time) of the product of the number of senders in phase 2 and the pairwise throughput. The proof is completed by noting that the number of senders in phase 2 is between $((1-\delta)\theta n(1-\epsilon(n_w, r_p)),(1+\delta)\theta n(1-\epsilon(n_w, r_p)))$ for any $\delta>0$, as $n$ goes to infinity by Corollary \ref{cor1}.
\end{proof}
Now, we derive a feasible pairwise throughput. Since the nodes use the Gaussian codebook as described in Section \ref{sec5B}, $R_{j\rm{th-pair}}(n,s,t)$ is represented as:
\begin{align}
R_{j\rm{th-pair}}(n,s,t)=\log\left(1+{{P_{\rm{tx}}\cdot {r_j(t)}^{-\alpha}}\over{N_0+I_j(n,s,t)}} \right), \label{eq:56}
\end{align}
where $r_j(t)$ is the distance between the $j$th sender-receiver pair and $I_j(n,s,t)$ is the interference power at the $j$th receiver at time $t$. To derive $R_{\rm{pair}}(n,s)$, we use the following lemmas on the distribution of $r_j(t)$ and an upper bound on $I_j(n,s,t)$.
\begin{lemma}\label{lem5}
Let $F(z)$ be the cumulative distribution of the distance $z$ between a sender-receiver pair at time $t$. For $z=\Theta(n^{-\epsilon})$, $F(z)=1-\exp(-\pi z^2n(1-\theta))$ for {any $\epsilon>0$}.
\end{lemma}
\begin{proof}
For an arbitrary sender-receiver pair, let $s(t)$ and $v(t)$ be the location of the sender and the receiver at time $t$, respectively. Then,
\begin{align}
\begin{split}\label{eq:57}
F(z)&=p(|s(t)-v(t)|\leq z)\\
&=p(\min_{i\in\cV_t}(|s(t)-v_i(t)|)\leq z)
\end{split}\\
&\overset{(a)}=1-\mathop{\mathrm{lim}}_{n\rightarrow\infty}\prod_{i=1}^{n(1-\theta)}p(|s(t)-v_i(t)|> z)\label{eq:58}\\
&=1-\mathop{\mathrm{lim}}_{n\rightarrow\infty}(1-\pi z^2)^{n(1-\theta)}\label{eq:59}\\
&=1-\mathop{\mathrm{lim}}_{n\rightarrow\infty}(1-\pi z^2)^{{1\over{\pi z^2}}\cdot \pi z^2 n(1-\theta)}\label{eq:59.1}\\
&\overset{(b)}=1-\exp(-\pi z^2n(1-\theta)),\label{eq:60}
\end{align}
where $v_i(t)$ is the location of receiver $i$ at time $t$ and $\cV_t$ is the set of receivers at time $t$. Here, $(a)$ is since $n$ nodes are uniformly and randomly distributed and $(b)$ is because $\mathop{\mathrm{lim}}_{x\rightarrow 0}(1-x)^{1/x}\triangleq e^{-1}$ by the definition of Euler's number.
\end{proof}
The following two corollaries, which can be proved similarly to Lemma \ref{lem5}, are used to derive an upper bound on the interference power at a receiver in Lemma \ref{lem6} and an upper bound on the aggregate throughput in Section \ref{sec6}.
\begin{corollary}\label{cor2}
Let $F_r(z)$ be the cumulative distribution of the distance $z=\Theta(n^{-\epsilon})$ between a receiver and the nearest sender from the receiver at time $t$. Then, $F_r(z)=1-\exp(-\pi z^2n\theta)$ for {any $\epsilon>0$.}
\end{corollary}
\begin{corollary}\label{cor3}
Let $F_s(z)$ be the cumulative distribution of the distance $z=\Theta(n^{-\epsilon})$ between a node and the nearest node from the node at time $t$. Then, $F_s(z)=1-\exp(-\pi z^2n)$ for {any $\epsilon>0$.}
\end{corollary}
\begin{figure}
\centering
\includegraphics[width=0.9\columnwidth]{HY7.png}
\caption{A set of disjoint rings to derive an upper bound on the interference power at a receiver. Here, the (dashed) disk of radius $n^{-({1\over2}+\epsilon')}$ {for an arbitrarily small $\epsilon'>0$} denotes the region where there is no sender in the disk with high probability, $R_{r\beta}$ denotes the $(\beta+1)$-th smallest ring, and each ring except the smallest has width $n^{-1/2}$. The smallest ring has width $n^{-1/2}\log n-n^{-({1\over2}+\epsilon')}$.}\label{fig7}
\end{figure}
\begin{lemma}\label{lem6}
Let $I(n,s,t)$ be the interference power at a receiver at time $t$. Then $I(n,s,t)<P_{\rm{tx}}n^{{\alpha\over2}+\epsilon}$ with high probability for {any $\epsilon>0$}.
\end{lemma}
\begin{proof}
Consider an arbitrary receiver in the network and let $v(t)$ denote its location at time $t$. The proof is similar to that of Lemma~\ref{lem3}, but now we consider a set of disjoint rings centered at the receiver which cover the whole network except the disk centered at the receiver where there is no sender with high probability. The radius of the disk is $n^{-({1\over2}+\epsilon')}$ {for an arbitrarily small $\epsilon'>0$} by Corollary \ref{cor2} since $F_r(n^{-({1\over2}+\epsilon')})$ converges to zero as $n$ goes to infinity for any $\epsilon'>0$. The smallest ring has width $n^{-1/2}\log n-n^{-({1\over2}+\epsilon')}$ and the other rings have width $n^{-1/2}$ to apply Corollary \ref{cor1}. Let $I(n,s,t)$ be the interference power at the receiver at time $t$ and $R_{r\beta}$ denote the $(\beta+1)$-th smallest ring. Then, $I(n,s,t)$ is upper-bounded as:
\begin{align}
I(n,s,t)&\overset{(a)}\leq\!\!\!\!\!\!\!\!\!\! \sum_{i:|s_i(t)-v(t)|\geq n^{-({1\over2}+\epsilon')}}\!\!\!\!\!\! P_{\rm{tx}}\cdot {G\over{|s_i(t)-v(t)|^\alpha}}\label{eq:61}\\
\begin{split}\label{eq:62}
&\overset{(b)}\leq \sum_{i\in \cR_{r0}}P_{\rm{tx}}\cdot {G\over n^{-\alpha({1\over2}+\epsilon')}}+\sum_{\beta=1}^{\kappa_r(n,t)}\sum_{i\in \cR_{r\beta}}P_{\rm{tx}}\\
&\cdot {G\over(n^{-1/2}\log n+(\beta-1) n^{-1/2})^\alpha}
\end{split}\\
&\overset{(c)}\leq K_r P_{\rm{tx}} (\log n)^2 n^{\alpha({1\over2}+\epsilon')}\label{eq:66}\\
&< P_{\rm{tx}}n^{{\alpha\over2}+\epsilon},\label{eq:67}
\end{align}
with high probability {for any $\epsilon>0$ and $\epsilon'>0$}, where $s_i(t)$ denotes the location of the $i$-th sender at time $t$, $\kappa_r(n,t)={\lfloor{{1/\sqrt{\pi}+|v(t)|-n^{-1/2}\log n}\over n^{-1/2}}\rfloor}+1$ is the number of rings needed to cover the whole network, $\cR_{r\beta}$ is the set of the senders in $R_{r\beta}$, {and $K_r$ is a positive constant independent with $n$.} Here, $(a)$ is since the senders in the preservation region do not transmit, $(b)$ is by separating $R_{r0}$ region from the other regions, and $(c)$ can be proved similarly as in \eqref{eq:48.1}-\eqref{eq:50} in the proof of Lemma~\ref{lem3}.
\end{proof}
By using Lemmas \ref{lem5} and \ref{lem6}, we derive a feasible pairwise throughput as follows:
\begin{align}
\begin{split}\label{eq:68}
&\mathop{\mathrm{lim}}_{T\rightarrow\infty}{1\over T}{\sum_{t=1}^{T}R_{j\rm{th-pair}}{(n,s,t)}} \\
&=\mathop{\mathrm{lim}}_{T\rightarrow\infty} {1\over T}{\sum_{t=1}^{T}\log\left(1+{{P_{\rm{tx}}\cdot{r_j(t)}^{-\alpha}}\over{N_0+I_j(n,s,t)}} \right)}
\end{split}\\
&\overset{(a)}\geq{\mathop{\mathrm{E}}}_t\left(\log\left(1+{{P_{\rm{tx}}\cdot {r_j(t)}^{-\alpha}}\over{N_0+I_j(n,s,t)}} \right)\right)(1-\epsilon'')\label{eq:69}\\
\begin{split}\label{eq:70}
&\geq p(r_j(t)< \min(P_{\rm{tx}}^{1\over\alpha},n^{-1/2}))\\
&\cdot {\mathop{\mathrm{E}}}_t\left(\log\left(1+{{P_{\rm{tx}}\cdot {r_j(t)}^{-\alpha}}\over{N_0+I_j(n,s,t)}} \right) \right. \\
&\ \ \ \ \ \ \ \ \ \ \left. \bigg\rvert r_j(t)< \min(P_{\rm{tx}}^{1\over\alpha},n^{-1/2}) \right)(1-\epsilon'')
\end{split}\\
&\overset{(b)}\geq\begin{cases}\label{eq:71}
F(P_{\rm{tx}}^{1\over\alpha})\cdot\log\left(1+{{1}\over{N_0+1}} \right)(1-\epsilon'') \\
\ \ \ \ \ \ \ \ \ \ \ \ \mbox{for} \ \ P_{\rm{tx}}^{1\over\alpha}\leq n^{-1/2},\\
F( n^{-1/2}) \cdot \log\left(1+{{P_{\rm{tx}}\cdot}n^{\alpha\over2}\over{N_0+P_{\rm{tx}}n^{{\alpha\over2}+\epsilon}}} \right)(1-\epsilon'')\\
\ \ \ \ \ \ \ \ \ \ \ \ \mbox{for} \ \ P_{\rm{tx}}^{1\over\alpha}> n^{-1/2},
\end{cases}\\
&\overset{(c)}\geq F(\min(P_{\rm{tx}}^{1\over\alpha},n^{-1/2}))\cdot\log\left(1+{{1}\over{N_0+1}}\right)(1-\epsilon'')\label{eq:72}\\
\begin{split}\label{eq:73}
&\overset{(d)}\geq \min\left({{\pi P_{\rm{tx}}^{2/\alpha}n\theta}\over{1+{\pi P_{\rm{tx}}^{2/\alpha}n\theta}}},{{\pi\theta}\over{1+{\pi\theta}}}\right)\\
&\cdot \log\left(1+{{1}\over{N_0+1}} \right)(1-\epsilon'')
\end{split}\\
&\geq \min( K'_1n^{1-\epsilon'}P_{\rm{tx}}^{2/\alpha},K'_2n^{-\epsilon'}),\label{eq:74}
\end{align}
with high probability for arbitrary $j$ and {any $\epsilon'>0$ and $\epsilon''>0$}, where {$K'_1$ and $K'_2$ are positive constants independent with $n$.} Here, $(a)$ is by WLLN, $(b)$ is by Lemma \ref{lem6}, $(c)$ is since $\frac{x}{N_0+x}$ is an increasing function for $x>0$, and $(d)$ follows from Lemma \ref{lem5} and $1-e^{-x}>{x/{(x+1)}}$ for $x>0$.
Since $R_{\rm{pair}}(n,s)=\min( K'_1n^{1-\epsilon'}P_{\rm{tx}}^{2/\alpha},K'_2n^{-\epsilon'})$ is feasible for any $\epsilon'>0$, by Lemma \ref{lem4}, the following aggregate throughput is achievable for $\epsilon'>0$:
\begin{align}
T(n,s)=n\cdot\min( K'_1n^{1-\epsilon'}P_{\rm{tx}}^{2/\alpha},K'_2n^{-\epsilon'}).\label{eq:76}
\end{align}
Now, Theorems \ref{thm1} and \ref{thm2} are proved by substituting $ P_{\rm{tx}}$ in Lemmas \ref{lem2} and \ref{lem3} into \eqref{eq:76}, respectively.
\section{Converse}\label{sec6}
In this section, we prove Theorems \ref{thm_ub}, \ref{thm3}, and \ref{thm4}. We note that the proofs do not depend on whether the wardens have mobility or not.
\subsection{Proof of Theorem \ref{thm_ub}}\label{sec6A}
In this subsection, we derive an upper bound on the aggregate throughput by assuming there is no covertness constraint. Let source node $j$ communicate to its destination node $k_j$. Then, $T(n,s)$ is upper-bounded as:
\begin{align}
&T(n,s)= \mathop{\mathrm{lim}}_{T\rightarrow\infty}{1\over T}\sum_{t=1}^{T}\sum_{j=1}^{n}R_{jk_j}(n,s,t)\label{eq:83.1}\\
&\overset{(a)}\leq\mathop{\mathrm{lim}}_{T\rightarrow\infty}{1\over T}\sum_{t=1}^{T}\sum_{j=1}^{n} \log\left(1+\frac{P}{N_0}\sum_{\scriptstyle {i=1}\atop \scriptstyle {i\neq j}}^{n}{G\over{d_{ji}(t)^\alpha}}\right)\label{eq:83.2}\\
&\overset{(b)}\leq\mathop{\mathrm{lim}}_{T\rightarrow\infty}{1\over T}\sum_{t=1}^{T}\sum_{j=1}^{n}\log\left(1+\frac{P}{N_0}(n-1) Gn^{\alpha(1+\epsilon')}\right)\label{eq:83.3}\\
&\leq n\cdot\log\left(1+{{{P}}\over N_0}\cdot Gn^{\alpha(1+\epsilon')+1}\right)\label{eq:86}\\
&\leq K_{\rm{tr}}n^{1+\epsilon},\label{eq:87}
\end{align}
with high probability {for any $\epsilon>0$ and $\epsilon'>0$}, where $d_{ji}(t)$ is the distance between nodes $j$ and $i$ at time $t$ and {$K_{\rm{tr}}$ is a positive constant independent with $n$.} Here $(a)$ is since $R_{jk_j}(n,s,t)$ is upper-bounded by the capacity of the single input multiple output (SIMO) channel between node $j$ and all the other nodes and $(b)$ is since the probability that the minimum distance between two nodes in the network is smaller than $n^{-(1+\epsilon')}$ converges to zero {for any $\epsilon'>0$} as $n$ goes to infinity, i.e.,
\begin{align}
\begin{split}\label{eq:88}
p&\left(\min_{(j,i),j\neq i}d_{ji}(t)<n^{-(1+\epsilon')}\right) \\
&\leq \sum_{j=1}^{n}p\left(\min_{i\neq j}d_{ji}(t)<n^{-(1+\epsilon')}\right)
\end{split}\\
&=n\cdot p\left(\min_{i \geq 2 }d_{1i}(t)<n^{-(1+\epsilon')}\right)\label{eq:88.1}\\
&\overset{(a)}=n\left(1-\left(1-{\pi\over n^{2+2\epsilon'}}\right)^{n-1}\right),\label{eq:89}\\
&\overset{n\rightarrow \infty} \longrightarrow 0
\end{align}
where $(a)$ is from Corollary \ref{cor3}. This completes the proof.
\subsection{Proof of Theorems \ref{thm3} and \ref{thm4}}\label{sec6B}
Assume that each node not contained in the regions of radius $\Theta(n^{-(\frac{s}{2}+\epsilon')})$ around each warden for an arbitrarily small $\epsilon'>0$ uses the same power at each channel use. These regions correspond to the preservation regions introduced in the proposed scheme for proving achievability. We note that we do not restrict the behavior of the nodes inside these regions in the converse proof, while we force them not to transmit in the achievability proof.
We start by deriving an upper bound on the transmission power satisfying \eqref{eq:32}. The proof is similar with that of Lemmas \ref{lem2} and \ref{lem3}, but now we derive a lower bound on the received power at a warden instead of an upper bound. To derive the lower bound, we consider a set of disjoint rings, centered at the warden, that is covered by the whole network as shown in Figs. \ref{fig8} and \ref{fig9}. The following lemmas present upper bounds on the transmission power satisfying the covertness constraint for $0<s<1$ and $s\geq1$.
\begin{figure}
\centering
\includegraphics[width=0.9\columnwidth]{HY8.png}
\caption{A set of disjoint rings to derive a lower bound on the received power at warden $w$ for $0<s<1$. Here, $R_{3\beta}$ denotes the $(\beta+1)$-th smallest ring and each ring has width $n^{-1/2}$.} \label{fig8}
\end{figure}
\begin{lemma}\label{lem7}
Let each node not contained in the regions of radius $r'_p=\Theta(n^{-(\frac{s}{2}+\epsilon')})$ around each warden for an arbitrarily small $\epsilon'>0$ transmit the same power of $P_{\rm{tx}}$ at each channel use. If $0<s<1$ and the network satisfies the covertness constraint, then $P_{\rm{tx}}\leq K_3 l^{-{1\over2}}{n^{-({s\over2}(\alpha-2)+1)+\epsilon}}$ {for any $\epsilon>0$} and a positive constant $K_3$ independent with $n$ with high probability.
\end{lemma}
\begin{proof}
Let $s_i(u)$ and $c_w(u)$ be the locations of node $i$ and warden $w$ at channel use $u$, respectively. The proof is similar to that of Lemma \ref{lem2}, but now we consider the largest set of disjoint rings that is covered by the whole network as shown in Fig. \ref{fig8}. Let $R_{3\beta}$ denote the $(\beta+1)$-th smallest ring. Then, the received power at warden $w$ at channel use $u$, $\rho_{w,u}$, is lower-bounded as:
\begin{align}
\rho_{w,u}&\geq\sum_{i:s_i(u)\notin\cP_u}P_{\rm{tx}}\cdot {G\over{|s_i(u)-c_w(u)|^\alpha}}\label{eq:91}\\
&\overset{(a)}\geq{1\over2}\cdot\!\!\sum_{i:|s_i(u)-c_w(u)|\geq r'_p}P_{\rm{tx}}\cdot {G\over{|s_i(u)-c_w(u)|^\alpha}}\label{eq:91.1}\\
&\overset{(b)}\geq{1\over2}\cdot\sum_{i:1/\sqrt{\pi}-|c_w(u)|\geq|s_i(u)-c_w(u)|\geq r'_p}\!\!\!\!\!\!\!\!\!\!P_{\rm{tx}}\cdot {G\over{|s_i(u)-c_w(u)|^\alpha}}\label{eq:92}\\
&\overset{(c)}\geq{1\over2}\cdot\sum_{\beta=0}^{\kappa_3(n,u)}P_{\rm{tx}}\cdot {G\cdot |\cR_{3\beta}|\over(r'_p+(\beta+1)n^{-1/2})^\alpha}\label{eq:93}\\
&\overset{(d)}\geq{1\over2}\cdot\sum_{\beta=0}^{\kappa_3(n,u)}P_{\rm{tx}}\cdot {G\cdot(1-\delta)nA(R_{3\beta})\over(r'_p+(\beta+1)n^{-1/2})^\alpha}\label{eq:94}\\
\begin{split}\label{eq:95}
&\overset{(e)}\geq{1\over2}\cdot\sum_{\beta=0}^{\kappa_3(n,u)}P_{\rm{tx}}\cdot {G\over(r'_p+(\beta+1)n^{-1/2})^\alpha}\\
&\cdot (1-\delta)n 2\pi(r'_p+\beta n^{-1/2})n^{-1/2}
\end{split}\\
&\geq\int_{r'_p+n^{-1/2}}^{{1/\sqrt{\pi}}-|c_w(u)|}\!\!\!\!\!\!\!\!P_{\rm{tx}}\cdot{G\over x^\alpha}\cdot (1-\delta)n\pi(x-n^{-1/2})dx\label{eq:96}\\
&\geq K'_3 P_{\rm{tx}} n {r'_p}^{2-\alpha},\label{eq:97}
\end{align}
with high probability {for any $\delta>0$}, where $\cP_u$ is the set of the nodes contained in the regions of radius $r'_p$ around each warden at channel use $u$, $\kappa_3(n,u)={\lfloor{{1/\sqrt{\pi}-|c_w(u)|-r'_p}\over n^{-1/2}}\rfloor}-1$ is the number of the maximum rings covered by the whole network, $\cR_{3\beta}$ is the set of the nodes in $R_{3\beta}$, $A(R_{3\beta})$ is the area of $R_{3\beta}$, {and $K'_3$ is a positive constant independent with $n$.} Here, $(a)$ is since the total area of the regions of radius $r'_p$ around each warden is smaller than $1/2$, $(b)$ is because we only consider the rings inside the network, $(c)$ is by assuming that the nodes in each ring are at the boundary far from warden $w$, $(d)$ is from Corollary \ref{cor1}, and $(e)$ is by lower bounding $A(R_{3\beta})$. Since \eqref{eq:97} holds for arbitrary channel use $u$, $\bar\rho_w$ is lower-bounded as
\begin{align}
\bar\rho_w \geq K'_3 P_{\rm{tx}} n {r'_p}^{2-\alpha}.\label{eq:98}
\end{align}
By \eqref{eq:32} and \eqref{eq:98}, if the covertness constraint is satisfied, then $ K'_3 P_{\rm{tx}} n {r'_p}^{2-\alpha} \leq \sqrt{2} N_0 \sqrt{\delta \over l}+o(l^{-1/2})$, or $P_{\rm{tx}}\leq K''_3 l^{-{1\over2}}n^{-1} {r'_p}^{\alpha-2}$ for a positive constant $K''_3$ independent with $n$. Since $r'_p=\Theta(n^{-(s/2+\epsilon')})$ {for an arbitrarily small $\epsilon'>0$,} we conclude that $P_{\rm{tx}}\leq K_3 l^{-{1\over2}}{n^{-({s\over2}(\alpha-2)+1)+\epsilon}}$ {for any $\epsilon>0$} satisfies the covertness constraint for a positive constant $K_3$ independent with $n$.
\end{proof}
\begin{figure}
\centering
\includegraphics[width=0.9\columnwidth]{HY9.png}
\caption{A set of disjoint rings to derive a lower bound on the received power at warden $w$ for $s\geq1$. Here, $R_{4\beta}$ denotes the $(\beta+1)$-th smallest ring and each ring except the smallest has width $n^{-1/2}$. The smallest ring has width $n^{-1/2}\log n-r'_p$.}\label{fig9}
\end{figure}
\begin{lemma}\label{lem8}
Let each node not contained in the regions of radius $r'_p=\Theta(n^{-(\frac{s}{2}+\epsilon')})$ around each warden for an arbitrarily small $\epsilon'>0$ transmit the same power of $P_{\rm{tx}}$ at each channel use. If $s\geq1$ and the network satisfies the covertness constraint, then $P_{\rm{tx}}\leq K_4 l^{-{1\over2}}{n^{-{\alpha\over2}-\epsilon}}$ {for any $\epsilon>0$ and a positive constant $K_4$} independent with $n$ with high probability.
\end{lemma}
\begin{proof}
Let $s_i(u)$ and $c_w(u)$ be the locations of node $i$ and warden $w$ at channel use $u$, respectively. The proof is similar with that of Lemma \ref{lem7}, but now we consider a set of disjoint rings where the smallest ring has width $n^{-1/2}\log n-r'_p$ to apply Corollary \ref{cor1}, while the width of the other rings remains as $n^{-1/2}$ as shown in Fig. \ref{fig9}. Let $R_{4\beta}$ denote the $(\beta+1)$-th smallest ring. Then, the received power at warden $w$ at channel use $u$, $\rho_{w,u}$ is lower-bounded as:
\begin{align}
&\rho_{w,u}\geq\sum_{i:s_i(u)\notin\cP_u}P_{\rm{tx}}\cdot {G\over{|s_i(u)-c_w(u)|^\alpha}}\label{eq:99}\\
&\geq{1\over2}\cdot\sum_{i:1/\sqrt{\pi}-|c_w(u)|\geq|s_i(u)-c_w(u)|\geq r'_p}\!\!\!\!\!\!\!\!\!\!\!P_{\rm{tx}}\cdot {G\over{|s_i(u)-c_w(u)|^\alpha}}\label{eq:100}\\
\begin{split}\label{eq:101}
&\overset{(a)}\geq{1\over2}\cdot P_{\rm{tx}}\cdot {G\over(n^{-1/2}\log n)^{\alpha}}\cdot |\cR_{40}|\\
&+{1\over2}\cdot\sum_{\beta=1}^{\kappa_4(n,u)}P_{\rm{tx}}\cdot {G\over(n^{-1/2}\log n+\beta n^{-1/2})^\alpha}\cdot |\cR_{4\beta}|
\end{split}\\
&\overset{(b)}\geq K'_4 P_{\rm{tx}}n^{\alpha/2}(\log n)^{2-\alpha},\label{eq:104}
\end{align}
with high probability, where $\cP_u$ is the set of the nodes contained in the regions of radius $r'_p$ around each warden at channel use $u$, $\kappa_4(n,u)={\lfloor{{1/\sqrt{\pi}-|c_w(u)|-n^{-1/2}\log n}\over n^{-1/2}}\rfloor}$ is the number of rings covered by the whole network, $\cR_{4\beta}$ is the set of the nodes in $R_{4\beta}$, and $K'_4$ is a positive constant independent with $n$. Here, $(a)$ is by separating $R_{40}$ region from the other regions and $(b)$ can be proved similarly as in \eqref{eq:94}-\eqref{eq:96} in the proof of Lemma \ref{lem7}. Because \eqref{eq:104} holds for arbitrary channel use $u$, $\bar\rho_w$ is lower-bounded as:
\begin{align}
\bar\rho_w \geq K'_4 P_{\rm{tx}}n^{\alpha/2}(\log n)^{2-\alpha}.\label{eq:105}
\end{align}
By \eqref{eq:32} and \eqref{eq:105}, if the covertness constraint is satisfied, then $ K'_4 P_{\rm{tx}}n^{\alpha/2}(\log n)^{2-\alpha} \leq \sqrt{2} N_0 \sqrt{\delta \over l}+o(l^{-1/2})$ and we conclude that $P_{\rm{tx}}\leq K_4 l^{-{1\over2}}n^{-{\alpha\over2}+\epsilon}$ for a positive constant $K_4$ independent with $n$ and { any $\epsilon>0$.}
\end{proof}
On the other hand, the following lemma presents an upper bound on the throughput of an arbitrary source-destination pair in terms of the distance between the source and its nearest node.
\begin{lemma}\label{lem9}
Let $R(n,s,t)$ be the throughput of an arbitrary source-destination pair and $d_s(t)$ be the distance between the source and the nearest node from the source at time $t$. Then, $R(n,s,t)\leq\log\left(1+{P_{\rm{tx}} \over N_0}\cdot{{Kn^{\epsilon}}\over{d_s(t)^\alpha}}\right)$ {for any $\epsilon>0$} with high probability.
\end{lemma}
\begin{proof}
Let us consider source node $j$. Then, $R(n,s,t)$ is upper-bounded as:
\begin{align}
R(n,s,t)&\overset{(a)}\leq\log\left(1+{P_{\rm{tx}} \over N_0}\sum_{\scriptstyle {i=1}\atop \scriptstyle {i\neq j}}^{n}{G\over{d_{ji}(t)^\alpha}}\right)\label{eq:107}\\
&\overset{(b)}\leq \log\left(1+{P_{\rm{tx}} \over N_0}\cdot{{Kn^{\epsilon}}\over{d_s(t)^\alpha}}\right),\label{eq:108}
\end{align}
with high probability for {any $\epsilon>0$}, where $d_{ji}(t)$ is the distance between nodes $j$ and $i$ at time $t$ {and $K$ is a positive constant independent of $n$.} Here $(a)$ holds since $R(n,s,t)$ is upper-bounded by the throughput of the single input multiple output (SIMO) channel between node $j$ and the other nodes, and $(b)$ is proved similarly to the proof of Lemma \ref{lem6}, but now there is no node in the disk with radius $d_s(t)$ centered at source $j$.
\end{proof}
Now we are ready to derive an upper bound on the aggregate throughput $T(n,s)$. Let source node $j$ communicate to its destination node $k_j$. Then,
\begin{align}
T(n,s)&=\mathop{\mathrm{lim}}_{T\rightarrow\infty}{1\over T}\sum_{t=1}^{T}\sum_{j=1}^{n}R_{jk_j}(n,s,t)\label{eq:109}\\
&\overset{(a)}\leq\mathop{\mathrm{lim}}_{T\rightarrow\infty}{1\over T}\sum_{t=1}^{T}\sum_{j=1}^{n}\log\left(1+{P_{\rm{tx}} \over N_0}\cdot{{Kn^{\delta}}\over{d_{s,j}(t)^\alpha}}\right)\label{eq:110}\\
&\overset{(b)}\leq{\mathop{\mathrm{E}}}_t\left(\sum_{j=1}^{n}\log\left(1+{P_{\rm{tx}} \over N_0}\cdot{{Kn^{\delta}}\over{d_{s,j}(t)^\alpha}}\right)\right)(1+\epsilon'') \label{eq:111}\\
&\overset{(c)}= n\cdot{\mathop{\mathrm{E}}}_t\left(\log\left(1+{P_{\rm{tx}} \over N_0}\cdot{{Kn^{\delta}}\over{d_s(t)^\alpha}}\right)\right)(1+\epsilon''),\label{eq:112}\\
\begin{split}\label{eq:114}
&= n\cdot p(d_s(t)<P_{\rm{tx}}^{1\over\alpha})\\
&\cdot{\mathop{\mathrm{E}}}_t\left(\log\left(1+{P_{\rm{tx}} \over N_0}\cdot{{Kn^{\delta}}\over{d_s(t)^\alpha}}\right)\bigg\rvert d_s(t)<P_{\rm{tx}}^{1\over\alpha}\right)(1+\epsilon'')\\
&+n\cdot p(d_s(t)\geq P_{\rm{tx}}^{1\over\alpha})\\
&\cdot{\mathop{\mathrm{E}}}_t\left(\log\left(1+{P_{\rm{tx}} \over N_0}\cdot{{Kn^{\delta}}\over{d_s(t)^\alpha}}\right)\bigg\rvert d_s(t)\geq P_{\rm{tx}}^{1\over\alpha}\right)(1+\epsilon'')
\end{split}\\
\begin{split}\label{eq:115}
&\overset{(d)}\leq K_5\cdot n\cdot p(d_s(t)<P_{\rm{tx}}^{1\over\alpha})\\
&\cdot{\mathop{\mathrm{E}}}_t\left(\log\left(1+{P_{\rm{tx}} \over N_0}\cdot{{Kn^{\delta}}\over{d_s(t)^\alpha}}\right)\bigg\rvert d_s(t)<P_{\rm{tx}}^{1\over\alpha}\right)
\end{split}\\
&\leq K_5\cdot n\cdot F_s(P_{\rm{tx}}^{1\over\alpha})\cdot n^{\epsilon'}\label{eq:116}\\
&\overset{(e)}\leq K_5\cdot n\cdot \min(\pi nP_{\rm{tx}}^{2/\alpha},1) \cdot n^{\epsilon'}\label{eq:117}\\
&\leq K_5 n^{1+\epsilon'}\min(\pi nP_{\rm{tx}}^{2/\alpha},1) ,\label{eq:118}
\end{align}
with high probability {for any $\epsilon'>0$, $\epsilon''>0$, and $\delta>0$}, {where $K$ and $K_5$ are positive constants independent of $n$,} $d_{s,j}(t)$ is the distance between node $j$ and the nearest node from node $j$ at time $t$, and $d_s(t)=d_{s,1}(t)$ is the distance between node $1$ and the nearest node from node $1$ at time $t$. Here, $(a)$ is by Lemma \ref{lem9}, $(b)$ is by WLLN, $(c)$ is because the nodes are i.i.d., $(d)$ is since the first term of \eqref{eq:114} is dominant, {and $(e)$ follows from Corollary \ref{cor3}, $1-e^{-x}<x$ for $x>0$, and $F_s(y)\leq1$ for $y\geq0$.}
Now, Theorems \ref{thm3} and \ref{thm4} are proved by substituting $P_{\rm{tx}}$ in Lemmas \ref{lem7} and \ref{lem8} into \eqref{eq:118}, respectively.
\section{Conclusion}\label{sec7}
In this paper, we showed that node mobility greatly improves the throughput scaling of covert communication over a wireless ad hoc network. In particular, the aggregate throughput scaling was shown to be linear in $n$ when the number of channels that each warden uses to judge the presence of communication is not too large compared to~$n$. For achievability, we proposed a mobility-assisted scheme where the communication from a source to its destination consists of two-hop small-range transmission. This scheme was shown to be optimal for $0<s<1$ under the assumption that each node distant from every warden to a certain extent uses the same power at each channel use.
We note that our model assumes some impractical situations for the simplicity of analysis. First, it is assumed that the nodes are uniformly and independently distributed at each time $t$. In practice, each node has a correlated trajectory, such as the random walk model \cite{Gamal:2006-1}. In the case without the covertness constraint, it is known that several constraints on the node trajectory do not severely affect the throughput scaling. For example, the aggregate throughput still scales linearly in $n$ even if the trajectory of each node is restricted to a random line segment \cite{Diggavi:2005}. Similarly, we conjecture that the throughput scaling will not be severely affected by limited correlation of the trajectory even in the presence of the covertness constraint. Second, the delay tolerance from source to destination is assumed to be sufficiently large. The trade-off between the delay tolerance and the aggregate throughput was studied in the absence of the covertness constraint \cite{Gamal:2006-1, Gamal:2006-2}. It would be an interesting direction for further work to study the effect of a delay tolerance constraint on covert communication over the wireless ad hoc network.
Finally, we think that proving a nontrivial upper bound without the assumption of equal transmit power would be a good further work. It seems to be challenging since the distances between the senders and the wardens, which are related to the upper bound on the transmit power from the covertness constraint, and the distances between the senders and the receivers, which affect the transmission rate, independently vary over time.
\bibliographystyle{IEEEtran}
\section{Introduction}
The classical Kloosterman sum is given by
\[\begin{aligned}
S\rb{m, n; q} = \sum\limits_{\substack{x, y \in \mathbb{Z}/q\mathbb{Z} \\ xy \equiv 1\pmod{q}}} \operatorname{e}\rb{\frac{mx + ny}{q}},
\end{aligned}\]
where $\operatorname{e}\rb{x} = e^{2\pi i x}$. Kloosterman sums naturally appear in the Fourier expansion of $\operatorname{GL}(2)$ Poincar\'e series
\[\begin{aligned}
P_m\rb{z; \nu} = \sum\limits_{\gamma \in \Gamma_\infty \backslash \operatorname{SL}\rb{2,\mathbb{Z}}} \operatorname{Im}\rb{\gamma z}^\nu \operatorname{e}\rb{m\rb{\gamma z}},
\end{aligned}\]
which play an important role in number theory. In \cite{BFG1988}, Bump, Friedberg and Goldfeld introduced $\operatorname{GL}(r)$ Poincar\'e series for $r\geq 2$, and gave a generalisation of Kloosterman sums to $\operatorname{GL}(3)$. The notion of Kloosterman sums was then generalised to $\operatorname{GL}(r)$ for $r\geq 2$ by Friedberg \cite{Friedberg1987}, and then to arbitrary simply connected Chevalley groups by D\k abrowski \cite{Dabrowski1993}.
By methods of algebraic geometry, Weil \cite{Weil1948} obtained a bound for $\operatorname{GL}(2)$ Kloosterman sums
\[\begin{aligned}
\vb{S\rb{m, n; q}} \ll \tau\rb{q} \rb{m, n, q}^{1/2} q^{1/2},
\end{aligned}\]
where $\tau$ denotes the divisor function. However, it remains a major open problem to give non-trivial bounds for Kloosterman sums in general, and currently only a small set of examples can be treated. Bounds for $\operatorname{GL}(3)$ Kloosterman sums were first obtained by Larsen \cite[Appendix]{BFG1988} and Stevens \cite{Stevens1987}, and were improved by D\k abrowski and Fisher \cite{DF1997}. Bounds for $\operatorname{GL}(4)$ Kloosterman sums were given by Huang \cite[Appendix]{GSW2020}. Friedberg \cite{Friedberg1987} generalised the results to $\operatorname{GL}(r)$ Kloosterman sums attached to certain Weyl elements. On reductive groups, D\k abrowski and Reeder \cite{DR1998} gave the size of Kloosterman sets, establishing a trivial bound for Kloosterman sums on reductive groups.
Other than Poincar\'e series, another application of Kloosterman sums is found in the relative trace formula, which integrates an automorphic kernel over two subgroups with their respective characters. In particular, a prime application for bounds of Kloosterman sums is the analysis of the arithmetic side of the Petersson/Kuznetsov spectral summation formula. A more detailed description of this can be found in \cite{Blomer2019}.
Now we introduce the main results. Let
\[\begin{aligned}
G = \operatorname{Sp}(2r) &= \cbm{ M \in \operatorname{GL}(2r) }{ M^T JM = J}, & J &= \begin{pmatrix} & I_n\\ -I_n\end{pmatrix}
\end{aligned}\]
be the standard symplectic group, with the standard torus and the standard unipotent subgroup given by
\[\begin{aligned}
T &= \cb{\begin{pmatrix} *\\ &*\\ && * \\ &&&\ddots\\ &&&&*\\ &&&&&*\end{pmatrix}} \subseteq G, & U &= \cb{\begin{pmatrix} 1 & \cdots & * & * & \cdots & *\\ & \ddots & \vdots & \vdots & \ddots & \vdots\\ &&1 & * & \cdots & *\\ &&&1\\ &&&\vdots & \ddots\\ &&& * & \cdots & 1\end{pmatrix}} \subseteq G.
\end{aligned}\]
We denote by $N = N_G(T)$ the normaliser of $T$ in $G$. The Weyl group is given by $W := N_G(T)/T$. For any $n\in N$, we can write $n = wt$ for some $w\in W$, $t\in T$. Let $w: N \to W$ be the canonical projection map with respect to this decomposition. For $n\in N$, we also define $U_n := U \cap n^{-1} U^T n$, and $\ol U_n := U \cap n^{-1} U n$. Note that $U_n, \ol U_n$ depend only on the image $w(n)$ of the canonical projection.
Let $p$ be a rational prime. We have a Bruhat decomposition
\[\begin{aligned}
G\rb{\mathbb{Q}_p} = U\rb{\mathbb{Q}_p} N\rb{\mathbb{Q}_p} U\rb{\mathbb{Q}_p}.
\end{aligned}\]
For $n\in N\rb{\mathbb{Q}_p}$, we define
\[\begin{aligned}
C(n) &= U\rb{\mathbb{Q}_p} n U\rb{\mathbb{Q}_p} \cap G\rb{\mathbb{Z}_p},\\
X(n) &= U\rb{\mathbb{Z}_p} \backslash C(n) / U_n\rb{\mathbb{Z}_p},
\end{aligned}\]
and projection maps
\[\begin{aligned}
u: X(n) &\to U\rb{\mathbb{Z}_p} \backslash U\rb{\mathbb{Q}_p},\\
u': X(n) &\to U\rb{\mathbb{Q}_p} / U_n\rb{\mathbb{Z}_p}
\end{aligned}\]
by the relation $x = u(x) n u'(x)$ for $x \in X(n)$.
Let $n\in N\rb{\mathbb{Q}_p}$, $\psi_p$ a character of $U\rb{\mathbb{Q}_p}$ which is trivial on $U\rb{\mathbb{Z}_p}$, and $\psi'_p$ a character of $U_n\rb{\mathbb{Q}_p}$ trivial on $U_n\rb{\mathbb{Z}_p}$, such that $\psi'_p$ is the restriction of some character of $U\rb{\mathbb{Q}_p}$ trivial on $U\rb{\mathbb{Z}_p}$. Then the local Kloosterman sum is given by
\[\begin{aligned}
\operatorname{Kl}_p\rb{n, \psi_p, \psi'_p} = \sum\limits_{x\in X(n)} \psi_p\rb{u(x)} \psi'_p\rb{u'(x)}.
\end{aligned}\]
If $\psi'_p$ is given as a character of $U\rb{\mathbb{Q}_p}$ which is trivial on $U\rb{\mathbb{Z}_p}$, we write $\operatorname{Kl}_p\rb{n, \psi_p, \psi'_p}$ to mean $\operatorname{Kl}_p\rb{n, \psi_p, \psi'_p|_{U_n\rb{\mathbb{Q}_p}}}$.
To define a global Kloosterman sum, let $n \in N(\mathbb{Q})$, $\psi = \prod\limits_p \psi_p$ a character of $U(\mathbb{A})$ which is trivial on $\prod\limits_p U\rb{\mathbb{Z}_p}$, and $\psi'$ a character of $U_n\rb{\mathbb{A}}$ trivial on $\prod\limits_p U_n\rb{\mathbb{Z}_p}$, such that $\psi'$ is the restriction of some character of $U\rb{\mathbb{A}}$ trivial on $\prod\limits_p U\rb{\mathbb{Z}_p}$. Then the global Kloosterman sum is given by
\[\begin{aligned}
\operatorname{Kl}_p\rb{n, \psi, \psi'} = \prod\limits_p \operatorname{Kl}_p\rb{n, \psi_p, \psi'_p}.
\end{aligned}\]
\begin{rmk}
This definition of Kloosterman sums is different from the symplectic Kloosterman sums introduced by Kitaoka \cite{Kitaoka1984}, which are more relevant for classical $\operatorname{Sp}(4)$ Fourier expansions with respect to the upper right 2-by-2 block, which, however, is not a full parabolic subgroup. T\'oth \cite{Toth2013} proved some properties and estimates of such Kloosterman sums. The Kloosterman sums introduced here fit into the general framework of Kloosterman sums defined on reductive groups, see e.g. D\k abrowski \cite{Dabrowski1993}.
\end{rmk}
For $G = \operatorname{Sp}\rb{4, \mathbb{Q}_p}$, a set of simple roots of $G$ with respect to the maximal torus $T$ is given by $\Delta = \cb{\alpha, \beta}$, where
\[\begin{aligned}
\alpha\rb{\operatorname{diag}\rb{y_1, y_2, y_1^{-1}, y_2^{-1}}} &= y_1y_2^{-1}, & \beta\rb{\operatorname{diag}\rb{y_1, y_2, y_1^{-1}, y_2^{-1}}} &= y_2^2.
\end{aligned}\]
Then $\Psi^+ = \cb{\alpha, \beta, \alpha+\beta, 2\alpha+\beta}$ is a set of positive roots. We denote by $s_\alpha$ and $s_\beta$ the simple reflections in the hyperplane orthogonal to $\alpha$ and $\beta$ respectively. Then the Weyl group of $G$ with respect to $T$ is given by
\[\begin{aligned}
W = \cb{1, s_\alpha, s_\beta, s_\alpha s_\beta, s_\beta s_\alpha, s_\alpha s_\beta s_\alpha, s_\beta s_\alpha s_\beta, s_\alpha s_\beta s_\alpha s_\beta}.
\end{aligned}\]
We also denote the long Weyl element $s_\alpha s_\beta s_\alpha s_\beta$ by $w_0$. Characters of $U\rb{\mathbb{Q}_p}$ trivial on $U\rb{\mathbb{Z}_p}$ are given by $\psi_{m_1, m_2}$ for $m_1, m_2\in\mathbb{Z}$, where
\[\begin{aligned}
\psi_{m_1, m_2} \begin{pmatrix} 1& x_1&*&*\\&1&*&x_2\\&&1\\&&-x_1&1\end{pmatrix} = \operatorname{e}\rb{m_1x_1+m_2x_2}.
\end{aligned}\]
Let
\[\begin{aligned}
n_{w, r, s} = \operatorname{diag}\rb{p^{-r}, p^{r-s}, p^r, p^{s-r}} w \in N\rb{\mathbb{Q}_p},
\end{aligned}\]
such that $X\rb{n_{w,r,s}} \neq \emptyset$. The exact conditions $r,s$ have to satisfy are given in \Cref{section:Sp4Kloosterman}, but in general we require $r,s\geq 0$. By counting the number of terms in the Kloosterman sum \cite[Theorem 0.3]{DR1998}, we obtain a trivial bound
\[\begin{aligned}
\vb{\operatorname{Kl}_p\rb{n_{w,r,s}, \psi, \psi'}} \leq p^{r+s}.
\end{aligned}\]
Now we state the main results of the paper, on non-trivial bounds of $\operatorname{Sp}(4)$ Kloosterman sums.
\begin{thm}\label{thm:abKloosterman_bound}
Let $\psi = \psi_{m_1, m_2}$, $\psi' = \psi_{n_1, n_2}$. Then
\[\begin{aligned}
\vb{\operatorname{Kl}_p\rb{n_{s_\alpha s_\beta, r, s}, \psi, \psi'}} \ll \min\cb{p^{2s} \rb{m_1, p^{r-s}}, p^r \rb{m_2, p^s}^{1/2} \rb{n_2, p^s}^{1/2}}.
\end{aligned}\]
\end{thm}
\begin{thm}\label{thm:baKloosterman_bound}
(Larsen \cite[Appendix]{BFG1988}) Let $\psi = \psi_{m_1, m_2}$, $\psi' = \psi_{n_1, n_2}$. Then
\[\begin{aligned}
\vb{\operatorname{Kl}_p\rb{n_{s_\beta s_\alpha, r, s}, \psi, \psi'}} \ll \min\cb{p^{3r} \rb{m_2, p^{s-2r}}, p^s \rb{m_1, n_1, p^r}}.
\end{aligned}\]
\end{thm}
\begin{thm}\label{thm:abaKloosterman_bound}
Let $\psi = \psi_{m_1, m_2}$, $\psi' = \psi_{n_1, n_2}$. Then
\[\begin{aligned}
\vb{\operatorname{Kl}_p\rb{n_{s_\alpha s_\beta s_\alpha, r,s}, \psi, \psi'}} \ll \begin{cases}
p^{\frac{r}{3} + \frac{2s}{3} + \frac{2}{3}\min\cb{\operatorname{ord}_p(m_1)+s, \operatorname{ord}_p(n_1)+r} + \frac{1}{3}\operatorname{ord}_p(m_2)} & \text{ if } s\leq r,\\
p^{r+\min\cb{\operatorname{ord}_p(m_2), r+\operatorname{ord}_p(n_1)}} + p^{r+\min\cb{\frac{s}{2}+\operatorname{ord}_p(m_1), r-\frac{s}{2}+\operatorname{ord}_p(n_1)}} & \text{ if } r< s < 2r,\\
p^{r+\min\cb{\operatorname{ord}_p(m_2), r+\operatorname{ord}_p(n_1)}}& \text{ if } s=2r. \end{cases}
\end{aligned}\]
\end{thm}
\begin{thm}\label{thm:babKloosterman_bound}
Let $\psi = \psi_{m_1, m_2}$, $\psi' = \psi_{n_1, n_2}$. Then
\[\begin{aligned}
\vb{\operatorname{Kl}_p\rb{n_{s_\beta s_\alpha s_\beta,r,s}, \psi, \psi'}} \ll \begin{cases}
p^{\frac{s}{2} + \frac{r}{2} + \frac{1}{2} \operatorname{ord}_p(m_1) + \frac{1}{2}\min\cb{2r+\operatorname{ord}_p(m_2), s+\operatorname{ord}_p(n_2)}} & \text{ if } r\leq \frac{s}{2},\\
p^{s-\frac{r}{2}+\frac{1}{2}\operatorname{ord}_p(m_1)+\frac{1}{2}\min\cb{2r+\operatorname{ord}_p(m_2), s+\operatorname{ord}_p(n_2)}} & \text{ if } \frac{s}{2} < r < s,\\
p^{s+\min\cb{\operatorname{ord}_p(m_1), \operatorname{ord}_p(n_2)}}. & \text{ if } r=s.\end{cases}
\end{aligned}\]
\end{thm}
\begin{thm}\label{thm:w0Kloosterman_bound}
Let $\psi = \psi_{m_1, m_2}, \psi' = \psi_{n_1, n_2}$. Then
\[\begin{aligned}
\vb{\operatorname{Kl}_p\rb{n_{w_0, r, s}, \psi, \psi'}} &\ll \min\cb{p^{\frac{1}{2}\operatorname{ord}_p(m_1m_2)},p^{ \frac{1}{2}\operatorname{ord}_p(n_1n_2)}} \rb{s+1} p^{\frac{r}{2} + \frac{3s}{4} + \frac{1}{2}\min\cb{r,s}}.
\end{aligned}\]
\end{thm}
\Cref{thm:abKloosterman_bound} is proved in \Cref{section:abbaKloosterman}. The ingredients for the proof include deep results of Katz \cite{Katz2007} for multi-dimensional mixed exponential sums as well as the $p$-adic stationary phase method for higher prime powers. To prove \Crefrange{thm:abaKloosterman_bound}{thm:w0Kloosterman_bound}, we develop a stratification of $\operatorname{Sp}(2r)$ Kloosterman sums in \Cref{section:stratification}, generalising the stratification of $\operatorname{GL}(r)$ Kloosterman sums introduced by Stevens \cite{Stevens1987}. Let
\[\begin{aligned}
\mathcal T := \cbm{\begin{pmatrix} A\\ & cA^{-1} \end{pmatrix} \in \operatorname{GL}\rb{2r, \mathbb{Z}_p}}{A = \operatorname{diag} \rb{a_1, a_2, \cdots, a_r}, a_1, \cdots, a_r, c \in \mathbb{Z}_p^\times}.
\end{aligned}\]
be a set of diagonal matrices. We will show that for $n\in N\rb{\mathbb{Q}_p}$, there is a group action $\mathcal T\times X(n) \to X(n)$ sending $(t, \gamma)$ to $t\gamma s^{-1}$, where $s = n^{-1}tn$. The Kloosterman sum, as a sum over $X(n)$, can then be partitioned into sums over $\mathcal T$-orbits, in \Cref{thm:Stevens4.10}.
In \Cref{section:Sp4Kloosterman}, we give explicit formulations of $\operatorname{Sp}(4)$ Kloosterman sums $\operatorname{Kl}_p\rb{n, \psi, \psi'}$, in terms of Pl\"ucker coordinates given in \cite{Man2020}. Using \Cref{thm:Stevens4.10}, we prove \Crefrange{thm:abaKloosterman_bound}{thm:w0Kloosterman_bound} in \Cref{section:Sp4Kloosterman_bound}.
Let $F: T\rb{\mathbb{R}^+} \to \mathbb{C}$ be a smooth function with rapid decay. Let $\psi, \psi'$ be characters of $U(\mathbb{R})$ trivial on $U(\mathbb{Z})$. For $g = uy\in G/K$, where $u \in U(\mathbb{R})$, $y \in T\rb{\mathbb{R}^+}$, define $\mathcal F_\psi (g) := \psi\rb{u} F\rb{y}$. The symplectic Poincar\'e series associated to $F$ is given by
\[\begin{aligned}
P_\psi (g) = \sum\limits_{\gamma\in P_0 \cap \Gamma \backslash \Gamma} \mathcal F_\psi (\gamma g),
\end{aligned}\]
where $\Gamma = \operatorname{Sp}(2r, \mathbb{Z})$, and $P_0$ is the standard minimal parabolic subgroup of $G$. The $\psi'$-th Fourier coefficient of $P_\psi(g)$ is given by
\[\begin{aligned}
P_{\psi, \psi'} (g) = &\int_{U(\mathbb{Z}) \backslash U(\mathbb{R})} P_\psi\rb{ug} \ol{\psi'} (u) du.
\end{aligned}\]
We compute in \Cref{section:sym_Poincare} the Fourier coefficients $P_{\psi, \psi'} (g)$ of the Poincar\'e series $P_\psi(g)$, in terms of auxiliary Kloosterman sums, which are also defined in \Cref{section:sym_Poincare}. The bounds given in \Crefrange{thm:abKloosterman_bound}{thm:w0Kloosterman_bound} also apply to these auxiliary Kloosterman sums, via \Cref{prp:auxKl}.
\section*{Acknowledgement}
The author would like to thank Valentin Blomer for his guidance on the project.
\section{Stratification of symplectic Kloosterman sums} \label{section:stratification}
Consider the set of diagonal matrices
\[\begin{aligned}
\mathcal T := \cbm{\begin{pmatrix} A\\ & cA^{-1} \end{pmatrix} \in \operatorname{GL}\rb{2r, \mathbb{Z}_p}}{A = \operatorname{diag} \rb{a_1, a_2, \cdots, a_r}, a_1, \cdots, a_r, c \in \mathbb{Z}_p^\times}.
\end{aligned}\]
Note that in general elements of $\mathcal T$ are not symplectic.
\begin{lem}\label{lem:sympconj}
Let $u \in U\rb{\mathbb{Q}_p}$, and $t\in \mathcal T$. Then $tut^{-1} \in U\rb{\mathbb{Q}_p}$.
\end{lem}
\begin{proof}
Trivial.
\end{proof}
\begin{lem}\label{lem:torusconj}
Let $n\in N\rb{\mathbb{Q}_p}$, and $t \in \mathcal T$. Then $n^{-1} t n \in \mathcal T$.
\end{lem}
\begin{proof}
Write $n = wa$, with $w\in W$, $a\in T\rb{\mathbb{Q}_p}$. Consider $w^{-1} t w$. Suppose
\[\begin{aligned}
t = \operatorname{diag}\rb{a_1, \cdots, a_r, ca_1^{-1}, \cdots, ca_r^{-1}}.
\end{aligned}\]
Then in general $w^{-1} t w$ has the form
\[\begin{aligned}
w^{-1}tw = \operatorname{diag}\rb{\tau_{\sigma(1)} \rb{a_{\sigma(1)}}, \cdots, \tau_{\sigma(r)} \rb{a_{\sigma(r)}}, \tau_{\sigma(1)} \rb{ca_{\sigma(1)}^{-1}}, \cdots, \tau_{\sigma(r)} \rb{ca_{\sigma(r)}^{-1}}},
\end{aligned}\]
where $\sigma$ is a permutation of $\cb{1,\cdots, r}$, and $\tau_i: \cb{a_i, ca_i^{-1}} \to \cb{a_i, ca_i^{-1}}$ are permutations for $i = 1,\cdots, r$. Since $a_i = c\rb{ca_i^{-1}}^{-1}$, we see that $w^{-1}tw$ is of the form
\[\begin{aligned}
w^{-1} tw = \begin{pmatrix} A'\\&c{A'}^{-1}\end{pmatrix}.
\end{aligned}\]
So $w^{-1}tw \in \mathcal T$. Finally, we see that $n^{-1} t n = a^{-1} w^{-1} t w a = w^{-1} t w \in \mathcal T$.
\end{proof}
Let $\gamma = unu' \in C(n)$, and $t \in \mathcal T$. By \Cref{lem:torusconj}, $s := n^{-1}tn \in \mathcal T$. By \Cref{lem:sympconj}, we see that
\[\begin{aligned}
t\gamma s^{-1} = \rb{tut^{-1}} n \rb{su's^{-1}} \in U\rb{\mathbb{Q}_p} n U\rb{\mathbb{Q}_p} \cap G\rb{\mathbb{Z}_p} = C(n).
\end{aligned}\]
As conjugation by $t$ and $s$ preserves $U\rb{\mathbb{Z}_p}$ and $U_n\rb{\mathbb{Z}_p}$, this induces an action on $X(n)$:
\[\begin{aligned}
\mathcal T \times X(n) &\to X(n), & (t, x) &\mapsto t * x.
\end{aligned}\]
For characters $\psi: U\rb{\mathbb{Q}_p}/U\rb{\mathbb{Z}_p} \to \mathbb{C}^\times$, $\psi': U_n\rb{\mathbb{Q}_p}/U_n\rb{\mathbb{Z}_p} \to \mathbb{C}^\times$, decomposition of $X(n)$ into $\mathcal T$-orbits gives a decomposition of Kloosterman sums:
\[\begin{aligned}
\operatorname{Kl}_p\rb{n, \psi, \psi'} = \sum\limits_{x\in \mathcal T\backslash X(n)} \sum\limits_{y \in \mathcal T * x} \psi\rb{u(y)} \psi' \rb{u'(y)}.
\end{aligned}\]
Characters of $U\rb{\mathbb{Q}_p}/U\rb{\mathbb{Z}_p}$ have the form
\[\begin{aligned}
\psi \begin{pmatrix} 1 & x_1 & \cdots & * & * & \cdots & \cdots & *\\ & 1 & \ddots & \vdots & \vdots &&& \vdots\\ &&\ddots & x_{r-1} & \vdots &&& *\\&&&1&*&\cdots& * &x_r\\ &&&&1\\ &&&&-x_1&1\\&&&& \vdots & \ddots &\ddots\\ &&&& * & \cdots & -x_{r-1} & 1\end{pmatrix} = \prod\limits_{i=1}^r \operatorname{e}\rb{n_i x_i}, \quad n_i\in\mathbb{Z}.
\end{aligned}\]
We denote this character by $\psi = \psi_{n_1, \cdots, n_r}$. Let $\alpha_i = e_i - e_{i+1}, 1 \leq i \leq r-1$, and $\alpha_r = 2e_r$ be the simple roots of $T$ in $G$. Denote $\Delta = \cb{\alpha_1, \cdots, \alpha_r}$, and $\Delta_w = \cbm{\alpha \in \Delta}{w(\alpha)<0}$. For $x\in X$, suppose
{\small
\[\begin{aligned}
u(x) &= \begin{pmatrix} 1 & x_1 & \cdots & * & * & \cdots & \cdots & *\\ & 1 & \ddots & \vdots & \vdots &&& \vdots\\ &&\ddots & x_{r-1} & \vdots &&& *\\&&&1&*&\cdots& * &x_r\\ &&&&1\\ &&&&-x_1&1\\&&&& \vdots & \ddots &\ddots\\ &&&& * & \cdots & -x_{r-1} & 1\end{pmatrix}, &
u'(x) &= \begin{pmatrix} 1 & x'_1 & \cdots & * & * & \cdots & \cdots & *\\ & 1 & \ddots & \vdots & \vdots &&& \vdots\\ &&\ddots & x'_{r-1} & \vdots &&& *\\&&&1&*&\cdots& * &x'_r\\ &&&&1\\ &&&&-x'_1&1\\&&&& \vdots & \ddots &\ddots\\ &&&& * & \cdots & -x'_{r-1} & 1\end{pmatrix}.
\end{aligned}\]}Note that $x'_i = 0$ unless $\alpha_i \in \Delta_w$. For $x = u(x) n u'(x)$, define projections
\[\begin{aligned}
\kappa_i (x) &= x_i, & \kappa'_i (x) &= x'_i, & 1\leq i\leq r.
\end{aligned}\]
For $t = \operatorname{diag}\rb{a_1, \cdots, a_r, ca_1^{-1}, \cdots, ca_r^{-1}} \in \mathcal T$, we see that
\[\begin{aligned}
tu(x)t^{-1} = \begin{pmatrix} 1 & a_1a_2^{-1}x_1 & \cdots & * & * & \cdots & \cdots & *\\ & 1 & \ddots & \vdots & \vdots &&& \vdots\\ &&\ddots & a_{r-1}a_r^{-1}x_{r-1} & \vdots &&& *\\&&&1&*&\cdots& * & c^{-1}a_r^2x_r\\ &&&&1\\ &&&&-a_1a_2^{-1}x_1&1\\&&&& \vdots & \ddots &\ddots\\ &&&& * & \cdots & -a_{r-1}a_r^{-1}x_{r-1} & 1\end{pmatrix}.
\end{aligned}\]
Hence
\[\begin{aligned}
\kappa_i \rb{t*x} &= a_ia_{i+1}^{-1} \kappa_i(x), & &1\leq i\leq r-1,\\
\kappa_r \rb{t*x} &= c^{-1}a_r^2 \kappa_r(x),
\end{aligned}\]
and
\[\begin{aligned}
\kappa'_i \rb{t*x} &= \tau_{\sigma(i)}\rb{a_{\sigma(i)}} \tau_{\sigma(i+1)}\rb{a_{\sigma(i+1)}}^{-1} \kappa'_i(x), & &1\leq i\leq r-1,\\
\kappa'_r \rb{t*x} &= \tau_{\sigma(r)}\rb{a_{\sigma(r)}} \tau_{\sigma(r)}\rb{ca_{\sigma(r)}^{-1}}^{-1} \kappa'_r(x).
\end{aligned}\]
For $\ell\in\mathbb{N}$, we define
\[\begin{aligned}
A_w(\ell) &:= \rb{\mathbb{Z}/p^\ell \mathbb{Z}}^{\Delta} \times \rb{\mathbb{Z}/p^\ell \mathbb{Z}}^{\Delta_w},\\
V_w(\ell) &:= \cbm{\lambda \times \lambda' \in A_w(\ell)}{\begin{array}{l} \lambda_i, \lambda'_j \in \rb{\mathbb{Z}/p^\ell \mathbb{Z}}^\times \text{, such that } \exists t \in \mathcal T \text{ with }\\ \kappa_i(t*x) = \lambda_i \kappa_i(x), \kappa'_j(t*x) = \lambda'_j \kappa'_j(x)\\ \text{ for } x\in X(n), \; 1\leq i, j\leq r, \; \alpha_j\in \Delta_w \end{array}}.
\end{aligned}\]
Note that $\vb{V_w(\ell)} = \rb{p^\ell \rb{1-p^{-1}}}^r$. For a character $\theta: A_w(\ell) \to \mathbb{C}^\times$, we define
\[\begin{aligned}
S_w\rb{\theta; \ell} = \sum\limits_{v \in V_w(\ell)} \theta(v).
\end{aligned}\]
\begin{thm}\label{thm:Stevens4.10}
Let $n\in N\rb{\mathbb{Q}_p}$, and suppose $\ell$ is large enough such that the matrix entries of $u(x), u'(x)$ lie in $p^{-\ell}\mathbb{Z}_p / \mathbb{Z}_p$ for every $x\in X(n)$. Let $\psi = \psi_{n_1, \cdots, n_r}: U\rb{\mathbb{Q}_p}/U\rb{\mathbb{Z}_p} \to \mathbb{C}^\times$ and $\psi' = \psi_{n'_1, \cdots, n'_r}|_{U_n\rb{\mathbb{Q}_p}}: U_n\rb{\mathbb{Q}_p}/ U_n\rb{\mathbb{Z}_p} \to \mathbb{C}^\times$ be characters. Define the character $\theta_x: A_w(\ell) \to \mathbb{C}^\times$ by
\[\begin{aligned}
\theta_x\rb{\lambda \times \lambda'} = \prod\limits_{i=1}^r \operatorname{e}\rb{\lambda_i n_i \kappa_i(x)} \prod\limits_{\substack{i=1\\ w(\alpha_i)<0}}^r \operatorname{e}\rb{\lambda'_i n'_i \kappa'_i(x)}.
\end{aligned}\]
Then
\[\begin{aligned}
\operatorname{Kl}_p\rb{n, \psi, \psi'} = \rb{p^\ell\rb{1-p^{-1}}}^{-r} \sum\limits_{x\in \mathcal T\backslash X(n)} \mathfrak N(x) S_w\rb{\theta_x; \ell},
\end{aligned}\]
where $\mathfrak N(x) = \vb{\mathcal T*x}$ is the size of $\mathcal T$-orbit of $x\in X(n)$.
\end{thm}
\begin{proof}
Rewrite the Kloosterman sum
\[\begin{aligned}
\operatorname{Kl}_p\rb{n, \psi, \psi'} = &\sum\limits_{x\in \mathcal T\backslash X(n)} \sum\limits_{y \in \mathcal T * x} \psi\rb{u(y)} \psi' \rb{u'(y)}\\
= & \sum\limits_{x\in \mathcal T\backslash X(n)} \sum\limits_{y \in \mathcal T * x} \prod\limits_{i=1}^r \operatorname{e}\rb{n_i \kappa_i(y)} \prod\limits_{\substack{i=1\\ w(\alpha_i)<0}}^r \operatorname{e}\rb{n'_i \kappa'_i(y)}\\
= &\vb{V_w(\ell)}^{-1} \sum\limits_{x\in \mathcal T\backslash X(n)} \sum\limits_{y \in \mathcal T * x} \sum\limits_{\lambda\times\lambda' \in V_w(\ell)} \prod\limits_{i=1}^r \operatorname{e}\rb{\lambda_i n_i \kappa_i(y)} \prod\limits_{\substack{i=1\\ w(\alpha_i)<0}}^r \operatorname{e}\rb{\lambda'_i n'_i \kappa'_i(y)}\\
= &\vb{V_w(\ell)}^{-1} \sum\limits_{x\in \mathcal T\backslash X(n)} \mathfrak N(x) \sum\limits_{\lambda\times\lambda' \in V_w(\ell)} \prod\limits_{i=1}^r \operatorname{e}\rb{\lambda_i n_i \kappa_i(x)} \prod\limits_{\substack{i=1\\ w(\alpha_i)<0}}^r \operatorname{e}\rb{\lambda'_i n'_i \kappa'_i(x)}\\
= &\rb{p^\ell\rb{1-p^{-1}}}^{-r} \sum\limits_{x\in \mathcal T\backslash X(n)} \mathfrak N(x) S_w\rb{\theta_x; \ell}.
\end{aligned}\]
\end{proof}
\section{$\operatorname{Sp}(4)$ Kloosterman sums} \label{section:Sp4Kloosterman}
Now we give explicit formulations for Kloosterman sums for $G = \operatorname{Sp}\rb{4, \mathbb{Q}_p}$, classified by the image $w(n)$ of the projection onto $W$. Fix $\psi = \psi_{m_1, m_2}$, $\psi' = \psi_{n_1, n_2}$, where
\[\begin{aligned}
\psi_{m_1, m_2} \begin{pmatrix} 1& x_1&*&*\\&1&*&x_2\\&&1\\&&-x_1&1\end{pmatrix} = \operatorname{e}\rb{m_1x_1+m_2x_2}.
\end{aligned}\]
\begin{prp}\label{prp:Stevens3.2}
\cite[Theorem 3.2]{Stevens1987} Let $n\in N\rb{\mathbb{Q}_p}$, and $\psi: U\rb{\mathbb{Q}_p}/U\rb{\mathbb{Z}_p} \to \mathbb{C}^\times$, $\psi': U_n\rb{\mathbb{Q}_p}/ U_n\rb{\mathbb{Z}_p}\to \mathbb{C}^\times$ be characters. If $t\in T\rb{\mathbb{Z}_p^\times}$, then
\[\begin{aligned}
\operatorname{Kl}_p\rb{tn, \psi, \psi'} &= \operatorname{Kl}_p\rb{n, \psi_t, \psi'},\\
\operatorname{Kl}_p\rb{nt^{-1}, \psi, \psi'} &= \operatorname{Kl}_p\rb{n, \psi, \psi'_t},
\end{aligned}\]
where $\psi_t (u) = \psi(tut^{-1})$.
\end{prp}
By \Cref{prp:Stevens3.2}, it suffices to consider Kloosterman sums $\operatorname{Kl}_p\rb{n, \psi, \psi'}$ for $n$ such that entries of $n$ are powers of $p$, and $X(n)$ is nonempty. We shall express the Kloosterman sums using Pl\"ucker coordinates, which were introduced in \cite{BFH1990}, and adopted by the author \cite{Man2020} to describe double coset representatives $R_w = P_0 \cap \Gamma \backslash \Gamma \cap G_w / \Gamma_w$. See \Cref{section:sym_Poincare} for an explicit definition for $R_w$.
\begin{enumerate}[label=(\roman*)]
\item $w = \operatorname{id}$. We have $n = n_{\operatorname{id}} := I_4$, and the Kloosterman sum is trivial:
\[\begin{aligned}
\operatorname{Kl}_p\rb{n_{\operatorname{id}}, \psi, \psi'} = 1.
\end{aligned}\]
\item $w = s_\alpha$. We have
\[\begin{aligned}
n = n_{s_\alpha, r} := \begin{pmatrix} & p^{-r}\\ -p^r\\&&& p^r\\&&-p^{-r}\end{pmatrix}
\end{aligned}\]
for $r\geq 0$, and the corresponding Kloosterman sum is actually a $\operatorname{GL}(2)$ Kloosterman sum:
\[\begin{aligned}
\operatorname{Kl}_p\rb{n_{s_\alpha, r}, \psi, \psi'} = S\rb{m_1, n_1; p^r}.
\end{aligned}\]
\item $w = s_\beta$. We have
\[\begin{aligned}
n = n_{s_\beta, s} := \begin{pmatrix} 1\\&&&p^{-s}\\&&1\\&-p^s\end{pmatrix}
\end{aligned}\]
for $s\geq 0$, and the corresponding Kloosterman sum is actually a $\operatorname{GL}(2)$ Kloosterman sum:
\[\begin{aligned}
\operatorname{Kl}_p\rb{n_{s_\beta, s}, \psi, \psi'} = S\rb{m_2, n_2; p^s}.
\end{aligned}\]
\item $w = s_\alpha s_\beta$. We have
\[\begin{aligned}
n = n_{s_\alpha s_\beta, r, s} := \begin{pmatrix} &&&-p^{-r}\\ p^{r-s}\\ &p^r\\ &&p^{s-r}\end{pmatrix},
\end{aligned}\]
where $0\leq s\leq r$. The Kloosterman sum is given by
\[\begin{aligned}
\operatorname{Kl}_p \rb{n_{s_\alpha s_\beta, r, s}, \psi, \psi'} = \sum\limits_{\substack{v_4 \ppmod{p^s}\\ (v_4, p) = 1}} \sum\limits_{\substack{v_3 \ppmod{p^r}\\ (v_3, p^{r-s}) = 1}} \operatorname{e}\rb{\frac{m_1\ol{v_3}}{p^{r-s}}} \operatorname{e}\rb{\frac{m_2 \ol{v_4} v_3^2 + n_2 v_4}{p^s}}.
\end{aligned}\]
\item $w = s_\beta s_\alpha$. We have
\[\begin{aligned}
n = n_{s_\beta s_\alpha, r, s} := \begin{pmatrix} & p^{-r}\\ && p^{r-s}\\ &&& p^r\\ -p^{s-r}\end{pmatrix},
\end{aligned}\]
where $2r\leq s$. The Kloosterman sum is given by
\[\begin{aligned}
\operatorname{Kl}_p \rb{n_{s_\beta s_\alpha, r, s}, \psi, \psi'} = \sum\limits_{\substack{v_{24} \ppmod{p^r}\\ (v_{24}, p) = 1}} \sum\limits_{\substack{v_{34}\ppmod{p^s}\\ (v_{34}, p^{s-2r}) = 1}} \operatorname{e}\rb{\frac{m_1 \ol{v_{24}} v_{34} + n_1 v_{24}}{p^r}} \operatorname{e}\rb{\frac{m_2 \ol{v_{34}}}{p^{s-2r}}}.
\end{aligned}\]
\begin{rmk}
This Kloosterman sum can also be considered as a $\operatorname{GL}(3)$ Kloosterman sum. Precisely, following the notation in \cite[(4.3)]{BFG1988}, we have
\[\begin{aligned}
\operatorname{Kl}_p \rb{n_{s_\beta s_\alpha, r, s}, \psi, \psi'} = p^r S\rb{n_1, m_1, m_2; p^r, p^{s-r}}.
\end{aligned}\]
A non-trivial bound for $\operatorname{Kl}_p \rb{n_{s_\beta s_\alpha, r, s}, \psi, \psi'}$ then follows from Larsen \cite[Appendix]{BFG1988}. This gives a proof to \Cref{thm:baKloosterman_bound}.
\end{rmk}
\item $w = s_\alpha s_\beta s_\alpha$. We have
\[\begin{aligned}
n = n_{s_\alpha s_\beta s_\alpha, r, s} := \begin{pmatrix} &&-p^{-r}\\&p^{r-s}\\ p^r\\&&&p^{s-r}\end{pmatrix},
\end{aligned}\]
where $s\leq 2r$. The Kloosterman sum is given by
\[\begin{aligned}
\operatorname{Kl}_p\rb{n_{s_\alpha s_\beta s_\alpha, r, s}, \psi, \psi'} = &\sum\limits_{\substack{v_2, v_3, v_4 \ppmod{p^r}\\ v_2 = p^{r-a} v'_2, \; (v'_2, p)=1, \; s-r\leq a \leq s/2\\ (v_3, v_4, p^{r-a}) = 1\\ \rb{p^{r-a}, p^a v_3 + v'_2 v_4} = p^{r+a-s}}} \operatorname{e}\rb{\frac{m_1\hat v_2 + n_1v_2}{p^r}} \operatorname{e}\rb{\frac{m_2 u}{p^s}},
\end{aligned}\]
where $\hat v_2$ is chosen modulo $p^r$ such that
\begin{align}\label{eq:abaKloosterman_v2hat}
\hat v_2 v_3 &\equiv -v'_2 p^{s-a} \pmod{p^r}, & \hat v_2 v_4 \equiv p^s \pmod{p^r},
\end{align}
and
\begin{align}\label{eq:abaKloosterman_u}
u \equiv \begin{cases} -\ol{v'_2}^2 v_3 p^{2a+r-s} + \ol{V' v'_2} v_3^2 p^{2a} + \ol{v'_2} v_4 p^{a+r-s} \pmod{p^s} & \text{if } a<\frac{s}{2},\\
-\ol{v'_2}^2 v_3 p^{2a+r-s} + \ol{v'_2} v_4p^{a+r-s} \pmod{p^s} & \text{if } a=\frac{s}{2},\end{cases}
\end{align}
where $V' = p^{s-r-a} \rb{p^a v_3 + v'_2 v_4}$.
\item
$w = s_\beta s_\alpha s_\beta$. We have
\[\begin{aligned}
n = n_{s_\beta s_\alpha s_\beta, r, s} := \begin{pmatrix} &&& -p^{-r}\\ &&p^{r-s}\\ &p^r\\ -p^{s-r}\end{pmatrix},
\end{aligned}\]
where $r\leq s$. The Kloosterman sum is given by
\[\begin{aligned}
\operatorname{Kl}_p\rb{n_{s_\beta s_\alpha s_\beta, r, s}, \psi, \psi'} = \sum\limits_{\substack{v_{13}, v_{14}, v_{23} \ppmod{p^s}\\ \rb{p^s, v_{13}, v_{14}} = p^{s-r}\\ \rb{p^s, v_{14}} \mid v_{13}^2\\ \rb{p^{s-r}, v_{23}, v_{34}} = 1}} \operatorname{e}\rb{\frac{m_1 u}{p^r}} \operatorname{e}\rb{\frac{m_2 \hat v_{14} + n_2 v_{14}}{p^s}},
\end{aligned}\]
where $v_{34} = -p^{-s} \rb{v_{13}^2 + v_{14}v_{23}}$, $u$ is chosen modulo $p^r$ such that
\begin{align}\label{eq:babKloosterman_u}
u v_{13} p^{r-s} &\equiv v_{23} \pmod{p^r}, & u v_{14} p^{r-s} &\equiv - v_{13} \pmod{p^r},
\end{align}
and $\hat v_{14}$ is chosen modulo $p^s$ such that
\begin{align}\label{eq:babKloosterman_v14hat}
\hat v_{14} v_{23} &\equiv -p^{2r} \pmod{p^s}, & \hat v_{14} v_{34} & \equiv v_{14} p^{2r-s} \pmod{p^s}.
\end{align}
\item $w= w_0$. We have
\[\begin{aligned}
n = n_{w_0, r, s} := \begin{pmatrix} && -p^{-r}\\ &&&-p^{r-s}\\ p^r\\ &p^{s-r}\end{pmatrix}.
\end{aligned}\]
The Kloosterman sum is given by
\[\begin{aligned}
\operatorname{Kl}_p \rb{n_{w_0, r, s}, \psi, \psi'} = \sum\limits_{\substack{v_2, v_3, v_4\ppmod{p^r}\\ v_{13}, v_{14}\ppmod{p^s}\\ v_{13}p^r + v_2v_{14} - v_4p^s = 0\\ (p^r, v_2, v_3, v_4) = 1\\ (p^s, v_{13}, v_{14}, v_{23}, v_{34}) = 1}} \operatorname{e}\rb{\frac{m_1\hat v_2 + n_1 v_2}{p^r}} \operatorname{e}\rb{\frac{m_2\hat v_{14} + n_2 v_{14}}{p^s}},
\end{aligned}\]
where $\hat v_2$ is chosen modulo $p^r$ such that
\begin{align}\label{v2hatcong}
\hat v_2 v_2 \equiv p^s \pmod{p^r}, \quad \hat v_2 v_3 \equiv v_{13} \pmod{p^r}, \quad \hat v_2 v_4 \equiv v_{14} \pmod{p^r};
\end{align}
and $\hat v_{14}$ chosen modulo $p^s$ such that
\begin{align}
\begin{aligned}
\hat v_{14}v_{13} &\equiv -v_2p^r \pmod{p^s}, & \hat v_{14}v_{14} &\equiv p^{2r} \pmod{p^s},\\
\hat v_{14}v_{23} &\equiv -v_2^2 \pmod{p^s}, & \hat v_{14} v_{34} &\equiv v_3p^r+v_2v_4\pmod{p^s}.
\end{aligned}
\end{align}
\end{enumerate}
\subsection{Properties of $\operatorname{Sp}(4)$ Kloosterman sums} \label{section:Sp4Kloosterman_prop}
\begin{prp}\label{prp:w0Kloosterman_swap}
Let $n\in N\rb{\mathbb{Q}_p}$, such that $w(n) = w_0$ is the long Weyl element. Let $\psi, \psi': U\rb{\mathbb{Q}_p} / U\rb{\mathbb{Z}_p} \to \mathbb{C}^\times$ be characters. Then
\[\begin{aligned}
\operatorname{Kl}_p\rb{n, \psi, \psi'} = \operatorname{Kl}_p\rb{n, \psi', \psi}.
\end{aligned}\]
\end{prp}
\begin{proof}
By \Cref{prp:Stevens3.2}, it suffices to consider the case where
\[\begin{aligned}
n = n_{s_\alpha s_\beta s_\alpha s_\beta} = \begin{pmatrix} && -p^{-r}\\ &&&-p^{r-s}\\ p^r\\ &p^{s-r}\end{pmatrix}.
\end{aligned}\]
Let $x = u n u' \in X(n)$. Write
\[\begin{aligned}
u &= \begin{pmatrix} 1 & \alpha_1 & \alpha_2 & \alpha_3\\ & 1 & \alpha_4 & \alpha_5\\ &&1\\ &&-\alpha_1 & 1\end{pmatrix} \in U\rb{\mathbb{Z}_p} \backslash U\rb{\mathbb{Q}_p}, & u' &= \begin{pmatrix} 1 & \beta_1 & \beta_2 & \beta_3\\ &1 & \beta_4 & \beta_5\\ &&1\\&&-\beta_1 & 1\end{pmatrix} \in U\rb{\mathbb{Q}_p} / U\rb{\mathbb{Z}_p}.
\end{aligned}\]
Then
\[\begin{aligned}
{\small
\gamma = \begin{pmatrix} \alpha_2 p^r & \alpha_2 \beta_1 p^r + \alpha_3 p^{s-r} & \alpha_1 \beta_1 p^{r-s} + \alpha_2\beta_2 p^r + \alpha_3 \beta_4 p^{s-r} - p^{-r} & \alpha_2 \beta_3 p^r + \alpha_3 \beta_5 p^{s-r} - \alpha_1 p^{r-s}\\
\alpha_4 p^r & \alpha_4 \beta_1 p^r + \alpha_5 p^{s-r} & \alpha_4 \beta_2 p^r + \alpha_5 \beta_4 p^{s-r} + \beta_1 p^{r-s} & \alpha_4 \beta_3 p^r + \alpha_5 \beta_5 p^{s-r} - p^{r-s}\\
p^r & \beta_1 p^r & \beta_2 p^r & \beta_3 p^r\\
-\alpha_1 p^r & -\alpha_1 \beta_1 p^r + p^{s-r} & -\alpha_1 \beta_2 p^r + \beta_4 p^{s-r} & -\alpha_1 \beta_3 p^r + \beta_5 p^{s-r}\end{pmatrix} \in G\rb{\mathbb{Z}_p}.
}
\end{aligned}\]
Now let
\[\begin{aligned}
\tilde u &= \begin{pmatrix} 1 & \beta_1 & \beta_2 & -\beta_4\\ & 1 &-\beta_3 & \beta_5\\ &&1\\&&-\beta_1 & 1\end{pmatrix}, & \tilde u' &= \begin{pmatrix} 1 & \alpha_1 & \alpha_2 & -\alpha_4\\ &1 & -\alpha_3 & \alpha_5\\ &&1\\&&-\alpha_1 & 1\end{pmatrix}.
\end{aligned}\]
Then we see that
\[\begin{aligned}
\tilde x = &\tilde u n \tilde u'\\
= &{\small \begin{pmatrix} \beta_2 p^r & \alpha_1\beta_2 p^r - \beta_4 p^{s-r} & \alpha_1 \beta_1 p^{r-s} + \alpha_2 \beta_2 p^r + \alpha_3 \beta_4 p^{s-r} - p^{-r} & -\alpha_4 \beta_2 p^r - \alpha_5 \beta_4 p^{s-r} - \beta_1 p^{r-s}\\
-\beta_3 p^r & -\alpha_1 \beta_3 p^r + \beta_5 p^{s-r} & -\alpha_2 \beta_3 p^r - \alpha_3 \beta_5 p^{s-r} + \alpha_1 p^{r-s} & \alpha_4 \beta_3 p^r + \alpha_5 \beta_5 p^{s-r} - p^{r-s}\\
p^r & \alpha_1 p^r & \alpha_2 p^r & -\alpha_4 p^r\\
-\beta_1 p^r & -\alpha_1 \beta_1 p^r + p^{s-r} & -\alpha_2 \beta_1 p^r - \alpha_3 p^{s-r} & \alpha_4 \beta_1 p^r + \alpha_5 p^{s-r}\end{pmatrix} \in G\rb{\mathbb{Z}_p}}.
\end{aligned}\]
Therefore
\[\begin{aligned}
\operatorname{Kl}_p\rb{n, \psi, \psi'} = &\sum\limits_{x\in X(n)} \psi\rb{u(x)} \psi'\rb{u'(x)} = \sum\limits_{x\in X(n)} \psi\rb{u'(\tilde x)} \psi'\rb{u(\tilde x)}\\
= &\sum\limits_{x\in X(n)} \psi'\rb{u(x)} \psi\rb{u'(x)} = \operatorname{Kl}_p\rb{n, \psi', \psi}.
\end{aligned}\]
\end{proof}
We also give a few reduction formulae for Kloosterman sums, which are straightforward to prove.
\begin{prp}
Let $\psi = \psi_{m_1, m_2}$, $\psi' = \psi_{n_1, n_2}$. Then
\[\begin{aligned}
\operatorname{Kl}_p\rb{n_{w_0, r, 0}, \psi, \psi'} &= S\rb{m_1, n_1; p^r}, & \operatorname{Kl}_p\rb{n_{w_0, 0, s}, \psi, \psi'} &= S\rb{m_2, n_2; p^s},\\
\operatorname{Kl}_p\rb{n_{s_\alpha s_\beta s_\alpha, r, 0}, \psi, \psi'} &= c_{p^r}\rb{m_1}, & \operatorname{Kl}_p\rb{n_{s_\beta s_\alpha s_\beta, 0, s}, \psi, \psi'} &= c_{p^s}\rb{m_2},\\
\operatorname{Kl}_p\rb{n_{s_\alpha s_\beta, r, 0}, \psi, \psi'} &= c_{p^r}\rb{m_1}, & \operatorname{Kl}_p\rb{n_{s_\beta s_\alpha, 0, s}, \psi, \psi'} &= c_{p^s}\rb{m_2}.
\end{aligned}\]
\end{prp}
\section{Bounds for $\operatorname{Sp}(4)$ Kloosterman sums} \label{section:Sp4Kloosterman_bound}
We establish in this section non-trivial bounds for $\operatorname{Kl}_p\rb{n_{w,r,s}, \psi, \psi'}$, that is, prove \Crefrange{thm:abKloosterman_bound}{thm:w0Kloosterman_bound}.
Firstly, $\operatorname{Kl}_p\rb{n_{s_\alpha, r}, \psi, \psi'}$ and $\operatorname{Kl}_p\rb{n_{s_\beta, s}, \psi, \psi'}$ are just $\operatorname{GL}(2)$ Kloosterman sums. A well-known bound for $\operatorname{GL}(2)$ Kloosterman sums is given by \cite{Smith1980}
\begin{align}\label{eq:Kloosterman_GL2bound}
\vb{S(\mu, \nu; p^k)} \leq 2 p^{k/2} (\vb{\mu}_p^{-1}, \vb{\nu}_p^{-1}, p^k)^{1/2}.
\end{align}
So
\[\begin{aligned}
\vb{\operatorname{Kl}_p\rb{n_{s_\alpha, r}, \psi, \psi'}} \ll_{r, \psi, \psi'} p^{r/2}, \text{ and } \vb{\operatorname{Kl}_p\rb{n_{s_\beta, s}, \psi, \psi'}} \ll_{s, \psi, \psi'} p^{s/2}.
\end{aligned}\]
\subsection{Bounds for $\operatorname{Sp}(4)$ Kloosterman sums attached to $w = s_\alpha s_\beta$ and $s_\beta s_\alpha$} \label{section:abbaKloosterman}
We prove non-trivial bounds for Kloosterman sums $\operatorname{Kl}_p\rb{n_{s_\alpha s_\beta, r, s}, \psi, \psi'}$ and $\operatorname{Kl}_p\rb{n_{s_\beta s_\alpha, r, s}, \psi, \psi'}$.
\begin{proof}[Proof of \Cref{thm:abKloosterman_bound}]
Without loss of generality, we assume $\operatorname{ord}_p(m_1) \leq r-s$, and $\operatorname{ord}_p(m_2), \operatorname{ord}_p(n_2) \leq s$. Observe that
\[\begin{aligned}
\operatorname{Kl}\rb{n_{s_\alpha s_\beta, r,s}, \psi_{m_1, m_2}, \psi_{n_1, n_2}} = p^{k+2l} \operatorname{Kl}\rb{n_{s_\alpha s_\beta, r-k-l, s-l}, \psi_{m_1 p^{-k}, m_2 p^{-l}}, \psi_{n_1, n_2 p^{-l}}}
\end{aligned}\]
whenever $p^k \mid \rb{m_1, p^{r-s}}$ and $p^l \mid \rb{m_2, n_2, p^s}$. So we may assume $s=0$, $r=s$, or $p\nmid m_1\rb{m_2, n_2}$.
If $s=0$, then
\[\begin{aligned}
\operatorname{Kl}\rb{n_{s_\alpha s_\beta, r,0}, \psi, \psi'} = \sum\limits_{\substack{v_3\ppmod{p^r}\\ (v_3, p^r) = 1}} e\rb{\frac{m_1\ol{v_3}}{p^r}} \leq p^{\operatorname{ord}_p(m_1)}.
\end{aligned}\]
If $r=s$, then
\[\begin{aligned}
\operatorname{Kl}\rb{n_{s_\alpha s_\beta, r,r}, \psi, \psi'} = \sum\limits_{\substack{v_4\ppmod{p^r}\\ (v_4, p) = 1}} \sum\limits_{v_3\ppmod{p^r}} e\rb{\frac{m_2\ol{v_4}v_3^2 + n_2v_4}{p^r}} \leq p^{r+\frac{\operatorname{ord}_p(m_2)}{2} + \frac{\operatorname{ord}_p(n_2)}{2}}
\end{aligned}\]
is just a summation of quadratic Gauss sums, and is easily evaluated.
Now suppose $p \nmid m_1 \rb{m_2, n_2}$. If $p \mid m_2$ and $s>1$, then
\[\begin{aligned}
\operatorname{Kl}\rb{n_{s_\alpha s_\beta, r,s}, \psi, \psi'} = &\sum\limits_{\substack{v_4\ppmod{p^{s-1}}\\ (v_4, p) = 1}} \sum\limits_{\substack{v_3\ppmod{p^r}\\ (v_3, p^{r-s}) = 1}} \sum\limits_{k=0}^{p-1} e\rb{\frac{m_1\ol{v_3}}{p^{r-s}}} e\rb{\frac{m_2 \ol{v_4} v_3^2 + n_2 \rb{v_4+ k p^{s-1}}}{p^s}}\\
= &p \sum\limits_{k=0}^{p-1} e\rb{\frac{n_2k}{p}} \operatorname{Kl}\rb{n_{s_\alpha s_\beta, r-1, s-1}, \psi_{m_1, m_2/p}, \psi'} = 0.
\end{aligned}\]
If $p \mid m_2$ and $s=1$, the same argument shows that the sum is either 0 or $p$. And similarly, if $p \mid n_2$, the sum is also either 0 or $p$. So we may assume $p\nmid m_1m_2n_2$. When $p$ is odd, it follows that the sum is zero unless $r=2s$. When $s=1$, we have
\[\begin{aligned}
\operatorname{Kl}_p\rb{n_{s_\alpha s_\beta, 2,1}, \psi, \psi'} = p \sum\limits_{\substack{v_4\ppmod{p}\\ (v_4, p) = 1}} \sum\limits_{\substack{v_3\ppmod{p}\\ (v_3, p) = 1}} e\rb{\frac{m_1\ol{v_3} + m_2 \ol{v_4} v_3^2 + n_2 v_4}{p}},
where the factor $p$ arises because the summand depends on $v_3$ only modulo $p$, while $v_3$ originally runs modulo $p^2$.
\end{aligned}\]
The sum can be converted into a mixed character sum, in the style introduced in \cite{Schmidt1976}. Rewrite the sum
\[\begin{aligned}
\operatorname{Kl}_p\rb{n_{s_\alpha s_\beta, 2,1}, \psi, \psi'} = p \sum\limits_{a \in\mathbb{F}_p} \sum\limits_{y \in \mathbb{F}_p^\times} e\rb{\frac{a}{p}} Z(a,y),
\end{aligned}\]
where $Z(a,y) = \#\cbm{x \in \mathbb{F}_p^\times}{m_1\ol{y} + m_2 \ol{x} y^2 + n_2 x = a}$. We solve $x = \frac{(ay-m_1)\pm \sqrt{(ay-m_1)^2 - 4m_2n_2y^4}}{2n_2y}$, which may or may not lie in $\mathbb{F}_p^\times$ depending on whether $(ay-m_1)^2 - 4m_2n_2y^4$ is a square in $\mathbb{F}_p$. It follows that $Z(a,y) = 1 + \chi\rb{(ay-m_1)^2 - 4m_2n_2y^4}$, where $\chi$ is the quadratic character of $\mathbb{F}_p$. So
\[\begin{aligned}
\operatorname{Kl}_p\rb{n_{s_\alpha s_\beta, 2,1}, \psi, \psi'} = &p \sum\limits_{a \in\mathbb{F}_p} \sum\limits_{y \in \mathbb{F}_p^\times} e\rb{\frac{a}{p}} \rb{1+\chi\rb{(ay-m_1)^2 - 4m_2n_2y^4}}\\
= &p \sum\limits_{a \in\mathbb{F}_p} \sum\limits_{y \in \mathbb{F}_p^\times} e\rb{\frac{a}{p}} + p \sum\limits_{a \in\mathbb{F}_p} \sum\limits_{y \in \mathbb{F}_p^\times} e\rb{\frac{a}{p}} \chi\rb{(ay-m_1)^2 - 4m_2n_2y^4}\\
= &p \sum\limits_{a \in\mathbb{F}_p} \sum\limits_{y \in \mathbb{F}_p} e\rb{\frac{a}{p}} \chi\rb{(ay-m_1)^2 - 4m_2n_2y^4} - p \sum\limits_{a \in \mathbb{F}_p} e\rb{\frac{a}{p}} \chi\rb{m_1^2}\\
= &p \sum\limits_{a \in\mathbb{F}_p} \sum\limits_{y \in \mathbb{F}_p} e\rb{\frac{a}{p}} \chi\rb{(ay-m_1)^2 - 4m_2n_2y^4}.
\end{aligned}\]
This exponential sum is estimated by Katz \cite[Theorem 1.1]{Katz2007} to be
\[\begin{aligned}
\operatorname{Kl}_p\rb{n_{s_\alpha s_\beta, 2,1}, \psi, \psi'} \ll p^2.
\end{aligned}\]
If $s>1$, we apply the stationary phase method. Let $f(x,y) = \frac{m_1}{x} + \frac{m_2 x^2}{y} + n_2 y$. Consider the sum
\[\begin{aligned}
S = \sum\limits_{x, y\in \rb{\mathbb{Z}/p^s\mathbb{Z}}^\times} \operatorname{e}\rb{\frac{f(x,y)}{p^s}} = p^{-s} \operatorname{Kl}_p \rb{n_{s_\alpha s_\beta, 2s, s}, \psi, \psi'}.
\end{aligned}\]
Let $j\geq 1$ be such that $2j\leq s$. Define
\begin{align}
D\rb{\mathbb{Z}/p^j\mathbb{Z}} &= \cbm{(x,y) \in \rb{\mathbb{Z}/p^j\mathbb{Z}}^\times \times \rb{\mathbb{Z}/p^j\mathbb{Z}}^\times}{\nabla f(x,y) \equiv 0\pmod{p^j}}\nonumber\\
&= \cbm{\rb{x,y} \in \rb{\mathbb{Z}/p^j\mathbb{Z}}^\times \times \rb{\mathbb{Z}/p^j\mathbb{Z}}^\times}{\begin{array}{l} 2m_2 x^3 \equiv m_1y \pmod{p^j},\\
m_2 x^2 \equiv n_2 y^2 \pmod{p^j}\end{array}}. \label{eq:SP_D_cong_2slr}
\end{align}
The proof then follows from a theorem of D\k abrowski-Fisher.
\begin{thm}\label{thm:DF1.8a}
\cite[Theorem 1.8(a)]{DF1997} Let $f$ and $S$ be defined as above. Let $H_{x,y}$ be the Hessian matrix of $f$ at $(x,y)$, and $t$ be the maximum value of $2-\operatorname{rank}_{\mathbb{F}_p} H_{x,y}$ for $(x,y)$ in $D\rb{\mathbb{Z}/p^j\mathbb{Z}}$. Then $\vb{S} \leq \vb{D\rb{\mathbb{Z}/p^j\mathbb{Z}}} p^{s+t/2}$.
\end{thm}
It is straightforward to check that $\vb{D\rb{\mathbb{Z}/p^j\mathbb{Z}}} \leq 4$, and $H_{x,y}$ is invertible over $\mathbb{F}_p$ for all $(x,y)\in D\rb{\mathbb{Z}/p^j\mathbb{Z}}$, so $\operatorname{rank}_{\mathbb{F}_p} H_{x,y} = 2$. So we deduce from \Cref{thm:DF1.8a} that
\[\begin{aligned}
\operatorname{Kl}_p\rb{n_{s_\alpha s_\beta, r,s}, \psi, \psi'} \leq 4p^{2s}.
\end{aligned}\]
This finishes the proof of the theorem.
\end{proof}
\begin{proof}[Proof of \Cref{thm:baKloosterman_bound}] The bound directly follows from the estimate given by Larsen \cite[Appendix]{BFG1988} for the $\operatorname{GL}(3)$ Kloosterman sum $S\rb{n_1, m_1, m_2; p^r, p^{s-r}}$.
\end{proof}
\subsection{Bounds for $\operatorname{Sp}(4)$ Kloosterman sums attached to $w = s_\alpha s_\beta s_\alpha$ and $s_\beta s_\alpha s_\beta$}
To obtain a non-trivial bound for Kloosterman sums $\operatorname{Kl}_p\rb{n_{s_\alpha s_\beta s_\alpha, r, s}, \psi, \psi'}$ and $\operatorname{Kl}_p\rb{n_{s_\beta s_\alpha s_\beta, r, s}, \psi, \psi'}$, we decompose the Kloosterman sums as in \Cref{section:stratification}.
\begin{proof}[Proof of \Cref{thm:abaKloosterman_bound}]
Let $w = s_\alpha s_\beta s_\alpha$, and $n = n_{s_\alpha s_\beta s_\alpha, r, s}$. Note that we have $s\leq 2r$. Then $\Delta_w = \cb{\alpha}$, and
\[\begin{aligned}
A_w(\ell) = \rb{\mathbb{Z}/p^\ell\mathbb{Z}}^2 \times \rb{\mathbb{Z}/p^\ell\mathbb{Z}}.
\end{aligned}\]
Let $t = \operatorname{diag}\rb{a_1, a_2, ca_1^{-1}, ca_2^{-1}}\in\mathcal T$. Then $s = n^{-1}tn = \operatorname{diag}\rb{ca_1^{-1}, a_2, a_1, ca_2^{-1}}$. We compute
\[\begin{aligned}
\kappa'_1\rb{t*x} = ca_1^{-1}a_2^{-1} \kappa_1'(x).
\end{aligned}\]
So
\[\begin{aligned}
V_w(\ell) = \cbm{\lambda \times \lambda' \in A_w(\ell)}{\begin{array}{l}\lambda_1, \lambda_2, \lambda'_1\in \rb{\mathbb{Z}/p^\ell\mathbb{Z}}^\times,\\\lambda_1\lambda_2\lambda'_1= 1\end{array}}.
\end{aligned}\]
If $\theta:A_w(\ell) \to \mathbb{C}^\times$ is given by
\[\begin{aligned}
\theta \rb{\lambda \times \lambda'} = \operatorname{e}\rb{\frac{n_1\lambda_1+n_2\lambda_2}{p^\ell}} \operatorname{e}\rb{\frac{n'_1\lambda'_1}{p^\ell}}, \quad n_1, n_2, n'_1\in\mathbb{Z},
\end{aligned}\]
then
\begin{align}\label{eq:abaKloosterman_GL2decomp}
S_w\rb{\theta, \ell} = \sum\limits_{\lambda_2\in\rb{\mathbb{Z}/p^\ell\mathbb{Z}}^\times} \operatorname{e}\rb{\frac{n_2\lambda_2}{p^\ell}} S\rb{n_1\lambda_2^{-1}, n'_1; p^\ell}.
\end{align}
In terms of Pl\"ucker coordinates (see \cite[Section 3.2]{Man2020}), $n = n_{s_\alpha s_\beta s_\alpha, r, s}$ says $v_1 = p^r$, and $v_{14} = p^s$. Suppose $x_{a,b}^{v_3} \in X(n)$ has coordinates
\[\begin{aligned}
\rb{v_1, v_2, v_3, v_4; v_{14}} = \rb{p^r, p^{r-a}, v_3, p^{r-b}; p^s}.
\end{aligned}\]
Let $\delta = \rb{p^{r-a}, p^a v_3 + p^{r-b}}$. Then $v_{14} = \frac{p^{r+a}}{\delta}$. This says $s-r \leq a \leq \frac{s}{2}$, $b\leq r$. Then $\delta = p^{r+a-s}$. Then
\[\begin{aligned}
u'\rb{x_{a,b}^{v_3}} = \begin{pmatrix} 1 & p^{-a} & v_3p^{-r} & p^{-b}\\ & 1 & p^{-b}\\ &&1\\&&-p^{-a}&1\end{pmatrix} \pmod{ U\rb{\mathbb{Z}_p}}.
\end{aligned}\]
Let $X_{a,b}^{v_3}(n) = \mathcal T * x_{a,b}^{v_3}$, and define
\[\begin{aligned}
S_{a,b}^{v_3} \rb{n,\psi,\psi'} = \sum\limits_{x \in X_{a,b}^{v_3}(n)} \psi\rb{u(x)} \psi'\rb{u'(x)}.
\end{aligned}\]
We also let
\[\begin{aligned}
X_{a,b}(n) = \bigcup\limits_{\substack{v_3\pmod{p^r}\\ \rb{p^{r-a}, p^a v_3 + p^{r-b}} = p^{r+a-s}}} X_{a,b}^{v_3}(n),
\end{aligned}\]
and
\[\begin{aligned}
S_{a,b}\rb{n,\psi,\psi'} = \sum\limits_{x\in X_{a,b}(n)} \psi\rb{u(x)} \psi'\rb{u'(x)}.
\end{aligned}\]
It is easy to see that
\[\begin{aligned}
X\rb{n} = \coprod\limits_{\substack{s-r\leq a \leq s/2\\ 0\leq b\leq r}} X_{a,b}(n).
\end{aligned}\]
As $r\geq \frac{s}{2} \geq a$, $r\geq b$, we see that $u(x), u'(x)$ have entries in $p^{-r}\mathbb{Z}_p/\mathbb{Z}_p$ for all $x \in X(n)$. Let $\mathcal S_{a,b}$ be a finite subset of $\mathbb{Z}_p$ such that
\[\begin{aligned}
X_{a,b}(n) = \coprod\limits_{v_3\in \mathcal S_{a,b}} X_{a,b}^{v_3} (n).
\end{aligned}\]
By \Cref{thm:Stevens4.10}, we have
\[\begin{aligned}
S_{a,b}\rb{n,\psi,\psi'} = p^{-4r} \rb{1-p^{-1}}^{-2} \sum\limits_{v_3\in \mathcal S_{a,b}} \vb{X_{a,b}^{v_3}(n)} S_w \rb{\theta_{a,b}^{v_3}; 2r},
\end{aligned}\]
where
\[\begin{aligned}
\theta_{a,b}^{v_3} \rb{\lambda \times \lambda'} = \operatorname{e}\rb{\frac{m_2u\lambda_2}{p^s}} \operatorname{e}\rb{\frac{m_1\hat v_2 \lambda_1 + n_1 p^{r-a} \lambda'_1}{p^r}},
\end{aligned}\]
with $\hat v_2$ and $u$ given as in \eqref{eq:abaKloosterman_v2hat} and \eqref{eq:abaKloosterman_u}. By \eqref{eq:abaKloosterman_GL2decomp}, we have
\begin{align}\label{eq:abaKloosterman_theta_sum}
S_w \rb{\theta_{a,b}^{v_3}; 2r} = \sum\limits_{x, y\in\rb{\mathbb{Z}/p^{2r}\mathbb{Z}}^\times} \operatorname{e}\rb{\frac{m_2u x}{p^s}} \operatorname{e}\rb{\frac{m_1\hat v_2 \ol{x}y + n_1p^{r-a}\ol{y}}{p^r}},
\end{align}
and we easily deduce that
\begin{align}\label{eq:abaKloosterman_Xab}
\sum\limits_{v_3\in\mathcal S_{a,b}} \vb{X_{a,b}^{v_3}(n)} \leq \vb{\mathcal S_{a,b}} p^{a+b} \leq p^{r+a+b}.
\end{align}
We estimate the size of $S_w \rb{\theta_{a,b}^{v_3}; 2r}$ below. We start by computing the order of $\hat v_2$ and $u$ in \eqref{eq:abaKloosterman_theta_sum}. From \eqref{eq:abaKloosterman_v2hat}, it is clear that $\operatorname{ord}_p\rb{\hat v_2} = s-a$. Now we consider $\operatorname{ord}_p(u)$. If $a\neq \frac{s}{2}$, then we have (after putting $v'_2 = \ol{v'_2} = 1$)
\[\begin{aligned}
u = &p^{a+r-s} \rb{-p^av_3 + v_4} + \ol{V'} v_3^2p^{2a}\\
= &p^{a+r-s} \rb{p^av_3 + v_4} - 2 v_3 p^{2a+r-s} + \ol{V'} v_3^2p^{2a}\\
= &p^{2a+2r-2s} V' - 2 v_3 p^{2a+r-s} + \ol{V'} v_3^2p^{2a}\\
= &p^{2a} \ol{V'} \rb{p^{2r-2s} V'^2 - 2p^{r-s}v_3 V' + v_3^2}\\
= &p^{2a} \ol{V'} \rb{p^{r-s}V'-v_3}^2\\
= &p^{2a} \ol{V'} \rb{p^{-a} v_4}^2\\
= &v_4^2 \ol{V'}.
\end{aligned}\]
So $\operatorname{ord}_p(u) = 2\rb{r-b}$. If $a=\frac{s}{2}$, then (again we set $v'_2 = \ol{v'_2} = 1$)
\begin{align}\label{eq:abaKloosterman_u_criterion}
u = &-v_3 p^{2a+r-s} + v_4 p^{a+r-s} = p^{a+r-s} \rb{2v_4 - \rb{p^a v_3 + v_4}}.
\end{align}
This form will be useful in computing $\operatorname{ord}_p(u)$, when more conditions are given.
Case I: Suppose $s<r$. We deduce from \eqref{eq:abaKloosterman_v2hat} that $\operatorname{ord}_p(v_3) = 0, \operatorname{ord}_p(v_4) = a$, so only terms with $r=a+b$ contribute. When $a \neq \frac{s}{2}$, we have $\operatorname{ord}_p(u) = 2\rb{r-b} = 2a$. When $a = \frac{s}{2}$, we still have $\operatorname{ord}_p(u) \leq s = 2a$ trivially. So $\operatorname{ord}_p(u) \leq 2a$ always holds. We compute
\[\begin{aligned}
\vb{S_w\rb{\theta_{a,b}^{v_3}; 2r}} \ll p^{3r-s} \min\cb{p^{r+2a+\operatorname{ord}_p(m_2)}, p^{s-a+\min\cb{s+\operatorname{ord}_p(m_1), r+\operatorname{ord}_p(n_1)}}}.
\end{aligned}\]
Note that we have $\rb{p^{r-a}, p^a \rb{v_3+1}} = p^{r+a-s}$. A necessary condition for this to hold is that $p^{r-s} \mid v_3+1$. So $\vb{\mathcal S_{a,b}} \leq p^s$. So, from \eqref{eq:abaKloosterman_Xab} we actually have
\[\begin{aligned}
\sum\limits_{v_3\in \mathcal S_{a,b}} \vb{X_{a,b}^{v_3}(n)} \leq p^{s+a+b}.
\end{aligned}\]
Hence
\[\begin{aligned}
\vb{\operatorname{Kl}_p\rb{n,\psi,\psi'}} &\leq \sum\limits_{\substack{0\leq a \leq s/2\\ b = r-a}} \vb{S_{a,b}\rb{n,\psi,\psi'}}\\
&\ll \sum\limits_{\substack{0\leq a \leq s/2\\ b = r-a}} p^{-4r} p^{s+a+b} \rb{p^{3r-s} \min\cb{p^{r+2a+\operatorname{ord}_p(m_2)}, p^{s-a+\min\cb{s+\operatorname{ord}_p(m_1), r+\operatorname{ord}_p(n_1)}}}}\\
&\ll \sum\limits_{\substack{0\leq a \leq s/2}} \min\cb{p^{r+2a+\operatorname{ord}_p(m_2)}, p^{s-a+\min\cb{s+\operatorname{ord}_p(m_1), r+\operatorname{ord}_p(n_1)}}}\\
&\ll p^{\frac{r}{3} + \frac{2s}{3} + \frac{2}{3} \min\cb{\operatorname{ord}_p(m_1)+s, \operatorname{ord}_p(n_1)+r} + \frac{1}{3} \operatorname{ord}_p(m_2)}.
\end{aligned}\]
Case II: Suppose $s=r$. We deduce from \eqref{eq:abaKloosterman_v2hat} that when $a\neq 0$, then $\operatorname{ord}_p(v_3) = 0, \operatorname{ord}_p(v_4) \geq a$. So, only terms with $r\geq a+b$ contribute. When $a\neq \frac{s}{2}$, we have $\operatorname{ord}_p(u) = 2\rb{r-b}$. When $a=\frac{s}{2}$, we still have $\operatorname{ord}_p(u) \leq s = 2\rb{r-b}$. So $\operatorname{ord}_p(u) \leq 2\rb{r-b}$ always holds. We compute
\[\begin{aligned}
\vb{S_w\rb{\theta_{a,b}^{v_3}; 2r}} \ll p^{2r} \min\cb{p^{3r-2b+\operatorname{ord}_p(m_2)}, p^{2r-a+\min\cb{\operatorname{ord}_p(m_1), \operatorname{ord}_p(n_1)}}}.
\end{aligned}\]
Hence
\[\begin{aligned}
\vb{\operatorname{Kl}_p\rb{n,\psi,\psi'}} &\leq \sum\limits_{\substack{0\leq a \leq r/2\\ b \leq r-a}} \vb{S_{a,b}\rb{n,\psi,\psi'}}\\
&\ll \sum\limits_{\substack{0\leq a \leq s/2\\ b \leq r-a}} p^{-4r} p^{r+a+b} \rb{p^{2r} \min\cb{p^{3r-2b+\operatorname{ord}_p(m_2)}, p^{2r-a+\min\cb{\operatorname{ord}_p(m_1), \operatorname{ord}_p(n_1)}}}}\\
&\ll \sum\limits_{\substack{0\leq a \leq s/2\\ b \leq r-a}} p^{-r+a+b} \min\cb{p^{3r-2b+\operatorname{ord}_p(m_2)}, p^{2r-a+\min\cb{\operatorname{ord}_p(m_1), \operatorname{ord}_p(n_1)}}}\\
&\ll p^{\frac{5r}{3} + \frac{2}{3}\min\cb{\operatorname{ord}_p(m_1), \operatorname{ord}_p(n_1)} + \frac{1}{3} \operatorname{ord}_p(m_2)}.
\end{aligned}\]
Case III: $2r>s>r$. We consider the following subcases:
\begin{enumerate}[label=(\alph*)]
\item Suppose $a=s-r$. Then the condition $\rb{p^{r-a}, p^a v_3 + p^{r-b}} = 1$ implies $b=r$. So $\operatorname{ord}_p(u) = 0$. We deduce from \eqref{eq:abaKloosterman_v2hat} that $\hat v_2 = 0$. So
\[\begin{aligned}
\vb{S_w\rb{\theta_{a,b}^{v_3}; 2r}} \ll p^{3r-s} \min\cb{p^{r+\operatorname{ord}_p(m_2)}, p^{2r+\operatorname{ord}_p(n_1)}}.
\end{aligned}\]
\item Suppose $s-r < a < \frac{s}{2}$. Then we deduce from \eqref{eq:abaKloosterman_v2hat} that $\operatorname{ord}_p(v_3) = 0$, $\operatorname{ord}_p(v_4) \geq a$. So $a+b\leq r$. Meanwhile, as $r+a-s < a$, the condition $\rb{p^{r-a}, p^a v_3 + p^{r-b}} = p^{r+a-s}$ says $r-b = r+a-s$, which implies $a+b = s >r$, a contradiction. So there is no contribution from this case.
\item Suppose $a = \frac{s}{2}$. Again, we deduce from \eqref{eq:abaKloosterman_v2hat} that $\operatorname{ord}_p(v_3) = 0$, $\operatorname{ord}_p(v_4) \geq a$. So, only terms with $r\geq a+b$ contribute. In this case, we don't have a good bound for $\operatorname{ord}_p(u)$. So
\[\begin{aligned}
\vb{S_w\rb{\theta_{a,b}^{v_3}; 2r}} \ll p^{3r+\min\cb{\frac{s}{2}+\operatorname{ord}_p(m_1), r-\frac{s}{2}+\operatorname{ord}_p(n_1)}}.
\end{aligned}\]
\end{enumerate}
Hence
\[\begin{aligned}
\vb{\operatorname{Kl}_p\rb{n,\psi,\psi'}} \leq &\sum\limits_{\substack{s-r\leq a \leq s/2\\ b \leq r-a}} \vb{S_{a,b}\rb{n,\psi,\psi'}}\\
\ll &\sum\limits_{\substack{a=s-r\\ b=r}} p^{-4r} p^{r+a+b} \rb{p^{3r-s} \min\cb{p^{r+\operatorname{ord}_p(m_2)}, p^{2r+\operatorname{ord}_p(n_1)}}}\\
&+\sum\limits_{\substack{a=s/2\\ b\leq r-s/2}} p^{-4r} p^{r+a+b} \rb{p^{3r+\min\cb{\frac{s}{2}+\operatorname{ord}_p(m_1), r-\frac{s}{2}+\operatorname{ord}_p(n_1)}}}\\
\ll &p^{r+\min\cb{\operatorname{ord}_p(m_2), r+\operatorname{ord}_p(n_1)}} + p^{r+\min\cb{\frac{s}{2}+\operatorname{ord}_p(m_1), r-\frac{s}{2}+\operatorname{ord}_p(n_1)}}.
\end{aligned}\]
Case IV: $s = 2r$. In this case, we have $a = r$, $v_4 = p^{r-b}$, and $v_3$ is arbitrary. We deduce from \eqref{eq:abaKloosterman_v2hat} that $\hat v_2 = 0$. We consider the following subcases:
\begin{enumerate}[label=(\alph*)]
\item Suppose $b=0$. We may assume $v_4=0$. Then $\operatorname{ord}_p(u) = r+\operatorname{ord}_p(v_3)$. We compute
\[\begin{aligned}
\vb{S_w\rb{\theta_{a,b}^{v_3}; 2r}} \ll p^r \min\cb{p^{2r+\operatorname{ord}_p(v_3)+\operatorname{ord}_p(m_2)}, p^{2r+\operatorname{ord}_p(n_1)}}.
\end{aligned}\]
Fix $c\leq r$. Then
\[\begin{aligned}
\vb{\cbm{v_3\in\mathcal S_{a,b}}{\operatorname{ord}_p(v_3) = c}} \leq p^{r-c}.
\end{aligned}\]
\item Suppose $b>0$. Then $\operatorname{ord}_p(u) = r-b$. We compute
\[\begin{aligned}
\vb{S_w\rb{\theta_{a,b}^{v_3}; 2r}} \ll p^r \min\cb{p^{2r-b+\operatorname{ord}_p(m_2)}, p^{2r+\operatorname{ord}_p(n_1)}}.
\end{aligned}\]
\end{enumerate}
Hence
\[\begin{aligned}
\vb{\operatorname{Kl}_p\rb{n,\psi,\psi'}} \leq &\sum\limits_{\substack{a = r\\ b\leq r}} \vb{S_{a,b}\rb{n,\psi,\psi'}}\\
\ll &\sum\limits_{\substack{a=r\\ b=0\\ c\leq r}} p^{-4r} p^{r-c+a+b} \rb{p^r \min\cb{p^{2r+c+\operatorname{ord}_p(m_2)}, p^{2r+\operatorname{ord}_p(n_1)}}}\\
&+\sum\limits_{\substack{a=r\\ b>0}} p^{-4r} p^{r+a+b} \rb{p^r \min\cb{p^{2r-b+\operatorname{ord}_p(m_2)}, p^{2r+\operatorname{ord}_p(n_1)}}}\\
\ll &p^{r+\min\cb{\operatorname{ord}_p(m_2), r+\operatorname{ord}_p(n_1)}}.
\end{aligned}\]
This finishes the proof of the theorem.
\end{proof}
\begin{proof}[Proof of \Cref{thm:babKloosterman_bound}]
Let $w = s_\beta s_\alpha s_\beta$, and $n = n_{s_\beta s_\alpha s_\beta, r, s}$. Note that we have $r\leq s$. Then $\Delta_w = \cb{\beta}$, and
\[\begin{aligned}
A_w\rb{\ell} = \rb{\mathbb{Z}/p^\ell\mathbb{Z}}^2 \times \rb{\mathbb{Z}/p^\ell\mathbb{Z}}.
\end{aligned}\]
Let $t = \operatorname{diag} \rb{a_1, a_2, ca_1^{-1}, ca_2^{-1}} \in \mathcal T$. Then $s = n^{-1}tn = \operatorname{diag} \rb{ca_2^{-1}, ca_1^{-1}, a_2, a_1}$. We compute
\[\begin{aligned}
\kappa'_2 \rb{t * x} = ca_1^{-2} \kappa'_2(x).
\end{aligned}\]
So
\[\begin{aligned}
V_w(\ell) = \cbm{\lambda \times \lambda' \in A_w(\ell)}{\begin{array}{l} \lambda_1, \lambda_2, \lambda'_2 \in \rb{\mathbb{Z}/p^\ell\mathbb{Z}}^\times,\\ \lambda_1^2\lambda_2\lambda'_2 = 1\end{array}}.
\end{aligned}\]
If $\theta: A_w(\ell) \to \mathbb{C}^\times$ is given by
\[\begin{aligned}
\theta\rb{\lambda \times \lambda'} = \operatorname{e}\rb{\frac{n_1\lambda_1+n_2\lambda_2}{p^\ell}} \operatorname{e}\rb{\frac{n'_2\lambda'_2}{p^\ell}}, \quad n_1, n_2, n'_2\in\mathbb{Z},
\end{aligned}\]
then
\begin{align}\label{eq:babKloosterman_GL2decomp}
S_w\rb{\theta, \ell} = \sum\limits_{\lambda_1\in\rb{\mathbb{Z}/p^\ell\mathbb{Z}}^\times} \operatorname{e}\rb{\frac{n_1\lambda_1}{p^\ell}} S \rb{n_2 \lambda_1^{-2}, n'_2; p^\ell}.
\end{align}
In terms of Pl\"ucker coordinates (see \cite[Section 3.2]{Man2020}), $n = n_{s_\beta s_\alpha s_\beta, r, s}$ says $v_2 = p^r$, and $v_{12} = p^s$. Suppose $x_{a,b}^{v_{23}} \in G\rb{\mathbb{Z}_p}$ has coordinates
\[\begin{aligned}
\rb{v_{12}, v_{13}, v_{14}, v_{23}} = \rb{p^s, p^{s-a}, p^{s-b}, v_{23}}.
\end{aligned}\]
The condition $(v_{12}, v_{14}) \mid v_{13}^2$ says $s-b \leq 2\rb{s-a}$, that is, $2a-b\leq s$. We also have $\max\cb{a,b} = r$. Then
\[\begin{aligned}
u'\rb{x_{a,b}^{v_{23}}} = \begin{pmatrix} 1 & & -v_{23}p^{-s} & p^{-a}\\ &1&p^{-a}&p^{-b}\\&&1\\&&&1\end{pmatrix} \pmod{U\rb{\mathbb{Z}_p}}.
\end{aligned}\]
Let $X_{a,b}^{v_{23}} (n) = \mathcal T * x_{a,b}^{v_{23}}$, and define
\[\begin{aligned}
S_{a,b}^{v_{23}} \rb{n, \psi, \psi'} = \sum\limits_{x\in X_{a,b}^{v_{23}} (n)} \psi\rb{u(x)} \psi'\rb{u'(x)}.
\end{aligned}\]
We also let
\[\begin{aligned}
X_{a,b} (n) = \coprod\limits_{\substack{v_{23} \pmod{p^s}\\ \rb{p^{s-r}, v_{23}, p^{-b}v_{23}-p^{s-2a}}=1}} X_{a,b}^{v_{23}} (n),
\end{aligned}\]
and
\[\begin{aligned}
S_{a,b}\rb{n, \psi, \psi'} = \sum\limits_{x\in X_{a,b}(n)} \psi\rb{u(x)} \psi'\rb{u'(x)}.
\end{aligned}\]
It is easy to see that
\[\begin{aligned}
X(n) = \coprod\limits_{\substack{0\leq a, b \leq r\\ \max\cb{a,b} = r\\ 2a-b \leq s}} X_{a,b} (n).
\end{aligned}\]
It is clear that $u(x), u'(x)$ have entries in $p^{-s}\mathbb{Z}_p/\mathbb{Z}_p$ for all $x\in X(n)$. Let $\mathcal S_{a,b}$ be a finite subset of $\mathbb{Z}_p$ such that
\[\begin{aligned}
X_{a,b}(n) = \coprod\limits_{v_{23} \in \mathcal S_{a,b}} X_{a,b}^{v_{23}} (n).
\end{aligned}\]
By \Cref{thm:Stevens4.10}, we have
\[\begin{aligned}
S_{a,b} \rb{n, \psi, \psi'} = p^{-2s} \rb{1-p^{-1}}^{-2} \sum\limits_{v_{23}\in \mathcal S_{a,b}} \vb{X_{a,b}^{v_{23}} (n)} S_w \rb{\theta_{a,b}^{v_{23}}; s},
\end{aligned}\]
where
\[\begin{aligned}
\theta_{a,b}^{v_{23}} \rb{\lambda \times \lambda'} = \operatorname{e}\rb{\frac{m_1 u \lambda_1}{p^r}} \operatorname{e}\rb{\frac{m_2 \hat v_{14} \lambda_2 + n_2 p^{s-b} \lambda'_2}{p^s}}.
\end{aligned}\]
with $\hat v_{14}$ and $u$ given as in \eqref{eq:babKloosterman_u} and \eqref{eq:babKloosterman_v14hat}. By \eqref{eq:babKloosterman_GL2decomp}, we have
\begin{align}\label{eq:babKloosterman_theta_sum}
S_w\rb{\theta_{a,b}^{v_{23}}; s} = \sum\limits_{x,y \in \rb{\mathbb{Z}/p^s\mathbb{Z}}^\times} \operatorname{e}\rb{\frac{m_1u\ol{x}}{p^r}} \operatorname{e}\rb{\frac{m_2 \hat v_{14} x^2 \ol{y} + n_2 p^{s-b} y}{p^s}},
\end{align}
and we easily deduce that
\begin{align}\label{eq:babKloosterman_Xab}
\sum\limits_{v_{23}\in\mathcal S_{a,b}} \vb{X_{a,b}^{v_{23}} (n)} \leq \vb{\mathcal S_{a,b}} p^{a+b} \leq p^{s+a}.
\end{align}
We estimate the size of $S_w \rb{\theta_{a,b}^{v_{23}}; s}$. We start by computing the order of $\hat v_{14}$ and $u$ in \eqref{eq:babKloosterman_theta_sum}. From \eqref{eq:babKloosterman_u}, we see that
\begin{align}\label{eq:babKloosterman_u_criterion}
u p^{r-a} &\equiv v_{23} \pmod{p^r}, & u p^{r-b} &\equiv -p^{s-a} \pmod{p^r}.
\end{align}
So, if $a=r$, then $u\equiv v_{23}\pmod{p^r}$, and if $b=r$, then $u\equiv -p^{s-a}\pmod{p^r}$. (Recall that $\max\cb{a,b}=r$.) Also, we know that
\begin{align}\label{eq:babKloosterman_v23criterion}
v_{23} = -p^{s-2a+b} + \beta p^b
\end{align}
for some $\beta\in \mathbb{Z}$ such that $\rb{\beta, p^{s-2r+b}} = 1$ (see \cite[Section 3.2]{Man2020}). Meanwhile, from \eqref{eq:babKloosterman_v14hat}, we see that unless $r=s$, we have $\operatorname{ord}_p\rb{\hat v_{14}} = 2r-b$.
Case I: Suppose $r<\frac{s}{2}$. We deduce from \eqref{eq:babKloosterman_v23criterion} that $\operatorname{ord}_p(v_{23}) = b$. From \eqref{eq:babKloosterman_u_criterion}, we deduce $a\geq b$. So we actually have $a=r$, and then $\operatorname{ord}_p(u) = b$. We compute
\[\begin{aligned}
\vb{S_w\rb{\theta_{a,b}^{v_{23}}; s}} \ll p^{s-r} \min\cb{p^{s+b+\operatorname{ord}_p(m_1)}, p^{r-b+\min\cb{2r+\operatorname{ord}_p(m_2), s+\operatorname{ord}_p(n_2)}}}.
\end{aligned}\]
Hence
\[\begin{aligned}
\vb{\operatorname{Kl}_p\rb{n, \psi, \psi'}} &\leq \sum\limits_{\substack{a=r\\ 0\leq b\leq r}} \vb{S_{a,b}\rb{n,\psi,\psi'}}\\
&\ll \sum\limits_{\substack{a=r\\ 0\leq b\leq r}} p^{-2s} p^{s+a} \rb{p^{s-r} \min\cb{p^{s+b+\operatorname{ord}_p(m_1)}, p^{r-b+\min\cb{2r+\operatorname{ord}_p(m_2), s+\operatorname{ord}_p(n_2)}}}}\\
&\ll p^{\frac{s}{2}+\frac{r}{2}+\frac{1}{2}\min\cb{2r+\operatorname{ord}_p(m_2), s+\operatorname{ord}_p(n_2)}+\frac{1}{2}\operatorname{ord}_p(m_1)}.
\end{aligned}\]
Case II: Suppose $r=\frac{s}{2}$. We consider the following subcases:
\begin{enumerate}[label=(\alph*)]
\item Suppose $b=r$. From \eqref{eq:babKloosterman_u_criterion}, we may assume $u=0$. We compute
\[\begin{aligned}
\vb{S_w\rb{\theta_{a,b}^{v_{23}}; s}} \ll p^{\frac{3s}{2}+\min\cb{\operatorname{ord}_p(m_2), \operatorname{ord}_p(n_2)}}.
\end{aligned}\]
\item Suppose $b<r$. Then $a=r$. From \eqref{eq:babKloosterman_v23criterion}, we see that $v_{23} = \rb{\beta-1} p^b$ for some $\beta\in\mathbb{Z}$ such that $\rb{\beta, p^b} = 1$. So $\operatorname{ord}_p(v_{23}) \geq b$. And from \eqref{eq:babKloosterman_u_criterion}, we deduce that $\operatorname{ord}_p(u) = \operatorname{ord}_p(v_{23})$. We compute
\[\begin{aligned}
\vb{S_w\rb{\theta_{a,b}^{v_{23}}; s}} \ll p^{s/2} \min\cb{p^{s+\operatorname{ord}_p(v_{23}) + \operatorname{ord}_p(m_1)}, p^{\frac{3s}{2}-b+\min\cb{\operatorname{ord}_p(m_2), \operatorname{ord}_p(n_2)}}}.
\end{aligned}\]
\end{enumerate}
Fix $c\geq b$. Then
\[\begin{aligned}
\vb{\cbm{v_{23} \in \mathcal S_{a,b}}{\operatorname{ord}_p(v_{23}) = c}} \leq p^{s-c}.
\end{aligned}\]
Hence
\[\begin{aligned}
\vb{\operatorname{Kl}_p\rb{n, \psi, \psi'}} \leq &\sum\limits_{\substack{a,b\leq r\\ \max\cb{a,b}=r}} \vb{S_{a,b}\rb{n, \psi, \psi'}}\\
\ll &\sum\limits_{\substack{b=r\\ a\leq r}} p^{-2s} p^{s+a} \rb{p^{\frac{3s}{2}+\min\cb{\operatorname{ord}_p(m_2), \operatorname{ord}_p(n_2)}}}\\
&+\sum\limits_{\substack{a=r\\ b<r\\ b\leq c\leq r}} p^{-2s} p^{s-c+a+b} \rb{p^{s/2} \min\cb{p^{s+\operatorname{ord}_p(v_{23}) + \operatorname{ord}_p(m_1)}, p^{\frac{3s}{2}-b+\min\cb{\operatorname{ord}_p(m_2), \operatorname{ord}_p(n_2)}}}}\\
\ll &p^{\frac{5s}{4} + \frac{1}{2}\operatorname{ord}_p(m_1) + \frac{1}{2}\min\cb{\operatorname{ord}_p(m_2), \operatorname{ord}_p(n_2)}}.
\end{aligned}\]
Case III: Suppose $s>r>\frac{s}{2}$. We consider the following subcases:
\begin{enumerate}[label=(\alph*)]
\item Suppose $b=r$. Then $\operatorname{ord}_p(u) = s-a$, and $\operatorname{ord}_p(\hat v_{14}) = r$. We compute
\[\begin{aligned}
\vb{S_w\rb{\theta_{a,b}^{v_{23}}; s}} \ll p^{s-r} \min\cb{p^{2s-a+\operatorname{ord}_p(m_1)}, p^{r+\min\cb{r+\operatorname{ord}_p(m_2), s-r+\operatorname{ord}_p(n_2)}}}.
\end{aligned}\]
\item Suppose $b<r$. Then $a=r$. Then from \eqref{eq:babKloosterman_v23criterion} we deduce that $\operatorname{ord}_p(v_{23}) = s-2r+b$, and hence $\operatorname{ord}_p(u) = s-2r+b$. We compute
\[\begin{aligned}
\vb{S_w\rb{\theta_{a,b}^{v_{23}}; s}} \ll p^{s-r} \min\cb{p^{2s-2r+b+\operatorname{ord}_p(m_1)}, p^{r-b+\min\cb{2r+\operatorname{ord}_p(m_2), s+\operatorname{ord}_p(n_2)}}}.
\end{aligned}\]
\end{enumerate}
Hence
\[\begin{aligned}
\vb{\operatorname{Kl}_p\rb{n, \psi, \psi'}} \leq &\sum\limits_{\substack{a,b\leq r\\ \max\cb{a,b}=r\\ 2a-b\leq s}} \vb{S_{a,b}\rb{n, \psi, \psi'}}\\
\ll &\sum\limits_{\substack{b=r\\ a\leq r}} p^{-2s} p^{s+a} \rb{p^{s-r} \min\cb{p^{2s-a+\operatorname{ord}_p(m_1)}, p^{r+\min\cb{r+\operatorname{ord}_p(m_2), s-r+\operatorname{ord}_p(n_2)}}}}\\
&+\sum\limits_{\substack{a=r\\ 2r-s\leq b < r}} p^{-2s} p^{s+a} \rb{p^{s-r} \min\cb{p^{2s-2r+b+\operatorname{ord}_p(m_1)}, p^{r-b+\min\cb{2r+\operatorname{ord}_p(m_2), s+\operatorname{ord}_p(n_2)}}}}\\
&\ll p^{s-\frac{r}{2}+\frac{1}{2}\operatorname{ord}_p(m_1)+\frac{1}{2}\min\cb{2r+\operatorname{ord}_p(m_2), s+\operatorname{ord}_p(n_2)}}.
\end{aligned}\]
Case IV: $r=s$. In this case we only have to consider terms with $b=r$. Indeed, if $b<r$, then $a=r$, and then by \eqref{eq:babKloosterman_u_criterion}, we see that $u p^{r-b} \equiv -1\pmod{p^r}$, which says $b=r$, a contradiction. When $b=r$, we have $\operatorname{ord}_p(u) = s-a$, and from \eqref{eq:babKloosterman_v14hat} we may assume $\hat v_{14} = 0$. We compute
\[\begin{aligned}
S_w\rb{\theta_{a,b}^{v_{23}}; s} \ll \min\cb{p^{2s-a+\operatorname{ord}_p(m_1)}, p^{s+\operatorname{ord}_p(n_2)}}.
\end{aligned}\]
Hence
\[\begin{aligned}
\vb{\operatorname{Kl}_p\rb{n, \psi, \psi'}} \leq &\sum\limits_{\substack{b=s\\ a\leq s}} \vb{S_{a,b}\rb{n, \psi, \psi'}}\\
\ll &\sum\limits_{\substack{b=s\\ a\leq s}} p^{-2s} p^{s+a} \rb{\min\cb{p^{2s-a+\operatorname{ord}_p(m_1)}, p^{s+\operatorname{ord}_p(n_2)}}}\\
\ll &p^{s+\min\cb{\operatorname{ord}_p(m_1), \operatorname{ord}_p(n_2)}}.
\end{aligned}\]
This finishes the proof of the theorem.
\end{proof}
\subsection{Bounds for $\operatorname{Sp}(4)$ Kloosterman sums attached to the long Weyl element} We show that under the stratification introduced in \Cref{section:stratification}, $\operatorname{Kl}_p\rb{n_{w_0, r, s}, \psi, \psi'}$ decomposes into a sum of products of $\operatorname{GL}(2)$ Kloosterman sums. So the Kloosterman sum can be bounded using \eqref{eq:Kloosterman_GL2bound}.
\begin{proof}[Proof of \Cref{thm:w0Kloosterman_bound}]
Let $w = w_0$, and $n = n_{w_0, r, s}$. Then $\Delta_{w_0} = \Delta$, and
\[\begin{aligned}
A_{w_0}(\ell) = \rb{\mathbb{Z}/p^\ell\mathbb{Z}}^2 \times \rb{\mathbb{Z}/p^\ell\mathbb{Z}}^2.
\end{aligned}\]
Let $t = \operatorname{diag}\rb{a_1, a_2, ca_1^{-1}, ca_2^{-1}} \in \mathcal T$. Then $s = n^{-1}tn = \operatorname{diag}\rb{ca_1^{-1}, ca_2^{-1}, a_1, a_2}$. We compute
\[\begin{aligned}
\kappa'_1(t*x) &= a_2a_1^{-1} \kappa'_1(x), & \kappa'_2(t*x) &= c a_2^{-2} \kappa'_2(x).
\end{aligned}\]
So
\[\begin{aligned}
V_{w_0}(\ell) = \cbm{\lambda \times \lambda' \in A_{w_0}(\ell)}{ \lambda_1\lambda'_1 = 1, \lambda_2\lambda'_2 = 1}.
\end{aligned}\]
If $\theta: A_{w_0}(\ell) \to \mathbb{C}^\times$ is given by
\[\begin{aligned}
\theta\rb{\lambda \times \lambda'} &= \prod\limits_{i=1}^2 \operatorname{e}\rb{\frac{n_i\lambda_i}{p^\ell}} \prod\limits_{i=1}^2 \operatorname{e}\rb{\frac{n'_i\lambda'_i}{p^\ell}}, & &n_1, n_2, n'_1, n'_2\in\mathbb{Z},
\end{aligned}\]
then
\begin{align}\label{eq:Kloosterman_GL2decomp}
S_{w_0}\rb{\theta; \ell} = S \rb{n_1, n'_1; p^\ell} S \rb{n_2, n'_2; p^\ell}.
\end{align}
In terms of Pl\"ucker coordinates, $n = n_{w_0, r, s}$ says $v_1 = p^r$, and $v_{12} = p^s$. Suppose $x_{a,b}^{v_3, v_4, v_{13}}\in G(\mathbb{Z}_p)$ has coordinates
\[\begin{aligned}
\rb{v_1, v_2, v_3, v_4; v_{12}, v_{13}, v_{14}} = \rb{p^r, p^{r-a}, v_3, v_4; p^s, v_{13}, p^{s-b}}.
\end{aligned}\]
Note that this also says $r\geq a, s\geq b$. Then
\[\begin{aligned}
u'\rb{x_{a,b}^{v_3, v_4, v_{13}}} = \begin{pmatrix} 1&p^{-a} & v_3p^{-r} & v_4p^{-r}\\ &1&v_{13}p^{-s} & p^{-b}\\ &&1\\&&-p^{-a}&1\end{pmatrix} \pmod{U\rb{\mathbb{Z}_p}}.
\end{aligned}\]
Let $X_{a,b}^{v_3, v_4, v_{13}}(n) = \mathcal T * x_{a,b}^{v_3, v_4, v_{13}}$, and define
\[\begin{aligned}
S_{a,b}^{v_3, v_4, v_{13}} \rb{n, \psi, \psi'} = \sum\limits_{x \in X_{a,b}^{v_3, v_4, v_{13}}(n)} \psi\rb{u(x)} \psi'\rb{u'(x)}.
\end{aligned}\]
We also let
\[\begin{aligned}
X_{a,b} (n) = \bigcup\limits_{\substack{v_3, v_4 \ppmod{p^r}\\v_{13}\ppmod{p^s}\\\text{conditions}}} X_{a,b}^{v_3, v_4, v_{13}} (n),
\end{aligned}\]
and
\[\begin{aligned}
S_{a,b} \rb{n, \psi, \psi'} = \sum\limits_{x \in X_{a,b}(n)} \psi\rb{u(x)} \psi'\rb{u'(x)}.
\end{aligned}\]
It is easy to see that
\[\begin{aligned}
X(n) = \coprod\limits_{\substack{0\leq a\leq r\\ 0\leq b\leq s}} X_{a,b}(n).
\end{aligned}\]
Now we consider cases $r\geq s$ and $r<s$ separately.
\begin{enumerate}[label=(\roman*)]
\item Suppose $r> s$. As $r\geq a, r\geq s\geq b$, we see that $u(x), u'(x)$ have entries in $p^{-r}\mathbb{Z}_p/\mathbb{Z}_p$ for all $x\in X(n)$. Let $\mathcal S_{a,b}$ be a finite subset of $\mathbb{Z}_p^3$ such that
\[\begin{aligned}
X_{a,b}(n) = \coprod\limits_{(v_3, v_4, v_{13}) \in \mathcal S_{a,b}} X_{a,b}^{v_3, v_4, v_{13}} (n).
\end{aligned}\]
By \Cref{thm:Stevens4.10}, we have
\[\begin{aligned}
S_{a,b} \rb{n, \psi, \psi'} = p^{-2r} \rb{1-p^{-1}}^{-2} \sum\limits_{(v_3, v_4, v_{13}) \in \mathcal S_{a,b}} \vb{X_{a,b}^{v_3, v_4, v_{13}} (n)} S_{w_0} \rb{\theta_{a,b}^{v_3, v_4, v_{13}}; r},
\end{aligned}\]
where
\[\begin{aligned}
\theta_{a,b}^{v_3, v_4, v_{13}} \rb{\lambda \times \lambda'} = \operatorname{e}\rb{\frac{m_1\hat v_2\lambda_1 + n_1p^{r-a}\lambda'_1}{p^r}} \operatorname{e}\rb{\frac{m_2\hat v_{14}\lambda_2 + n_2 p^{s-b}\lambda'_2}{p^s}}.
\end{aligned}\]
By \eqref{eq:Kloosterman_GL2decomp}, we have
\[\begin{aligned}
S_{w_0} \rb{\theta_{a,b}^{v_3, v_4, v_{13}}; r} = S\rb{m_1\hat v_2, n_1 p^{r-a}; p^r} S\rb{m_2\hat v_{14} p^{r-s}, n_2p^{r-b}; p^r}.
\end{aligned}\]
And we obtain a bound by applying \eqref{eq:Kloosterman_GL2bound}:
\[\begin{aligned}
\vb{S_{w_0}\rb{\theta_{a,b}^{v_3, v_4, v_{13}}; r}} \leq 4 p^r \rb{\gcd\rb{m_1\hat v_2, n_1p^{r-a}, p^r} \gcd\rb{m_2\hat v_{14} p^{r-s}, n_2p^{r-b}, p^r}}^{1/2}.
\end{aligned}\]
\item Suppose $s\geq r$. Then $u(x), u'(x)$ has entries in $p^{-s}\mathbb{Z}_p/\mathbb{Z}_p$ for all $x\in X(n)$. Again, by \Cref{thm:Stevens4.10} we have
\[\begin{aligned}
S_{a,b}\rb{n,\psi,\psi'} = p^{-2s} \rb{1-p^{-1}}^{-2} \sum\limits_{(v_3, v_4, v_{13}) \in \mathcal S_{a,b}} \vb{X_{a,b}^{v_3, v_4, v_{13}} (n)} S_{w_0} \rb{\theta_{a,b}^{v_3, v_4, v_{13}}; s},
\end{aligned}\]
where
\[\begin{aligned}
\theta_{a,b}^{v_3, v_4, v_{13}} \rb{\lambda \times \lambda'} = \operatorname{e}\rb{\frac{\rb{m_1\hat v_2 p^{s-r}}\lambda_1 + \rb{m_2\hat v_{14}} \lambda_2 + \rb{n_1p^{s-a}}\lambda'_1 + \rb{n_2p^{s-b}}\lambda'_2}{p^s}}.
\end{aligned}\]
By \eqref{eq:Kloosterman_GL2decomp}, we have
\[\begin{aligned}
S_{w_0} \rb{\theta_{a,b}^{v_3, v_4, v_{13}}; s} = S\rb{m_1\hat v_2 p^{s-r}, n_1p^{s-a}; p^s} S\rb{m_2\hat v_{14}, n_2p^{s-b}; p^s}.
\end{aligned}\]
Applying \eqref{eq:Kloosterman_GL2bound} gives
\[\begin{aligned}
\vb{S_{w_0} \rb{\theta_{a,b}^{v_3, v_4, v_{13}}; s}} \leq 4 p^s \rb{\gcd\rb{m_1\hat v_2p^{s-r}, n_1p^{s-a}, p^s} \gcd\rb{m_2\hat v_{14}, n_2p^{s-b}, p^s}}^{1/2}.
\end{aligned}\]
\end{enumerate}
Now we give a bound to the size of $\operatorname{Kl}_p\rb{n, \psi, \psi'}$. To ease computation, we consider a relaxed bound by ignoring $\hat v_2$ and $\hat v_{14}$.
Suppose $r>s$. Then the bound says
\[\begin{aligned}
\vb{S_{w_0}\rb{\theta_{a,b}^{v_3, v_4, v_{13}}; r}} &\leq 4 p^r \rb{\gcd\rb{m_1\hat v_2, n_1p^{r-a}, p^r} \gcd\rb{m_2\hat v_{14} p^{r-s}, n_2p^{r-b}, p^r}}^{1/2}\\
&\leq 4 p^r\rb{\vb{n_1n_2}_p^{-1} p^{2r-a-b}}^{1/2}\\
&= 4 p^{2r-\frac{a+b}{2}} \vb{n_1n_2}_p^{-1/2}.
\end{aligned}\]
Note that
\[\begin{aligned}
\sum\limits_{\rb{v_3, v_4, v_{13}} \in \mathcal S_{a,b}} \vb{X_{a,b}^{v_3, v_4, v_{13}}(n)} \leq \vb{\mathcal S_{a,b}} p^{a+b}.
\end{aligned}\]
Hence
\[\begin{aligned}
\vb{\operatorname{Kl}_p\rb{n, \psi, \psi'}} &\leq \sum\limits_{\substack{a\leq r\\b\leq s}} \vb{S_{a,b} \rb{n,\psi, \psi'}}\\
&\leq \sum\limits_{\substack{a\leq r\\ b\leq s}} p^{-2r}\rb{1-p^{-1}}^{-2} 4\vb{n_1n_2}_p^{-1/2} \vb{\mathcal S_{a,b}} p^{2r+\frac{a+b}{2}}\\
&\ll \vb{n_1n_2}_p^{-1/2} \sum\limits_{\substack{a\leq r\\ b\leq s}} \vb{\mathcal S_{a,b}} p^{\frac{a+b}{2}}.
\end{aligned}\]
So it suffices to give an upper bound to $\vb{\mathcal S_{a,b}}$. Such bounds were computed in \cite[Section 5]{Man2020}. Note that we require $r\geq a+b$ in order to have $\mathcal S_{a,b}$ nonempty.
Case I: Suppose $s-r+a\geq 0$.
\begin{enumerate}[label=(\alph*)]
\item If $s-2r+2a+b \geq 0$, then $\vb{\mathcal S_{a,b}}\leq p^{r+s-a-b}$.
\item If $s-2r+2a+b < 0$, then $\vb{\mathcal S_{a,b}}\leq p^{2s-b-\lceil\frac{s-b}{2}\rceil} \leq p^{3s/2-b/2}$.
\end{enumerate}
Case II: Suppose $s-r+a<0$. Then $\vb{\mathcal S_{a,b}}\leq p^{2s-b-\lceil\frac{s-b}{2}\rceil} \leq p^{3s/2-b/2}$.
Combining the cases, we obtain
\[\begin{aligned}
\sum\limits_{\substack{a\leq r\\ b\leq s}} \vb{\mathcal S_{a,b}} p^{\frac{a+b}{2}} &\leq \sum\limits_{\substack{r-s\leq a\leq r\\ 2r-2a-s\leq b\leq r-a}} p^{r+s-\frac{a}{2}-\frac{b}{2}} + \sum\limits_{\substack{r-s\leq a\leq r\\ b<2r-2a-s}} p^{\frac{3s}{2}+\frac{a}{2}} + \sum\limits_{\substack{a<r-s\\ b\leq s}} p^{\frac{3s}{2}+\frac{a}{2}}\\
&\ll \rb{s+1}p^{\frac{r}{2}+\frac{5s}{4}}.\\
\end{aligned}\]
Hence, we have for $r>s$
\begin{align}\label{eq:w0Kloosterman_rgs}
\vb{\operatorname{Kl}_p\rb{n, \psi, \psi'}} \ll \vb{n_1n_2}_p^{-1/2} \rb{s+1}p^{\frac{r}{2}+\frac{5s}{4}}.
\end{align}
For $r\leq s$, applying the same argument gives
\begin{align}\label{eq:w0Kloosterman_rls}
\vb{\operatorname{Kl}_p\rb{n, \psi, \psi'}} &\ll \vb{n_1n_2}_p^{-1/2} \rb{s-r+1} p^{r+\frac{3s}{4}}.
\end{align}
Combining \eqref{eq:w0Kloosterman_rgs} and \eqref{eq:w0Kloosterman_rls}, we get
\begin{align}\label{eq:w0Kloosterman_nn}
\vb{\operatorname{Kl}_p\rb{n, \psi, \psi'}} \ll \vb{n_1n_2}_p^{-1/2} \rb{s+1} p^{\frac{r}{2} + \frac{3s}{4} + \frac{1}{2}\min\cb{r,s}}.
\end{align}
By \Cref{prp:w0Kloosterman_swap}, we can swap the characters, so
\begin{align}\label{eq:w0Kloosterman_mm}
\vb{\operatorname{Kl}_p\rb{n, \psi, \psi'}} \ll \vb{m_1m_2}_p^{-1/2} \rb{s+1} p^{\frac{r}{2} + \frac{3s}{4} + \frac{1}{2}\min\cb{r,s}}
\end{align}
as well. Combining \eqref{eq:w0Kloosterman_nn} and \eqref{eq:w0Kloosterman_mm} yields the theorem.
\end{proof}
\section{Symplectic Poincar\'e series} \label{section:sym_Poincare}
In this section, we compute the Fourier coefficients of symplectic Poincar\'e series, in terms of auxiliary Kloosterman sums.
\begin{dfn}
\begin{enumerate}[label=(\alph*)]
\item
Let $n\in N\rb{\mathbb{Q}_p}$, and $\psi_p, \psi'_p$ be characters of $U\rb{\mathbb{Q}_p}$ which are trivial on $U\rb{\mathbb{Z}_p}$. Then the local auxiliary Kloosterman sum is defined to be
\[\begin{aligned}
\underline{\operatorname{Kl}}_p\rb{n, \psi_p, \psi'_p} = \sum\limits_{\substack{x\in X(n)\\ x = b_1 n b_2}} \psi_p\rb{b_1} \psi'_p\rb{b_2}
\end{aligned}\]
if $\psi_p\rb{nun^{-1}} = \psi'_p\rb{u}$ for $u \in \ol U_n\rb{\mathbb{Q}_p}$, and zero otherwise. We say $\underline{\operatorname{Kl}}_p\rb{n, \psi_p, \psi'_p}$ is well-defined if $\psi_p\rb{nun^{-1}} = \psi'_p\rb{u}$ for $u \in \ol U_n\rb{\mathbb{Q}_p}$.
\item
Let $n\in N\rb{\mathbb{Q}}$, and $\psi = \prod\limits_p \psi_p$, $\psi' = \prod\limits_p \psi'_p$ be characters of $U\rb{\mathbb{A}}$ which are trivial on $\prod\limits_p U\rb{\mathbb{Z}_p}$. Then the global auxiliary Kloosterman sum is defined to be
\[\begin{aligned}
\underline{\operatorname{Kl}}\rb{n, \psi, \psi'} = \prod\limits_p \underline{\operatorname{Kl}}_p\rb{n, \psi_p, \psi'_p}.
\end{aligned}\]
\end{enumerate}
\end{dfn}
We first show that the auxiliary Kloosterman sums are well-defined.
\begin{prp}\label{prp:Friedberg1.3}
\cite[Proposition 1.3]{Friedberg1987} Let $G = \operatorname{Sp}\rb{2r, \mathbb{Q}_p}$, $n \in N\rb{\mathbb{Q}_p}$, and $x \in X(n)$, with Bruhat decomposition $x = b_1 n b_2$, with $b_1, b_2 \in U\rb{\mathbb{Q}_p}$. Let $\psi, \psi'$ be characters of $U\rb{\mathbb{Q}_p}$ which are trivial on $U\rb{\mathbb{Z}_p}$. Then the quantity $\psi\rb{b_1} \psi'\rb{b_2}$ is well-defined as a function on $X(n)$ if $\psi\rb{nun^{-1}} = \psi'\rb{u}$ for $u \in \ol U_n\rb{\mathbb{Q}_p}$.
\end{prp}
\begin{proof}
Suppose $\psi\rb{nun^{-1}} = \psi'\rb{u}$ for all $u \in \ol U_n\rb{\mathbb{Q}_p}$. Let $x = b_1 n b_2 = b'_1 n b'_2$ be two Bruhat decompositions. This says $b'_1 = \gamma b_1$ for some $\gamma \in U(\mathbb{Z}_p)$, and $b'_2 = b_2 \delta$ for some $\delta \in U_n\rb{\mathbb{Z}_p}$. Then we have
\[\begin{aligned}
U\rb{\mathbb{Z}_p} b_1 n b_2 \delta^{-1} = U\rb{\mathbb{Z}_p} b_1 n b_2,
\end{aligned}\]
which implies $b_2 {b'_2}^{-1} = b_2 \delta^{-1} b_2^{-1} \in \ol U_n\rb{\mathbb{Q}_p}$. Now, from the equivalence of Bruhat decompositions, we deduce that
\[\begin{aligned}
U\rb{\mathbb{Z}_p} n b_2 {b'_2}^{-1} n^{-1} U_n\rb{\mathbb{Z}_p} = U\rb{\mathbb{Z}_p} b_1^{-1} b'_1 U_n\rb{\mathbb{Z}_p},
\end{aligned}\]
which implies $\psi' \rb{b_2{b'_2}^{-1}} = \psi \rb{nb_2{b'_2}^{-1} n^{-1}} = \psi\rb{b_1^{-1} b'_1}$.
\end{proof}
\begin{prp}\label{prp:auxKl}
If $\underline{\operatorname{Kl}}_p\rb{n, \psi_p, \psi'_p}$ is well-defined, then $\underline{\operatorname{Kl}}_p\rb{n, \psi_p, \psi'_p} = \operatorname{Kl}_p\rb{n, \psi_p, \psi'_p}$.
\end{prp}
\begin{proof}
Trivial.
\end{proof}
Let $G = \operatorname{Sp}\rb{4, \mathbb{Q}_p}$, and $\psi = \psi_{m_1, m_2}$, $\psi' = \psi_{n_1, n_2}$. We give a table of conditions for auxiliary $\operatorname{Sp}(4)$ Kloosterman sums $\underline{\operatorname{Kl}}_p\rb{n_{w, r, s}, \psi, \psi'}$ to be well-defined.
\[\begin{aligned}
\begin{array}{|c|c|c|c|}
\hline
w & {\text{Well-definedness conditions}} & w & {\text{Well-definedness conditions}}\\
\hline
\operatorname{id} & m_1=n_1, m_2=n_2 & s_\beta s_\alpha & m_1=n_2=0\\
\hline
s_\alpha & m_2=n_2=0 & s_\alpha s_\beta s_\alpha & n_2 = m_2 p^{2r-2s}\\
\hline
s_\beta & m_1=n_1=0 & s_\beta s_\alpha s_\beta & n_1 = m_1 p^{s-2r}\\
\hline
s_\alpha s_\beta & m_2=n_1=0 & w_0 & -\\
\hline
\end{array}
\end{aligned}\]
\begin{rmk}
From this table, we see that not all Kloosterman sums $\operatorname{Kl}_p\rb{n, \psi, \psi'}$ correspond to a well-defined auxiliary Kloosterman sum $\underline{\operatorname{Kl}}_p\rb{n, \psi, \psi'}$.
\end{rmk}
The Fourier coefficients $P_{\psi, \psi'} (g)$ can be evaluated using the following theorem of Friedberg:
\begin{thm}\label{thm:FriedbergThmA}
\cite[Theorem A]{Friedberg1987} The Fourier coefficient $P_{\psi, \psi'} (g)$ of $\operatorname{Sp}(2r)$ Poincar\'e series is given by
\[\begin{aligned}
P_{\psi, \psi'} (g) = \sum\limits_{\substack{n \in N\rb{\mathbb{Q}}\\ w(n) = w}} \underline{\operatorname{Kl}}\rb{n, \psi, \psi'} \int_{U_w\rb{\mathbb{R}}} \mathcal F_\psi\rb{n u_1 y} \ol{\psi'} \rb{u_1} du_1.
\end{aligned}\]
\end{thm}
\begin{rmk}
In \cite{Friedberg1987}, the statement concerns $\operatorname{GL}(r)$ Poincar\'e series, but the proof also works for $\operatorname{Sp}(2r)$ Poincar\'e series.
\end{rmk}
\subsection{$\operatorname{Sp}(4)$ Poincar\'e series} \label{section:Sp4Poincare}
Let $P_0$ be the standard minimal parabolic subgroup of $G = \operatorname{Sp}(4)$. For $w\in W$, let $G_w = UwDU$, $\Gamma_w = U(\mathbb{Z}) \cap w^{-1} U(\mathbb{Z})^T w$, and $R_w$ be a complete set of coset representatives for $P_0 \cap \Gamma \backslash \Gamma \cap G_w / \Gamma_w$. Define
\[\begin{aligned}
\mathcal N_w = \cbm{n \in N(\mathbb{R})}{\exists \gamma \in R_w \text{ such that } \gamma = b_1 n b_2 \text{ for } b_1, b_2 \in U(\mathbb{R})}.
\end{aligned}\]
For $\psi = \psi_{m_1, m_2}$, and $u_1, u_2 \in \mathbb{R}$, we denote the exponential $\operatorname{e}\rb{m_1 u_1 + m_2 u_2}$ by $\psi\rb{u_1, u_2}$.
Now we compute the Fourier coefficients $P_{\psi, \psi'} (g)$ for $P_\psi (g)$, making use of \Cref{thm:FriedbergThmA}.
\begin{enumerate}[label=(\roman*)]
\item For $w=\operatorname{id}$, we have $n = I$, and the integral just gives $F\rb{y_1, y_2}$. Hence
\[\begin{aligned}
_{\operatorname{id}} P_{\psi, \psi'} (g) = \underline{\operatorname{Kl}}\rb{I, \psi, \psi'} F\rb{y_1, y_2}.
\end{aligned}\]
\item For $w = s_\alpha$, we have
\[\begin{aligned}
\mathcal N_{s_\alpha} &= \cbm{\begin{pmatrix} &1/v_4\\ -v_4\\ &&&v_4\\ &&-1/v_4\end{pmatrix}}{v_4 \geq 1}, & U_{s_\alpha}(\mathbb{R}) &= \cbm{\begin{pmatrix} 1 & u_1\\ &1\\ &&1\\&&-u_1&1\end{pmatrix}}{u_1\in\mathbb{R}}.
\end{aligned}\]
Meanwhile, through Iwasawa decomposition, we obtain that
\[\begin{aligned}
&\int_{U_{s_\alpha}(\mathbb{R})} \mathcal F_\psi \rb{nu_1y} \ol{\psi'}\rb{u_1} du_1\\
= &\int_\mathbb{R} \psi\rb{-\frac{u_1y_2^2}{v_4\sqrt{u_1^2 y_2^2+y_1^2}}, 0} F\rb{\frac{y_1y_2}{v_4\sqrt{u_1^2y_2^2+y_1^2}}, v_4\sqrt{u_1^2y_2^2+y_1^2}} \ol{\psi'}\rb{u_1, 0} du_1.
\end{aligned}\]
Hence,
{\small
\[\begin{aligned}
_{s_\alpha} P_{\psi, \psi'} (g) = &\sum\limits_{v_4\geq 1} \underline{\operatorname{Kl}}\rb{\begin{pmatrix} &1/v_4\\ -v_4\\ &&&v_4\\ &&-1/v_4\end{pmatrix}, \psi, \psi'} \int_\mathbb{R} \psi\rb{-\frac{u_1y_2^2}{v_4\sqrt{u_1^2 y_2^2+y_1^2}}, 0}\\
&F\rb{\frac{y_1y_2}{v_4\sqrt{u_1^2y_2^2+y_1^2}}, v_4\sqrt{u_1^2y_2^2+y_1^2}} \ol{\psi'}\rb{u_1, 0} du_1.
\end{aligned}\]
}
\item For $w = s_\beta$, we have
\[\begin{aligned}
\mathcal N_{s_\beta} &= \cbm{\begin{pmatrix} 1\\ &&&1/v_{23}\\ &&1\\ &-v_{23}\end{pmatrix}}{v_{23} \geq 1}, & U_{s_\beta}(\mathbb{R}) &= \cbm{\begin{pmatrix} 1\\ &1&&u_5\\ &&1\\&&&1\end{pmatrix}}{u_5\in\mathbb{R}}.
\end{aligned}\]
Hence,
{\small
\[\begin{aligned}
_{s_\beta} P_{\psi, \psi'} (g) = &\sum\limits_{v_{23}\geq 1} \underline{\operatorname{Kl}}\rb{\begin{pmatrix} 1\\ &&&1/v_{23}\\ &&1\\ &-v_{23}\end{pmatrix}, \psi, \psi'} \int_\mathbb{R} \psi\rb{0, -\frac{u_5}{v_{23}y_2\sqrt{y_2^4 + u_5^2}}}\\
&F\rb{y_1, \frac{y_2}{v_{23}\sqrt{y_2^4+u_5^2}}} \ol{\psi'}\rb{0, u_5} du_5.
\end{aligned}\]
}
\item For $w = s_\alpha s_\beta$, we have
{\small
\[\begin{aligned}
\mathcal N_{s_\alpha s_\beta} &= \cbm{\begin{pmatrix} &&&-1/v_2\\ v_2/v_{23}\\ & v_2\\ &&v_{23}/v_2\end{pmatrix}}{\begin{array}{l} v_2, v_{23}\geq 1\\ v_{23} \mid v_2\end{array}}, & U_{s_\alpha s_\beta}(\mathbb{R}) &= \cbm{\begin{pmatrix} 1 &&& u_4\\ &1 &u_4 & u_5\\ &&1\\&&&1\end{pmatrix}}{u_4, u_5\in \mathbb{R}}.
\end{aligned}\]
}
Hence,
{\small
\[\begin{aligned}
_{s_\alpha s_\beta} P_{\psi, \psi'} (g) = &\sum\limits_{v_2 \geq 1} \sum\limits_{v_{23} \mid v_2} \underline{\operatorname{Kl}}\rb{\begin{pmatrix} &&&-1/v_2\\ v_2/v_{23}\\ & v_2\\ &&v_{23}/v_2\end{pmatrix}, \psi, \psi'}\\
&\int_\mathbb{R} \int_\mathbb{R} \psi\rb{-\frac{u_4 y_2^2}{v_2 \sqrt{\eta \rb{y_2^4+u_5^2}}}, -\frac{v_2u_4^2u_5}{v_{23}\sqrt{\eta\rb{y_2^4+u_5^2}}}} F\rb{\frac{y_1y_2}{v_2\sqrt{\eta}}, \frac{v_2}{v_{23}} \sqrt{\frac{\eta}{y_2^4+u_5^2}}} \ol{\psi'}\rb{0, u_5} du_4 du_5,
\end{aligned}\]
}where $\eta = y_1^2y_2^4+u_5^2y_1^2+u_4^2y_2^2$.
\item For $w = s_\beta s_\alpha$, we have
{\small
\[\begin{aligned}
\mathcal N_{s_\beta s_\alpha} &= \cbm{ \begin{pmatrix} & 1/v_4\\ && v_4/v_{14}\\ &&&v_4\\ -v_{14}/v_4\end{pmatrix}}{\begin{array}{l} v_4, v_{14}\geq 1\\ v_4^2 \mid v_{14}\end{array}}, & U_{s_\beta s_\alpha}(\mathbb{R}) &= \cbm{\begin{pmatrix} 1 & u_1 & u_2\\ & 1\\ &&1\\ &&-u_1 & 1\end{pmatrix}}{u_1, u_2\in \mathbb{R}}.
\end{aligned}\]
}
Hence,
{\small
\[\begin{aligned}
_{s_\beta s_\alpha} P_{\psi, \psi'} (g) = &\sum\limits_{v_{14}\geq 1} \sum\limits_{v_4^2 \mid v_{14}} \underline{\operatorname{Kl}}\rb{\begin{pmatrix} & 1/v_4\\ && v_4/v_{14}\\ &&&v_4\\ -v_{14}/v_4\end{pmatrix}, \psi, \psi'}\\
& \int_\mathbb{R} \int_\mathbb{R} \psi\rb{-\frac{u_1u_2y_2^2}{v_4\sqrt{\eta \rb{u_1^2y_2^2+y_1^2}}}, -\frac{v_4 u_2}{v_{14} \sqrt{\eta \rb{u_1^2y_2^2+y_1^2}}}} F\rb{\frac{y_1y_2}{v_4\sqrt{u_1^2y_2^2+y_1^2}}, \frac{v_4}{v_{14}} \sqrt{\frac{u_1^2y_2^2+y_1^2}{\eta}}}\\
&\ol{\psi'}\rb{u_1, 0} du_1 du_2,
\end{aligned}\]
}where $\eta = \rb{u_1^2y_2^2+y_1^2}^2 + u_2^2$.
\item For $w = s_\alpha s_\beta s_\alpha$, we have
{\small
\[\begin{aligned}
\mathcal N_{s_\alpha s_\beta s_\alpha} &= \cbm{\begin{pmatrix} &&-1/v_1\\ &v_1/v_{14}\\ v_1\\ &&&v_{14}/v_1\end{pmatrix}}{\begin{array}{l} v_1, v_{14}\geq 1\\ v_{14} \mid v_1^2\end{array}}, & U_{s_\alpha s_\beta s_\alpha} (\mathbb{R}) = \cbm{\begin{pmatrix} 1 & u_1 & u_2 & u_4\\ &1&u_4\\ &&1\\ &&-u_1&1\end{pmatrix}}{u_i \in \mathbb{R}}.
\end{aligned}\]
}
Hence,
{\small
\[\begin{aligned}
_{s_\alpha s_\beta s_\alpha} P_{\psi, \psi'} (g) = &\sum\limits_{v_1 \geq 1} \sum\limits_{v_{14} \mid v_1^2} \underline{\operatorname{Kl}}\rb{\begin{pmatrix} &&-1/v_1\\ &v_1/v_{14}\\ v_1\\ &&&v_{14}/v_1\end{pmatrix}, \psi, \psi'}\\
& \int_\mathbb{R} \int_\mathbb{R} \int_\mathbb{R} \psi\rb{\frac{u_1 u_2 y_2^2 - u_4 y_1^2}{v_1 \sqrt{\eta_1 \eta_2}}, \frac{v_1 \eta_3}{v_{14} \sqrt{\eta_1 \eta_2}}} F\rb{\frac{y_1y_2}{v_1 \sqrt{\eta_2}}, \frac{v_1}{v_{14}} \sqrt{\frac{\eta_2}{\eta_1}}}\\
&\ol{\psi'} \rb{u_1, 0} du_1 du_2 du_4,
\end{aligned}\]
}
where
\[\begin{aligned}
\eta_1 &= \rb{u_1^2y_2^2+y_1^2}^2 + \rb{u_1u_4+u_2}^2,\\
\eta_2 &= u_1^2y_1^2y_2^4+y_1^4y_2^2+u_4^2y_1^2+u_2^2y_2^2,\\
\eta_3 &= u_1^2u_2y_2^4 - u_1^3 u_4 y_2^4 - 2u_1u_4 y_1^2 y_2^2 - u_1 u_4^3 - u_2 u_4^2.
\end{aligned}\]
\item For $w = s_\beta s_\alpha s_\beta$, we have
{\small
\[\begin{aligned}
\mathcal N_{s_\beta s_\alpha s_\beta} &= \cbm{\begin{pmatrix} &&&-1/v_2\\ &&v_2/v_{12}\\ &v_2\\ -v_{12}/v_2\end{pmatrix}}{\begin{array}{l} v_{12}, v_2 \geq 1\\ v_2 \mid v_{12}\end{array}}, & U_{s_\beta s_\alpha s_\beta} (\mathbb{R}) &= \cbm{\begin{pmatrix} 1 && u_2 & u_4\\ &1&u_4&u_5\\ &&1\\&&&1\end{pmatrix}}{u_i\in\mathbb{R}}.
\end{aligned}\]
}
Hence,
{\small
\[\begin{aligned}
_{s_\beta s_\alpha s_\beta} P_{\psi, \psi'} (g) = &\sum\limits_{v_{12}\geq 1} \sum\limits_{v_2 \mid v_{12}} \underline{\operatorname{Kl}}\rb{\begin{pmatrix} &&&-1/v_2\\ &&v_2/v_{12}\\ &v_2\\ -v_{12}/v_2\end{pmatrix}, \psi, \psi'}\\
&\int_\mathbb{R} \int_\mathbb{R} \int_\mathbb{R} \psi\rb{\frac{\eta_3}{v_2\sqrt{\eta_1 \eta_2}}, \frac{v_2 \eta_4}{v_{12}\sqrt{\eta_1 \eta_2}}} F\rb{\frac{y_1y_2}{v_2\sqrt{\eta_1}}, \frac{v_2}{v_{12}} \sqrt{\frac{\eta_1}{\eta_2}}}\\
&\ol{\psi'} \rb{0, u_5} du_2 du_4 du_5,
\end{aligned}\]
}where
\[\begin{aligned}
\eta_1 &= y_1^2y_2^4 + u_5^2y_1^2 + u_4^2y_2^2,\\
\eta_2 &= y_1^4y_2^4 + u_5^2y_1^4+2u_4^2y_1^2y_2^2+u_2^2y_2^4+u_4^4-2u_2u_4^2u_5+u_2^2u_5^2,\\
\eta_3 &= u_4u_5y_1^2 + u_2u_4y_2^2,\\
\eta_4 &= u_4^2u_5 - u_2y_2^4 - u_2u_5^2.
\end{aligned}\]
\item For $w = s_\alpha s_\beta s_\alpha s_\beta$, we have
{\small
\[\begin{aligned}
\mathcal N_{s_\alpha s_\beta s_\alpha s_\beta} &= \cbm{\begin{pmatrix} &&-1/v_1\\ &&&-v_1/v_{12}\\ v_1\\ & v_{12}/v_1\end{pmatrix}}{v_1, v_{12}\geq 1}, & U_{s_\alpha s_\beta s_\alpha s_\beta}(\mathbb{R}) = U(\mathbb{R}).
\end{aligned}\]
}Hence,
{\small
\[\begin{aligned}
_{s_\alpha s_\beta s_\alpha s_\beta} P_{\psi, \psi'} (g) = &\sum\limits_{v_1\geq 1} \sum\limits_{v_{12}\geq 1} \underline{\operatorname{Kl}}\rb{\begin{pmatrix} &&-1/v_1\\ &&&-v_1/v_{12}\\ v_1\\ & v_{12}/v_1\end{pmatrix}, \psi, \psi'}\\
&\int_\mathbb{R} \int_\mathbb{R} \int_\mathbb{R} \int_\mathbb{R} \psi\rb{-\frac{\eta_3}{v_1\sqrt{\eta_1 \eta_2}}, \frac{v_1 \eta_4}{v_{12}\sqrt{\eta_1 \eta_2}}} F\rb{\frac{y_1y_2}{v_1\sqrt{\eta_2}}, \frac{v_1}{v_{12}} \sqrt{\frac{\eta_2}{\eta_1}}}\\
&\ol{\psi'} \rb{u_1, u_5} du_1 du_2 du_4 du_5,
\end{aligned}\]
}where
{\small
\[\begin{aligned}
\eta_1 = &u_1^2u_4^2y_2^4 + y_1^4y_2^4 - 2u_1u_2u_4y_2^4 + u_1^2u_4^2u_5^2 + u_5^2y_1^4 + 2u_4^2y_1^2y_2^2 + u_2^2y_2^4 + 2u_1u_4^3u_5 - 2u_1u_2u_4u_5^2,\\
\eta_2 = &u_1^2y_1^2y_2^4 + u_1^2u_5^2y_1^2 + y_1^4y_2^2 + 2u_1u_4u_5y_1^2 + u_4^2y_1^2 + u_2^2y_2^2 + u_4^4 - 2u_2u_4^2u_5 + u_2^2u_5^2,\\
\eta_3 = &u_1y_1^2y_2^4 + u_1u_5^2y_1^2 + u_4u_5y_1^2 + u_2u_4y_2^2,\\
\eta_4 = &u_1^3u_4y_2^4 - u_1^2u_2y_2^4 + u_1^3u_4u_5^2 + 2u_1u_4y_1^2y_2^2 + 2u_1^2u_4^2u_5 - u_1^2u_2u_5^2 - u_5y_1^4 + u_1u_4^3 + u_2u_4^2 - u_2^2u_5.
\end{aligned}\]
}
\end{enumerate}
|
1,314,259,995,018 | arxiv | \section{Introduction}
In a recent analysis of the pair creation of magnetically charged black
holes by a magnetic field in Einstein--Maxwell theory, it was shown that
the creation rate is enhanced by a factor of $\exp(\S_{\scriptscriptstyle BH})$,
where $\S_{\scriptscriptstyle BH}$ is the black hole entropy, relative to the pair
creation rate for GUT monopoles \cite{GGS}. This result is important
because it provides a clue to the problem of the origin of black hole
entropy. In particular, it is consistent with the view that black holes
have $\exp(\S_{\scriptscriptstyle BH})$ internal or horizon quantum states.
In this article the pair creation of non--extreme black holes (with
horizons identified) is considered in a general setting. The result is
always the same---the pair creation rate is enhanced by a factor of
$\exp(\S_{\scriptscriptstyle BH})$ relative to the creation rate for a pair of
compact matter configurations (stars). This result holds for {\it any\/}
generally covariant theory of gravitational and matter fields that can be
expressed in Hamiltonian form. The enhancement in the black hole creation
rate is derived solely from the formal mathematical framework in which
the pair creation rate and the density of quantum states are expressed
as path integrals.
The enhancement in the black hole creation rate applies, in particular,
to the creation of electrically charged black holes
by an electric field in Einstein--Maxwell theory. This result has
been anticipated \cite{GS,DGKT}:
Since the creation rate for magnetically charged black holes is enhanced by
the factor $\exp(\S_{\scriptscriptstyle BH})$, by duality of the electromagnetic field
the creation rate for electrically charged black holes should be enhanced
by the same factor. Although this argument is correct physically,
the details of the
calculation for the electric case are not entirely obvious. The
apparent difficulty stems from the use of instanton
methods in which the leading order approximation to the creation
rate is related to the action of a classical solution, the
instanton. For the case of magnetically charged black holes and
magnetic fields \cite{GGS,GS,DGKT,Gibbons,Ross,DGGH,Yi,HHR}, the
instanton is obtained by the familiar substitution of $-it$ for $t$ in
the magnetic Ernst solution. The resulting instanton consists of a real
Euclidean metric and a real electromagnetic vector potential. On the
other hand, for the case of electrically charged black holes and electric
fields, substitution of $-it$ for $t$ in the electric Ernst solution
yields an instanton that consists of a real Euclidean metric and an
{\it imaginary\/} electromagnetic scalar potential. As shown here,
this result is correct and leads to the expected pair creation rate for
electrically charged black holes.
The appearance of an imaginary scalar potential is
familiar from the path integral construction of the partition function
for an electrically charged black hole \cite{BBWY,CPW}. If the black hole
is rotating, the shift vector for the instanton is imaginary as well
\cite{BMY,BY,CECS}. In general, instantons are stationary solutions
with the following properties: all fields that appear in the Hamiltonian
as Lagrange multipliers are imaginary, and the canonical
variables are real. These are the essential properties
that allow the instanton solution to match the corresponding
Lorentzian solution along a stationary surface.
The pair creation of electrically charged black holes in Einstein--Maxwell
theory is treated as a concrete example in this article. Thus, I begin in
Sec.~2 with a discussion of the electric Ernst solution and its
relationship,
through electromagnetic duality, to the magnetic Ernst solution. Section 3
contains a discussion of the connection between a general stationary
Lorentzian solution of the Einstein--Maxwell equations of motion and its
associated instanton. The instanton for the electric Ernst solution
is displayed explicitly. In Sec.~4, the pair creation rate for
electrically charged black holes in Einstein--Maxwell theory is
computed relative to the pair creation rate
for electrically charged stars. The analysis is generalized in Sec.~5 to
apply to any generally covariant theory of gravitational and matter fields.
The generalization requires a careful comparison of the formal path integral
derivations of black hole pair creation and black hole entropy.
It should be emphasized that the results of this paper rely on the
instanton approximation. Thus, the existence is assumed of instanton
solutions that describe the creation of black holes and compact matter
distributions, in the appropriate physical contexts. On the other hand,
it is not necessary that these classical solutions be known. Typically
they are not. (An exception is the Ernst solution in Einstein--Maxwell
theory.) The central result---the enhancement of black hole pair creation
by the factor $\exp(\S_{\scriptscriptstyle BH})$---does not depend on the details of
the theory or the details of the instanton solutions.
\section{Duality and the Electric Ernst Solution}
Let $\epsilon_{{\mu\nu}\rho\sigma}$ denote the totally antisymmetric
tensor (volume element) with $\epsilon_{0123} = \sqrt{-g}$. The
dual of the electromagnetic field $F_{{\mu\nu}}$ is defined by
\begin{equation}
\hbox{${}^{\rm *}$\kern-1.5pt $F$}^{{\mu\nu}} = (1/2) \epsilon^{{\mu\nu}\rho\sigma} F_{\rho\sigma}
\Longleftrightarrow F^{{\mu\nu}} = -(1/2) \epsilon^{{\mu\nu}\rho\sigma}
\hbox{${}^{\rm *}$\kern-1.5pt $F$}_{\rho\sigma} \ .\eqnum{1}
\end{equation}
The electric and magnetic fields are
\begin{equation}
E^\mu = F^{{\mu\nu}} U_\nu \ ,\qquad B^\mu = -\hbox{${}^{\rm *}$\kern-1.5pt $F$}^{{\mu\nu}} U_\nu
\ , \eqnum{2}
\end{equation}
respectively, where $U^\mu$ is the unit normal vector field of a
family of spacelike
hypersurfaces. The electromagnetic stress tensor can be
written either in terms of $F_{{\mu\nu}}$ or $\hbox{${}^{\rm *}$\kern-1.5pt $F$}_{{\mu\nu}}$:
\begin{eqnarray}
4\pi T_{\mu\nu} & = & F_{\mu\alpha} {F_\nu}^{\alpha} -
(1/4) g_{\mu\nu} F_{\alpha\beta}F^{\alpha\beta} \eqnum{3a}\\
& = & \hbox{${}^{\rm *}$\kern-1.5pt $F$}_{\mu\alpha} {\hbox{${}^{\rm *}$\kern-1.5pt $F$}_\nu}^{\alpha} -
(1/4) g_{\mu\nu} \hbox{${}^{\rm *}$\kern-1.5pt $F$}_{\alpha\beta}\hbox{${}^{\rm *}$\kern-1.5pt $F$}^{\alpha\beta}
\ .\eqnum{3b}
\end{eqnarray}
The classical equations of motion are the Einstein equations
$G_{{\mu\nu}} = 8\pi T_{{\mu\nu}}$ (with Newton's constant equal to 1)
and the Maxwell equations $d{\bbox{F}} = 0$ and
$d{\hbox{${}^{\rm *}$\kern-1.5pt ${\bbox{F}}$}} = 0$. The
electromagnetic field $F_{\mu\nu}$ and its dual $\hbox{${}^{\rm *}$\kern-1.5pt $F$}_{\mu\nu}$ play a
symmetric role. If \{$g_{\mu\nu}$, $F_{\mu\nu}$\} = \{${\tilde g}_{\mu\nu}$,
${\tilde F}_{\mu\nu}$\} is a solution of the Einstein--Maxwell equations,
then \{$g_{\mu\nu}$, $F_{\mu\nu}$\} = \{${\tilde g}_{\mu\nu}$, $-\hbox{${}^{\rm *}$\kern-1.5pt $\tilde F$}_{\mu\nu}$\}
is also a solution. Here, the symbols ${\tilde g}_{\mu\nu}$ and
${\tilde F}_{\mu\nu}$ refer to specific tensors. Thus,
\{$g_{\mu\nu}$, $F_{\mu\nu}$\} = \{${\tilde g}_{\mu\nu}$, ${\tilde F}_{\mu\nu}$\}
implies that the electromagnetic field $F_{\mu\nu}$ is given by the
tensor ${\tilde F}_{\mu\nu}$, whereas \{$g_{\mu\nu}$, $F_{\mu\nu}$\} =
\{${\tilde g}_{\mu\nu}$, $-\hbox{${}^{\rm *}$\kern-1.5pt $\tilde F$}_{\mu\nu}$\} implies that the
electromagnetic field $F_{\mu\nu}$ is given by the tensor
$-\hbox{${}^{\rm *}$\kern-1.5pt $\tilde F$}_{\mu\nu}$ (which is minus the dual of ${\tilde F}_{\mu\nu}$).
According to the definitions (2), the electric and magnetic fields
for the solution \{${\tilde g}_{\mu\nu}$, $-\hbox{${}^{\rm *}$\kern-1.5pt $\tilde F$}_{\mu\nu}$\} are
${\tilde B}^\mu$ and $-{\tilde E}^\mu$, respectively, where
${\tilde E}^\mu$ and ${\tilde B}^\mu$ are the electric and magnetic
fields for the solution \{${\tilde g}_{\mu\nu}$, ${\tilde F}_{\mu\nu}$\}.
The Ernst solution \cite{Ernst} describes a pair of oppositely
charged black holes accelerating apart in an electric or
magnetic field. The electric and magnetic cases are related
by duality as described above. In both cases the metric for the
spacetime region containing one black hole is
\begin{equation}
{\tilde g}_{{\mu\nu}}dx^\mu dx^\nu =\frac{\Lambda^2}{A^2(x-y)^2}
\Bigl[ G(y) dt^2 - G^{-1}(y) dy^2 + G^{-1}(x) dx^2 \Bigr]
+ \frac{G(x)}{A^2(x-y)^2\Lambda^2} d\varphi^2 \ ,\eqnum{4}
\end{equation}
where
\begin{eqnarray}
G(\xi) & = & (1+r_- A\xi)(1-\xi^2 - r_+ A\xi^3) \ ,\eqnum{5a}\\
\Lambda(x,y) & = & (1 + Bqx/2)^2 + \frac{B^2 G(x)}{4A^2(x-y)^2}
\ ,\eqnum{5b}
\end{eqnarray}
and $q^2 = r_+ r_-$. For the magnetic case, the electromagnetic
field is ${\tilde F}_{\mu\nu} = \partial_\mu\MAtilde_\nu -
\partial_\nu\MAtilde_\mu$ where
\begin{equation}
{\MAtilde}_\varphi = -\frac{2}{B\Lambda} (1 + Bqx/2) + k
\ .\eqnum{6}
\end{equation}
For the electric case, the electromagnetic field is
$-\hbox{${}^{\rm *}$\kern-1.5pt $\tilde F$}_{\mu\nu} = \partial_\mu\EAtilde_\nu -
\partial_\nu\EAtilde_\mu$ where
\begin{eqnarray}
{\EAtilde}_t & = & - \frac{B}{2A^2} \biggl[ \frac{G(y)}{(x-y)^2}
(1 + Bqx - Bqy/2) + (1+r_- Ay)(1+r_+ Ay) (1-Bqy/2)\biggr]
\nonumber\\
& & + qy + k \ .\eqnum{7}
\end{eqnarray}
The magnetic Ernst solution is \{${\tilde g}_{\mu\nu}$, $\MAtilde_\mu$\}
and the electric Ernst solution is \{${\tilde g}_{\mu\nu}$, $\EAtilde_\mu$\}.
For both the electric and magnetic Ernst solutions, certain
restrictions must be imposed on the parameters $r_-$, $r_+$,
$A$, and $B$ \cite{GGS,GS,DGKT,Gibbons,Ross,DGGH,Yi,HHR}.
In particular, assume $r_+ A < 2/(3\sqrt{3})$ so that
the three roots of the cubic factor in $G(\xi)$ are real. For
non--extreme black holes, the smallest of these roots, $\xi_2$,
obeys $\xi_2 > -1/(r_-A)$. The angular coordinate $x$ is restricted
to $\xi_3 \leq x \leq \xi_4$, where $\xi_3$ and $\xi_4$ are the
two larger roots of the cubic factor in $G(\xi)$. The poles
$x=\xi_3$ and $x =\xi_4$ are free from conical singularities
if $G'(\xi_3) \Lambda(\xi_3)^2 = -G'(\xi_4) \Lambda(\xi_4)^2$
and the period in $\varphi$ is $4\pi \Lambda(\xi_3)^2/G'(\xi_3)$.
(Note, $\Lambda(\xi_3) = \Lambda(\xi_3,y)$ is independent of
$y$, and similarly for $\Lambda(\xi_4)$.) The black hole event
horizon is the null surface $y=\xi_2$, and the acceleration
horizon is the null surface $y=\xi_3$.
For the electric Ernst
solution the magnitude of the electric field on the
axis $x=\xi_3$ at spatial infinity ($y\to\xi_3$) is
$B G'(\xi_3)/(2 \Lambda(\xi_3)^{3/2})$. The magnitude of the electric
charge of the black hole is
\begin{equation}
\frac{1}{4\pi} \oint {\hbox{${}^{\rm *}$\kern-1.5pt $d\EAtildebold$}}
= \frac{q(\xi_4 - \xi_3) \Lambda(\xi_3)^{3/2}}
{G'(\xi_3) (1 + Bq\xi_4/2)} \ .\eqnum{8}
\end{equation}
The electric Ernst solution coincides with the electric Melvin
solution \cite{Melvin} at spatial infinity, and also in the limit of
vanishing $r_-$ and $r_+$. The metric for the electric Melvin solution
is the same as that for the magnetic Melvin solution, while the
electromagnetic field is determined by the scalar potential
${\EAtilde}_t = B z$. (The notation is that of
Ref.~\cite{GGS}, so here $B$ is the value of the
electric field on the $z$--axis).
\section{Instanton Solutions}
The instanton that enters the calculation of the creation rate for
electrically charged black holes is obtained by the substitution
$t\to -it$ in the electric Ernst solution \{${\tilde g}_{\mu\nu}$,
$\EAtilde_\mu$\}. It is useful to adopt a general notation and to
consider this step from a Hamiltonian point of view.
First, recall that the metric tensor and electromagnetic potential
can be split in space and time according to
\begin{eqnarray}
ds^2 & = & -(N\,dt)^2 + h_{ij} (dx^i + V^i dt)(dx^j + V^j dt)
\ ,\eqnum{9a}\\
\bbox{A} & = & -\Phi dt + A_i (dx^i + V^i dt) \ .\eqnum{9b}
\end{eqnarray}
Here, $N$ is the lapse function, $V^i$ is the shift vector,
$h_{ij}$ is the spatial metric, $\Phi = -A_t + V^i A_i$ is the
scalar potential, and $A_i$ is the vector potential. The canonical
coordinates are $h_{ij}$ and $A_i$, and the canonically conjugate
momenta are
\begin{eqnarray}
P^{ij} & = & -\frac{\sqrt{h}}{32\pi N} (h^{ij} h^{k\ell} -
h^{ik} h^{j\ell}) ({\dot h}_{k\ell} -
2 D_{{\scriptscriptstyle (}k} V_{\ell{\scriptscriptstyle )}}) \ ,\eqnum{10a}\\
{\cal E}^i & = & \frac{\sqrt{h}}{4\pi N} h^{ij} \Bigl( {\dot A}_j +
\partial_j(\Phi - V^k A_k) +
2 V^k \partial_{{\scriptscriptstyle [}j} A_{k{\scriptscriptstyle ]}}\Bigr) \ .\eqnum{10b}
\end{eqnarray}
In Eq.~(10a), $D_k$ denotes the covariant derivative in space.
The lapse $N$, shift $V^i$,
and scalar potential $\Phi$ appear in the Hamiltonian formalism
as Lagrange multipliers for the Hamiltonian, momentum, and
Gauss's law constraints, respectively.
Let \{${\tilde g}_{\mu\nu}$, ${\tilde A}_\mu$\} denote any stationary
real Lorentzian solution of the Einstein--Maxwell equations,
written in stationary coordinates. In terms of the space--time
split (9), this solution is
\{$N$, $V$, $h$, $\Phi$, $A$\} =
\{$\tilde N$, ${\tilde V}$, ${\tilde h}$, ${\tilde\Phi}$,
${\tilde A}$\}, where the fields $\tilde N$, ${\tilde V}^i$,
${\tilde h}_{ij}$, ${\tilde\Phi}$, and ${\tilde A}_i$ are independent
of $t$ and are real. From this Lorentzian solution ({\it i.e.\/},
Eq.~(9) with tildes placed over the fields), the substitution
$t\to -it$ generates another field configuration, namely,
\{$N$, $V$, $h$, $\Phi$, $A$\} =
\{$\bar N$, ${\bar V}$, ${\bar h}$, ${\bar\Phi}$, ${\bar A}$\}, where
\begin{eqnarray}
{\bar N} & = & -i {\tilde N} \ ,\qquad
{\bar V}^i = -i {\tilde V}^i \ , \qquad
{\bar\Phi} = -i {\tilde\Phi} \ , \eqnum{11a}\\
{\bar h}_{ij} & = & {\tilde h}_{ij} \ ,\qquad
{\bar A}_i = {\tilde A}_i \ .\eqnum{11b}
\end{eqnarray}
This is the instanton. Note that the lapse ${\bar N}$, shift
${\bar V}^i$, and scalar potential ${\bar\Phi}$ are imaginary.
If ${\bar V}^i = 0$, the metric for the instanton is real Euclidean;
otherwise the metric is complex.
The instanton is a solution of the Einstein--Maxwell
equations.\footnote{It serves no purpose to introduce the terminology
``Lorentzian equations of motion'' and ``Euclidean equations of motion'',
since a stationary Lorentzian solution and its associated instanton
satisfy the {\it same\/} equations of motion.} In the
Hamiltonian setting this follows from a few simple observations.
First, according to Eq.~(11b),
the canonical coordinates for the Lorentzian solution and the instanton
coincide. Also, definition (10) shows that the canonical momenta for
the Lorentzian solution are equal to the canonical momenta for the
instanton solution, ${\tilde P}^{ij} = {\bar P}^{ij}$ and
${\tilde{\cal E}}^i = {\bar{\cal E}}^i$. Thus, under the
substitution $t\to -it$, the canonical variables are unchanged and the
Lagrange multipliers are multiplied by the factor $-i$. Now, the
Einstein--Maxwell equations include the Hamiltonian, momentum,
and Gauss's law constraints. The constraints are constructed entirely
from the canonical variables---since they are satisfied for the
Lorentzian solution they are also satisfied for the instanton. The
remaining equations of motion are the evolution equations
${\dot f} = \{f,H\}$. Here, the brackets are Poisson brackets, $f$
denotes any function of the canonical variables, and the Hamiltonian
$H$ is a linear combination of constraints with Lagrange
multipliers as coefficients (plus suitable boundary terms).
For both the Lorentzian solution and
the instanton, the left--hand side ${\dot f}$ vanishes by stationarity.
Then for the Lorentzian solution
the right--hand side $\{f,H\}$ vanishes. The right--hand
side $\{f,H\}$ must vanish for the instanton case as well, since
it just differs from the right--hand side in the Lorentzian case by
an overall factor of $-i$. This shows that the equations of motion
${\dot f} = \{f,H\}$ are satisfied for the instanton configuration.
The stationary Lorentzian solution
\{$N$, $V$, $h$, $\Phi$, $A$\} =
\{$\tilde N$, ${\tilde V}$, ${\tilde h}$, ${\tilde\Phi}$,
${\tilde A}$\} and its associated instanton
\{$N$, $V$, $h$, $\Phi$, $A$\} =
\{$\bar N$, ${\bar V}$, ${\bar h}$, ${\bar\Phi}$,
${\bar A}$\} are characterized by the same canonical data,
including the electric field $E^i = -4\pi {\cal E}^i/\sqrt{h}$.
This is an essential feature of the instanton analysis. It
ensures that the Lorentzian and instanton solutions match
along a stationary surface.
Also note that the value of the proper electrostatic potential
as determined by an (Eulerian) observer who is at rest in the
$t={\rm const}$ hypersurfaces, $-A_\mu U^\mu = \Phi/N$, is the
same for the Lorentzian and instanton solutions. Likewise, the
proper velocity of the spatial coordinate system, $V^i/N$, is
the same for the Lorentzian and instanton solutions. In certain
contexts this quantity has a physical meaning. For example,
for the thermodynamical description of a rotating black hole
\cite{BMY,BY} in corotating coordinates, $V^\phi/N$ is the
angular velocity of the black hole with respect to the
Eulerian observers.
The electric Ernst instanton solution is
\begin{eqnarray}
{\bar g}_{{\mu\nu}}dx^\mu dx^\nu & = & \frac{-\Lambda^2}{A^2(x-y)^2}
\Bigl[ G(y) dt^2 + G^{-1}(y) dy^2 - G^{-1}(x) dx^2 \Bigr]
+ \frac{G(x)}{A^2(x-y)^2\Lambda^2} d\varphi^2 \ ,\eqnum{12a}\\
{\EAbar}_t & = & \frac{iB}{2A^2} \biggl[ \frac{G(y)}{(x-y)^2}
(1 + Bqx - Bqy/2) + (1+r_- Ay)(1+r_+ Ay) (1-Bqy/2)\biggr]
\nonumber\\
& & -iqy -ik \ .\eqnum{12b}
\end{eqnarray}
The metric (12a) is real Euclidean since the shift vector for
the Ernst solution vanishes. The metric is regular for
$\xi_2 \leq y \leq \xi_3$ if $G'(\xi_2) = -G'(\xi_3)$ and if the
time coordinate is periodic with period
$4\pi/G'(\xi_3)$ \cite{GGS,GS,DGKT,Gibbons,Ross,DGGH,Yi,HHR}.
The vector field $\EAbar_\mu$ is
regular if $\EAbar_t$ vanishes at both $y=\xi_2$ and $y=\xi_3$.
These conditions are satisfied if $\EAbar_\mu$ is defined
separately in open neighborhoods of $y=\xi_2$ and $y=\xi_3$,
and in each of these neighborhoods the constant $k$ of
Eq.~(12b) is chosen appropriately.
Topologically, the Ernst instanton can be viewed as $\hbox{$I$\kern-3.8pt $R$}^4$
with the interior of a ``tube'' $S^1\times S^2$ removed, and
points along the $S^1$ direction identified. This
two--dimensional surface is $y=\xi_2$, and is referred to below
as the Euclidean wormhole. The acceleration horizon of the
Lorentzian Ernst solution corresponds to the two--dimensional
surface $y = \xi_3$ of the instanton solution. For the instanton,
the wormhole $y=\xi_2$ surrounds the surface $y=\xi_3$.
\section{Black Hole Pair Creation in Einstein--Maxwell Theory}
In the path integral for Einstein--Maxwell theory, each history
\{$g_{\mu\nu}$, $A_\mu$\} enters with a weight $\exp(\S)$ where
\begin{equation}
\S[g_{\mu\nu},A_\mu] = \frac{i}{16\pi} \int_{{\cal M}} d^4x \sqrt{-g}
(R -F_{\mu\nu} F^{\mu\nu}) + (\hbox{boundary terms}) \ .\eqnum{13}
\end{equation}
I will refer to $\S$ as the action.
The path integral is ultimately defined as a
sum over either Lorentzian metrics, or Euclidean metrics, or some
other class of metrics. For the purpose of computing the leading
order (exponential) contribution to the path integral this issue is
not important. In particular, the instanton can be viewed as a stationary
point in a sum over real $N$, $V$, $h$, $\Phi$, and $A$
that lies off the axis of integration. Alternatively, one can
rotate the integration contours for $N$, $V$, and $\Phi$ in the
complex plane so that the instanton lies on the axis of integration.
The boundary terms in $\S$ depend on the boundary conditions that are
appropriate for the problem at hand. Here, the pair creation rate for
electrically charged black holes is computed relative to the pair
creation rate for electrically charged stars. In the
instanton approximation this is given by the exponential of $\S$ for the
electric Ernst instanton (eEi) divided by the exponential of $\S$ for
the charged star instanton (csi). Thus, all that is required is the
difference $\S[{\rm eEi}] - \S[{\rm csi}]$. I will assume that the
stars are compact, and that the matter, charge, and stress inside
the stars are distributed in such a way that the gravitational and
electromagnetic fields outside the stars coincide with the fields of
the electric Ernst solution exterior to a pair of closed surfaces that
surround the black holes. Then the instanton for the charged star is
topologically $\hbox{$I$\kern-3.8pt $R$}^4$ and coincides with the electric Ernst instanton
everywhere except in the interior of a ``tube'' $S^1\times S^2$ that
encompasses the matter (for the charged star instanton) or wormhole
(for the Ernst instanton).
In this case the boundary terms that appear in Eq.~(13) cancel in the
calculation of $\S[{\rm eEi}] - \S[{\rm csi}]$.
The calculation $\S[{\rm eEi}] - \S[{\rm csi}]$ is easily carried
out using the Hamiltonian approach with the electric Ernst and
charged star instantons foliated along the surfaces of constant
stationary time $t$. The Hamiltonian form of the action for
Einstein--Maxwell theory is
\begin{equation}
\S = i \int dt\, d^3x \Bigl( P^{ij} {\dot h}_{ij} + {\cal E}^i {\dot A}_i
- N\H - V^i\H_i + \Phi{\cal G} \Bigr) + (\hbox{boundary terms})
\ ,\eqnum{14}
\end{equation}
where $\H$, $\H_i$, and ${\cal G}$ are the Hamiltonian, momentum, and
Gauss's law constraints. For the theory that describes the charged
star solution, the matter fields contribute extra ``$p\dot q$'' terms
to $\S$ and also contribute terms to the constraints. (The matter
fields might also contribute boundary terms at infinity. These will
vanish for the charged star instanton since the matter distribution
is compact.) The boundary terms in Eq.~(14) include the boundary
terms at infinity from Eq.~(13) and also boundary terms that
arise from total derivatives and integrations by parts in the
space--time decomposition.\footnote{For Einstein gravity, a
systematic derivation of the Hamiltonian action (14) from the
covariant action (13), including boundary terms, was given in
Ref.~\cite{BY}. Electromagnetic, Yang--Mills, and other matter
fields can be incorporated into that derivation in a
straightforward manner.} The surfaces $t={\rm const}$
extend from infinity to the ``acceleration horizon''
two--surface $y=\xi_3$, which serves as a boundary for the
three--dimensional hypersurfaces. The boundary terms in Eq.~(14)
include boundary terms at $y=\xi_3$. These terms, like the boundary
terms at infinity, cancel in the calculation of the difference
$\S[{\rm eEi}] - \S[{\rm csi}]$.
For the electric Ernst instanton, but not for the charged star
instanton, the hypersurfaces $t={\rm const}$ intersect at the
Euclidean wormhole. This two--dimensional surface constitutes
an inner boundary $b$ of topology $S^2$ for the
hypersurfaces. In passing from the Lagrangian form (13)
to the Hamiltonian form (14) of $\S$, the various total derivatives
and integrations by parts introduce boundary terms at $b$. These
boundary terms can be derived by cutting out a small region
surrounding the wormhole, then taking the limit as the excised
region vanishes. In this way an inner boundary $B$ of topology
$S^2\times S^1$ is introduced into the Ernst instanton manifold.
Under the simplifying assumption that the (outward pointing) unit
normal vector field $n^\mu$ of $B$ lies in the $t={\rm const}$
hypersurfaces, the boundary terms are \cite{BMY,BY,CECS}
\begin{equation}
\S\bigr|_{B} = -i\int dt\int_{b} d^2x \sqrt{\sigma}
\Bigl[ (n^i \partial_i N)/(8\pi) + V^i n^j(2P_{ij} +
A_i{\cal E}_j)/\sqrt{h} - \Phi n_i{\cal E}^i/\sqrt{h} \Bigr] \ .\eqnum{15}
\end{equation}
Here, $\sigma$ denotes the determinant of the metric on $b$.
With $\S$ written in Hamiltonian form (14), all boundary terms
except those displayed in Eq.~(15) cancel in the calculation
$\S[{\rm eEi}] - \S[{\rm csi}]$. Furthermore, for both the
electric Ernst instanton and the charged star instanton, the
(four--dimensional) volume integral terms in $\S$ vanish---the
``$p\dot q$'' terms vanish by stationarity and the remaining
terms vanish because the constraints hold. Therefore
$\S[{\rm eEi}] - \S[{\rm csi}]$ is equal to the boundary term
(15) evaluated at the electric Ernst instanton. Recall that for
the Ernst instanton the shift vector $V^i = {\bar V}^i$ is zero
and by regularity the scalar potential
$\Phi = -\EAbar_t$ vanishes at the wormhole $b$. Thus
only the first term in Eq.~(15) is nonzero. Setting
$N={\bar N} = -i{\tilde N}$, we have
\begin{equation}
\S[{\rm eEi}] - \S[{\rm csi}] =
-\int dt\int_{b} d^2x \sqrt{\sigma}
(n^i\partial_i {\tilde N})/(8\pi) \ .\eqnum{16}
\end{equation}
At each point of $b$, the quantity $-\int dt(n^i\partial_i
{\tilde N})$ is the rate of change of proper circumference
with respect to proper radius for the circular trajectories
of $\partial/\partial t$ in the neighborhood of $B$.
(The minus sign appears because the
normal $n^\mu$ points in the direction of decreasing radius.)
By regularity of the metric this equals $2\pi$. Therefore
Eq.~(16) becomes
\begin{equation}
\S[{\rm eEi}] - \S[{\rm csi}] = A_{\scriptscriptstyle BH}/4 \ ,\eqnum{17}
\end{equation}
where $A_{\scriptscriptstyle BH}$ is the area $\int_{b} d^2x \sqrt{\sigma}$
of the wormhole. In turn, $A_{\scriptscriptstyle BH}$ equals the horizon
area of each black hole in the Lorentzian Ernst solution.
Equation (17) shows that, in the instanton approximation, the
pair creation rate for electrically charged black holes is
enhanced by a factor of $\exp(A_{\scriptscriptstyle BH}/4)$ relative to the
pair creation rate for electrically charged stars.
Note that, with the calculation organized as
above, the detailed forms of the electric Ernst solution and the
charged star solution are not needed. Thus, the result (17)
shows that the pair creation rate for black holes in
Einstein--Maxwell theory is always enhanced by the factor
$\exp(A_{\scriptscriptstyle BH}/4)$ relative to the pair creation rate for
matter distributions.
\section{Black Hole Pair Creation in General}
The inner boundary terms (15) that yield the black hole entropy factor
for pair creation in Einstein--Maxwell theory are precisely the same
terms that yield the black hole entropy in the path integral analysis
of the partition functions \cite{BMY,BY,CECS}. The partition functions
are obtained from the density of states $\nu$ by Laplace transforms---for
example, the grand canonical partition function is obtained from
$\nu$ by a Laplace transform that replaces energy with inverse temperature
as the independent thermodynamical variable.
For the purpose of deriving the entropy it is most convenient to work
directly with the density of states $\nu$. The entropy is given
by the logarithm of $\nu$.
The density of states is a function of the
thermodynamical extensive variables such as energy, angular momentum,
electric charge, {\it etc\/}. Expressed as a path integral, $\nu$ is
a sum over all fields that fit inside an outer boundary (the
periodically identified history of a ``box'') of topology
$S^2\times S^1$. The extensive variables are fixed as boundary
conditions on this outer boundary \cite{BY}. Consider the action for
such a path integral, for any generally covariant theory of
gravitational and matter fields. For the moment, let the manifold
${\cal M}$ have topology ${\cal M}=\Sigma\times S^1$, where space $\Sigma$ has
boundary $\partial\Sigma = S^2$ and ${\cal M}$ has boundary
$\partial{\cal M} = S^2\times S^1$. I will assume that the action
can be written in Hamiltonian form
\begin{equation}
\S[\lambda,q,p] = i \int dt\, d^3x \Bigl( p_a {\dot q}^a
- \lambda^{\scriptscriptstyle A}{\cal C}_{\scriptscriptstyle A} \Bigr) + ({\hbox{boundary
terms}}) \ .\eqnum{18}
\end{equation}
Notice that the volume integral contribution to the
Hamiltonian is written as a linear combination of constraints
${\cal C}_{\scriptscriptstyle A}({q},{p})$ with Lagrange multipliers $\lambda^{\scriptscriptstyle A}$.
This form for the Hamiltonian follows from general covariance and the
property that under reparametrizations in $t$ the canonical
variables $ p_a$ and $ q^a$ transform as scalars and the Lagrange
multipliers transform as scalar densities \cite{HandT}. I will assume
that these conditions hold. It also follows that the boundary terms
in (18) cannot depend solely on the canonical
variables---each term, expressed as an integral over $\partial{\cal M}$, must
include a Lagrange multiplier as a factor in its integrand in order to
transform properly under reparametrizations in $t$.
The boundary terms in the action (18) must
be correlated with the boundary conditions on $\partial{\cal M}$ in such a
way that the boundary terms in the
variation $\delta\S$ vanish. There are two types of boundary terms
in $\delta\S$, namely, those that arise from variation of the boundary terms
in $\S$, and those that arise from integration by parts.
Integration by
parts occurs when the constraints ${\cal C}_{\scriptscriptstyle A}$ contain spatial
derivatives
of the canonical variables. Thus, the boundary terms in $\delta\S$ that
arise through integration by parts necessarily involve variations of the
canonical variables. On the other hand, the explicit boundary terms in $\S$,
upon variation, generate boundary terms in $\delta\S$ that involve
variations of the Lagrange multipliers. These boundary terms
can never be canceled by the boundary terms that come from integration by
parts.
Consequently, if the action $\S$ includes
any explicit boundary terms, then $\delta\S$ will include boundary
terms that involve variations of quantities that depend on the Lagrange
multipliers.
Because the
boundary terms in $\delta\S$ must vanish by virtue of the boundary
conditions,
we see that the boundary data for this action will include fixation
of quantities that depend on the Lagrange multipliers.
Armed with these observations it follows that the action appropriate
for the path integral representation of the density of states $\nu$
(the ``microcanonical action'' \cite{BY}) is given by Eq.~(18) with
{\it no boundary terms\/}. Here is the reason:
The density of states is a function of the thermodynamical extensive
variables which, by definition, are properties of the
states of the system. These variables appear at the classical level as
functions of the canonical variables $p_a$ and $q^a$. Thus, the path
integral for $\nu$ must come from an action in which the fixed boundary
data are functions only of the canonical variables---by the arguments above,
such an action has no explicit boundary terms.
Now consider a stationary Lorentzian black hole solution
\{${\tilde\lambda}^{\scriptscriptstyle A}$,
${\tilde q}^a$, ${\tilde p}_a$\} of the classical equations
of motion. The black hole's entropy is
found by evaluating the path integral for the density
of states $\nu$, with the boundary data fixed to those values that
characterize
the black hole. The path integral is given approximately by its integrand
$\exp(\S[\lambda,q,p])$ evaluated at a complex black hole extremum
\{${\bar\lambda}^{\scriptscriptstyle A}$, ${\bar q}^a$, ${\bar p}_a$\}.
The complex black hole is obtained from the Lorentzian black hole
by the relations
\begin{equation}
{\bar\lambda}^{\scriptscriptstyle A} = -i{\tilde\lambda}^{\scriptscriptstyle A} \ ,\qquad
{\bar q}^a = {\tilde q}^a \ ,\qquad
{\bar p}_a = {\tilde p}_a \ .\eqnum{19}
\end{equation}
The complex black hole satisfies the boundary conditions, since the
boundary conditions involve only the canonical variables.
The fact that the complex black hole is a solution of the classical equations
of motion follows
from an obvious generalization of the arguments given in Sec.~3.
Alternatively, observe that
\{${\bar\lambda}^{\scriptscriptstyle A}$, ${\bar q}^a$, ${\bar p}_a$\} is obtained from
\{${\tilde\lambda}^{\scriptscriptstyle A}$, ${\tilde q}^a$, ${\tilde p}_a$\}
by a reparametrization $t\to -it$, where $p_a$ and $q^a$ transform as
scalars and $\lambda^{\scriptscriptstyle A}$ transforms as a scalar density.
The path integral constructed from the action (18) with no boundary
terms yields the contribution to $\nu$ from the topological sector
$\Sigma\times S^1$. However, the complex black hole is an extremum
of the action on a manifold with topology
${\cal M} = S^2\times\hbox{$I$\kern-3.8pt $R$}^2$ and boundary $\partial{\cal M} = S^2\times S^1$. Thus,
the action
(18) with no boundary terms is not correct as it stands for use in the
approximation $\exp(\S[{\bar\lambda},{\bar q},{\bar p}])$ to the
density of states. It is, however, correct with regard to
the lack of boundary terms at $\partial{\cal M}$. The action to be
used in the approximate evaluation of $\nu$ can be found by starting from
(18), with no boundary terms, and writing this action in manifestly
covariant (Lagrangian) form. The manifold can then be
chosen to have topology $S^2\times\hbox{$I$\kern-3.8pt $R$}^2$. The resulting Lagrangian action
can be written in Hamiltonian form
if a small region surrounding the center of the disk $\hbox{$I$\kern-3.8pt $R$}^2$, where the
hypersurfaces intersect, is removed. This introduces an inner boundary
$B=S^2\times S^1$ in ${\cal M}$. In passing to the Hamiltonian form of the
action, total derivatives and integrations by parts will generate
boundary terms at the inner boundary $B$.
There will be no boundary terms at the outer boundary, however, since
none were present in the original action.
{}From the discussions above it follows that the black hole entropy is
approximated by $\S_{\scriptscriptstyle BH} \approx
\S[{\bar\lambda},{\bar q},{\bar p}]$, and
the action $\S[\lambda,q,p]$ has the
form (18) where {\it only inner boundary terms are present\/}.
In evaluating this action at the complex solution
\{${\bar\lambda}^{\scriptscriptstyle A}$, ${\bar q}^a$, ${\bar p}_a$\},
the limit is taken in which the excised region vanishes.
Since $\partial{\bar q}/\partial t=0$ (by stationarity) and
${\cal C}_{\scriptscriptstyle A}(\bar q,\bar p) = 0$, the only nonzero contribution to
the entropy $\S_{\scriptscriptstyle BH} \approx \S[{\bar\lambda},{\bar q},{\bar p}]$
comes from the inner boundary terms.\footnote{It should be emphasized
that the method discussed here, which generalizes the analysis of
Refs.~\cite{BMY,BY,CECS}, can be used to derive an explicit expression
for black hole entropy for any generally covariant theory of
gravitational and matter fields that can be placed in Hamiltonian form.
This method shows that the entropy $\S_{\scriptscriptstyle BH} \approx
\S[{\bar\lambda},{\bar q},{\bar p}]$ is a ``geometrical'' quantity
(constructed from the gravitational and matter fields)
defined locally at the black hole horizon. The local, geometrical
character of black hole entropy has been examined in detail in
Ref.~\cite{Wald} using Noether charge techniques.} Note that the
resulting entropy is real, since each inner boundary term must
contain a Lagrange multiplier factor (in order to transform properly
under reparametrizations) and the Lagrange multipliers
${\bar\lambda}^{\scriptscriptstyle A}$ are imaginary.
The inner boundary terms that yield the black hole entropy coincide with
the inner boundary terms that yield the relative enhancement
factor for black hole pair creation. This is not
difficult to see. Recall from the example of black hole pair creation in
Einstein--Maxwell theory that the action for the instanton that describes
black hole pair creation, when written
in Hamiltonian form, includes boundary terms at infinity as
well as boundary terms at the acceleration and black hole horizons.
The action for the instanton that describes pair creation of matter
distributions (stars) contains the same boundary terms at infinity and
at the acceleration horizon, but of course no horizon boundary terms.
The volume integral contributions to the Hamiltonian actions for both the
black hole instanton and the star instanton vanish because the instantons
are stationary and satisfy the constraints. Thus, in taking the difference
between the
actions for the black hole instanton and the star instanton, only the
inner boundary terms from the black hole horizon survive. Those inner
boundary terms are derived by the same analysis as the inner boundary
terms for black hole entropy. Namely,
they arise when the Lagrangian action is expressed in Hamiltonian
form in the presence of the boundary $B$ of a small excised region
around the black hole event horizon where the hypersurfaces
intersect.
The enhancement factor for black hole pair creation is obtained
by evaluating the inner boundary terms at the black hole instanton
solution, while the entropy of a black hole is obtained by evaluating
the {\it same\/} inner boundary terms at the complex black hole
solution. But the black hole instanton is related to a real Lorentzian
solution, which represents a physical black hole pair, by the
substitution $t\to -it$. This relationship agrees precisely
with the relationship between either of the two physical black holes and
the complex solution that yields its entropy. We are therefore led to the
main conclusion that the enhancement in the pair creation rate for black
holes is given by the factor $\exp(\S_{\scriptscriptstyle BH})$ for any
generally covariant theory of gravitational and matter fields.
\section{Acknowledgments}
I would like to thank G.~T.~Horowitz and R.~M.~Wald for helpful remarks,
and J.~W.~York for helpful discussions and comments on the manuscript.
% --- document boundary (extraction artifact removed; a new paper begins below) ---
The recent flurry of activity, largely triggered by \cite{delaFuente:2014aca,Rudelius:2015xta,Montero:2015ofa}, in constraining phenomenological string model building using Quantum Gravity swampland criteria \cite{Vafa:2005ui,Ooguri:2006in,ArkaniHamed:2006dz,Ooguri:2016pdq,Freivogel:2016qwc,Obied:2018sgi,Harlow:2018jwu,Harlow:2018tng} (see \cite{Brennan:2017rbf} for a recent review) is giving birth to an emerging field, which can deservedly claim the designation of \textbf{Quantum Gravitational String Phenomenology}.
The application of constraints convincingly argued to hold in any theory of Quantum Gravity is leading to new breakthroughs. In particular, the Weak Gravity Conjecture (WGC) \cite{ArkaniHamed:2006dz} (see \cite{Cheung:2014vva,delaFuente:2014aca,Rudelius:2015xta,Montero:2015ofa,Brown:2015iha,Brown:2015lia,Heidenreich:2015wga,Hebecker:2015rya,Bachlechner:2015qja,Junghans:2015hba,Ibanez:2015fcv,Hebecker:2015zss,Heidenreich:2016aqi,Montero:2016tif,Ibanez:2017oqr,Gonzalo:2018tpb,Gonzalo:2018dxi} for different formulations and applications) has motivated the remarkable statement that stable non-supersymmetric Anti de Sitter (AdS) vacua are not possible in Quantum Gravity \cite{Ooguri:2016pdq,Freivogel:2016qwc}. This AdS-WGC constraint is largely motivated by the application of the refined WGC to systems of branes in the near horizon limit, and has received direct support from the study of decays of non-supersymmetric AdS vacua in string theory via bubbles of nothing \cite{Ooguri:2017njy}. The AdS-WGC has been argued to have far-reaching implications for particle physics and its scales \cite{Ibanez:2017oqr,Gonzalo:2018tpb,Gonzalo:2018dxi}.
There are also recent proposals of swampland criteria attempting to rule out de Sitter vacua as well \cite{Obied:2018sgi,Garg:2018reu,Ooguri:2018wrx}, possibly in certain regimes under parametric control. This claim clashes with familiar roadmaps for the construction of de Sitter vacua in string theory \cite{Kachru:2003aw,Balasubramanian:2005zx}, see \cite{Cicoli:2018kdo,Kachru:2018aqn} for recent discussion. A key ingredient in the parametric control of these scenarios is the presence of warped throats \cite{Klebanov:2000hb,Giddings:2001yu} at whose bottom the supersymmetry breaking sectors are localized, so that they undergo a redshift crucial for the tunability of the 4d vacuum energy. Starting from the original proposal of supersymmetry breaking by anti-D3-branes \cite{Kachru:2003aw}, there is a rich variety of proposals, see e.g. \cite{Burgess:2003ic,Kallosh:2015nia,Retolaza:2015nvh}. Hence, it is interesting to explore the interplay of non-supersymmetric warped throats with constraints from Quantum Gravity.
In this paper we consider non-compact warped throats and constrain these 5d backgrounds by proposing a new swampland conjecture, the {\em local AdS-WGC}, which generalizes the AdS-WGC to locally AdS warped throats. The conjecture is motivated by considering the near horizon limit of systems of fractional D-branes at singularities, but should hold more generally. Although it does not constrain metastable non-supersymmetric throats, hence has no direct implication for e.g. anti-D3-brane models, it can be used to rule out large classes of warped throats with supersymmetry breaking sectors at their bottom. We study this phenomenon in several explicit examples, shedding new light on already known instabilities in supersymmetry breaking D-brane models, such as the dP$_1$ theory, and unveiling novel decay channels in AdS or locally AdS backgrounds. For instance, we explicitly discuss warped throats with supersymmetry broken by the introduction of anti-orientifold planes.
A remarkable feature of these examples is that the non-supersymmetric backgrounds are stable at the classical level, and that the pathologies arise at the quantum level, often by nucleation of bubbles hosting interiors of more stable vacua. This is consistent with the interpretation of these constraints as arising from consistency in Quantum Gravity.
The paper is organized as follows: In Section \ref{sec:dimer-intro} we review systems of D-branes at singularities and fractional branes using the powerful toolkit of dimer diagrams. In Section \ref{sec:local-ads-wgc} we propose the local AdS-WGC criterion; we derive it in section \ref{sec:derivation-deformed}, and use it in section \ref{sec:deformation-dsb} to reinterpret the properties of supersymmetric and non-supersymmetric warped throats dual to fractional D3-branes in toric singularities. In section \ref{sec:metastable} we discuss the situation for throats with meta-stable supersymmetry breaking. In Section \ref{sec:dsb-throat} we consider an illustrative example of a system of D3-branes with Dynamical Supersymmetry Breaking due to strong dynamics and consider its embedding into warped throats. The D-brane gauge theory is discussed in section \ref{sec:dsb-gauge}, and in section \ref{sec:dsb-ads} we describe the instabilities that arise when embedded into AdS or locally AdS warped throats, in agreement with the (local) AdS-WGC implications for non-supersymmetric throats; in section \ref{sec:ntwo} we describe the local AdS-WGC statement in an explicit example illustrating how it applies to non-supersymmetric throats from ${\cal N}=2$ fractional branes. Section \ref{sec:anti-oplanes} treats warped throats with supersymmetry broken by the presence of anti-orientifold-planes. In section \ref{sec:anti-oplanes-throats} we discuss generalities about such throats. In section \ref{sec:anti-oplanes-o3} we focus on anti-O3-planes, describe their different kinds and their interaction with systems of D3-branes. In section \ref{sec:anti-oplanes-o3-throats} we discuss the corresponding gravitational backgrounds and describe their instabilities, in agreement with the (local) AdS-WGC statement.
Finally, in Section \ref{sec:conclu} we give our conclusions.
\section{Review of dimers and fractional branes}
\label{sec:dimer-intro}
Here we briefly review some ingredients of the dimer diagram description of D3-branes at singularities. The initiated reader is welcome to skip it and jump into the next sections.
The gauge theories on D3-branes at toric CY threefold singularities are nicely encoded in a combinatorial graph known as dimer diagram \cite{Franco:2005rj,Franco:2005sm} (see also \cite{Kennaway:2007tq,Yamazaki:2008bt} and references therein). They are (bipartite) graph tilings of ${\bf T}^2$, or equivalently infinite periodic graphs in ${\bf R}^2$. Their faces correspond to gauge factors, edges represent bi-fundamental chiral multiplets (oriented e.g. clockwise around black nodes, and counterclockwise around white nodes), and nodes represent superpotential couplings (with sign determined by the node color). As an illustration, the diagram for the conifold is shown in figure \ref{fig:conifold-dimer2}(a).
The corresponding gauge theory \cite{Klebanov:1998hh} has gauge group $U(n_1)\times U(n_2)$, bi-fundamental chiral multiplets in two copies of the representation $(~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_1,~\overline{\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}}~_2)+(~\overline{\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}}~_1,~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_2)$, denoted by $A_i$, $B_i$, $i=1,2$, and a superpotential $W=\epsilon_{ik}\epsilon_{jl} A_iB_jA_kB_l$.
The geometric information about the CY singularity is encoded in simple combinatorial objects in the dimer, whose discussion we skip, directing the interested reader to the references. We just mention that the geometries are encoded in web diagrams, which specify the fibration structure of the corresponding toric geometry. The web diagram can be obtained by constructing the zig-zag paths in the dimer (these are paths constructed out of sequences of edges which turn maximally left at black nodes and maximally right at white nodes) and translating the non-trivial $(p,q)$ windings of the path on the two non-trivial 1-cycles in ${\bf T}^2$ into the $(p,q)$ labels of external legs in the web diagram. The web diagram for the conifold is shown in figure \ref{fig:conifold-dimer2}(b).
%
\begin{figure}[htb]
\begin{center}
\includegraphics[scale=.4]{conifold-dimer2}
\caption{\small (a) Dimer diagram for the theory of D-branes at a conifold. The dashed line is the unit cell in the periodic array. (b) Web diagram of the conifold. We have displayed it with a finite size ${\bf S}^2$ (middle segment) for clarity; the actual singularity arises when this ${\bf S}^2$ is blown-down.}
\label{fig:conifold-dimer2}
\end{center}
\end{figure}
The choice of ranks $n_i$ in the gauge groups of the dimer theories is arbitrary, but constrained by cancellation of RR tadpoles. These are equivalent to cancellation of non-abelian gauge anomalies (understood as formally imposed for all gauge factors, even those of possible empty faces). These conditions also guarantee the cancellation of mixed $U(1)$ anomalies thanks to Green-Schwarz couplings. There are in fact topological $BF$ couplings with RR 2-forms making all $U(1)$ factors massive (even the non-anomalous ones, see \cite{Ibanez:1998qp}). Supersymmetry of the configuration implies that blow-up modes couple as (field dependent) FI terms to the D3-branes. Although these $U(1)$'s are massive, it still makes sense to discuss them if the corresponding couplings to localized closed string modes are taken into account.
The choice of all ranks equal $n_i\equiv N$ for all $i$ is always allowed, and corresponds to D3-branes which can move off the singularity, as signaled by corresponding flat directions in the D3-brane gauge theory. This in fact underlies the way in which the dimer encodes the CY threefold geometry, as the moduli space of a single such D-brane. These D3-branes are referred to as dynamical, or regular (since, for orbifold singularities, they are associated to the regular representation of the orbifold group \cite{Douglas:1996sw}).
Other rank assignments consistent with the RR tadpole constraints are known as fractional branes. They can be regarded as D5-branes wrapped on 2-cycles (collapsed at the singularity) such that their dual 4-cycle is non-compact. This allows the RR charge carried by the D-branes to escape to infinity. These can always be written as combinations of certain basis of fractional branes, which fall into different classes, as described in \cite{Franco:2005zu}, as follows:
\smallskip
$\bullet$ The so-called ${\cal N}=2$ fractional branes correspond to an overall increase of ranks in a subset of faces bounded by zig-zag paths associated to the same $(p,q)$ 1-cycle in the dimer ${\bf T}^2$. They are associated to parallel external legs in the web diagram, or equivalently to curves of ${\bf C}^2/{\bf Z}_k$ singularities sticking out of the singularity at the origin. The gauge theory on these fractional D3-branes has a flat direction, parametrized by the meson obtained by concatenation of bifundamentals joining the faces bounded by the zig-zag paths in the dimer. The flat direction describes the possibility of moving the fractional D-brane off the origin along the curve of singularities, to become a fractional brane of ${\bf C}^2/{\bf Z}_k$, namely a D5-brane wrapped on one of the collapsed 2-cycles of the orbifold singularity. The gauge theory on this branch is the ${\cal N}=2$ $A_{k-1}$ quiver gauge theory \cite{Douglas:1996sw}, hence the name. An example of ${\cal N}=2$ fractional brane is shown in Figure \ref{Nequalstwo}.
\begin{figure}[!htp]
\begin{center}
\includegraphics[scale=0.3]{Nequalstwo}
\caption{a) Dimer diagram showing an ${\cal N}=2$ fractional brane in the PdP$_4$ theory; b) Web diagram, displaying the corresponding mobile ${\bf P}^1$ as a red discontinuous segment.}
\label{Nequalstwo}
\end{center}
\end{figure}
\smallskip
$\bullet$ The so-called deformation branes are associated to complex deformations of the CY threefold singularity. They are associated to splittings of the web diagram into sub-webs in equilibrium. The rank assignment corresponds to an overall increase of ranks in the subset of faces bounded by the splitting. Namely, the homological sum of the zig-zag paths associated to the sub-web removed (in a given complex deformation, the two sub-webs give the same result, due to the condition that the total sum of $(p,q)$ charges for external legs is zero). They correspond to checkerboard pictures on the dimer. The complex deformation of the geometry has a field theory counterpart, in which the gauge theory on the fractional branes confines and has a complex deformed moduli space. The resulting gauge theories are associated to the two sub-webs \cite{Franco:2005fd,GarciaEtxebarria:2006aq}. An example of a deformation fractional brane is shown in Figure \ref{deformation-brane}.
\begin{figure}[!htp]
\begin{center}
\includegraphics[scale=0.4]{deformation-brane}
\caption{a) Dimer diagram showing a deformation fractional brane in the dP$_3$ theory; b) Web diagram, and its splitting into subwebs in equilibrium, with the finite size ${\bf S}^3$ displayed as a red discontinuous segment suspended between the subwebs.}
\label{deformation-brane}
\end{center}
\end{figure}
The gauge theory arising from a set of $N$ regular D3-branes and $M$ (deformation) fractional branes, leads to RG flows with a sequence of Seiberg duality cascades, along which the overall number of D3-branes $N$ is reduced in multiples of $M$, and the number $M$ of D5-branes remains fixed. The gravity dual corresponds to a warped throat supported by
RR 3-form fluxes on the 3-cycles associated to the complex deformed singularity, and NSNS flux in the dual (non-compact) 3-cycle. Their combination $G_3=F_3-\tau H_3$ is an ISD 3-form of type $(2,1)$, thus preserving supersymmetry \cite{Becker:1996gj,Dasgupta:1999ss,Grana:2000jj}. The throat is locally similar to AdS$_5\times {\bf X}_5$, but with logarithmic changes in the cosmological constant and the RR 5-form flux along the radial direction.
The simplest example is the conifold, studied exhaustively in \cite{Klebanov:2000hb} both from the viewpoints of field theory and of its gravity dual warped throat. The generalization of duality cascades in gauge theories associated with fractional branes in more general singularities has been studied in \cite{Franco:2004jz,Franco:2005fd}. We will consider the gravity dual of deformation branes in general singularities in section \ref{sec:derivation-deformed}.
$\bullet$ The last class corresponds to the remaining kind of fractional branes. Their corresponding rank assignments on faces have no correspondence with a set of zig-zag paths defining a sub-web in equilibrium. Therefore, there is no geometric complex deformation of the singularity associated to them. Indeed, contrary to deformation fractional branes, their infrared dynamics involves non-abelian gauge dynamics (even for the minimal such fractional brane) and results in the absence of a supersymmetric vacuum (hence they were dubbed DSB branes in \cite{Franco:2005zu}, see also \cite{Berenstein:2005xa,Bertolini:2005di}). On the other hand, similarly to deformation fractional branes, they can trigger duality cascades in the presence of $N$ regular D3-branes, which define some warped throats (albeit with naked singularities in the infrared region) \cite{Franco:2004jz}. The infrared dynamics, supersymmetry breaking, and their implications for the gravity dual and the deformed AdS-WGC are discussed in section \ref{sec:derivation-deformed}.
\medskip
In this paper we will also exploit systems of D-branes at orientifolds of toric singularities. They can be usefully encoded in suitable modifications of dimer diagrams. The general description was provided in \cite{Franco:2007ii}, and corresponds to modding out the dimer diagram by a ${\bf Z}_2$ involution. There are two kinds of orientifold quotients, classified by their fixed sets being lines or points. Two such orientifolds of the conifold theory are shown in figure \ref{fig:coni-orientifold}. It is easy to construct other examples, see later and \cite{Franco:2007ii}.
\begin{figure}[htb]
\begin{center}
\includegraphics[scale=.4]{coni-orientifold}
\caption{\small Dimer diagrams for orientifolds of the conifold with fixed points (a) or fixed lines (b).}
\label{fig:coni-orientifold}
\end{center}
\end{figure}
In the following we will mainly focus on models with orientifold fixed points in the dimer. For this class, the rules are as follows (see \cite{Franco:2007ii} for detailed derivations). Each orientifold point carries a $\pm$ sign, with the constraint that the number of orientifold planes with the same sign is even (resp. odd) for dimers with number of nodes given by $4k$ (resp. $4k+2$). Orientifold points with charge $+$ (resp $-$) in the middle of a dimer face project down the corresponding gauge factor to $SO(n_a)$ (resp. $USp(n_a)$ ). Orientifold points with charge $+$ (resp. $-$) in the middle of a dimer edge project down the corresponding bifundamental onto the two-index symmetric (resp. antisymmetric) representation. Finally, faces and edges not mapped to themselves by the orientifold, combine with their images and descend to $U(n_a)$ gauge factors and bi-fundamental matter multiplets in the orientifold theory.
\section{The local AdS-WGC swampland criterion}
\label{sec:local-ads-wgc}
\subsection{Derivation}
\label{sec:derivation-deformed}
The WGC \cite{ArkaniHamed:2006dz}, in its minimal formulation establishes that in any theory including quantum gravity, any $U(1)$ gauge factor should have a super-extremal charged particle, namely $q\geq m$, in natural units. This has been generalized to other $p$-form gauge fields, requiring the existence of the corresponding branes with tensions bounded by their charges, $Q\geq T$, an extension natural in string theory models via T-duality.
The proposal in \cite{Ooguri:2016pdq} of a {\em refined} WGC establishes that the inequality is saturated only for BPS states in supersymmetric theories. This further motivates the {\em AdS-WGC} statement that theories of quantum gravity do not have stable non-supersymmetric AdS vacua, which are thus in the swampland, rather than the string landscape. The AdS-WGC is largely motivated by a particular (but large) class of AdS backgrounds in string theory, which correspond to flux compactifications arising as near horizon limits of systems of D-branes. A prototypical example is the type IIB AdS$_5\times {\bf S}^5$ solution with $N$ units of RR 5-form flux on the ${\bf S}^5$, which arises as the near horizon limit of a system of $N$ D3-branes in flat 10d spacetime \cite{Maldacena:1997re}.
In short, the $T=Q$ condition is crucial in the structure of these vacua, in which the tension creating the spacetime curvature is balanced against the flux sourced by the brane charge in the underlying picture. This proposal is further supported by the study of instabilities of non-supersymmetric AdS vacua due to bubbles of nothing \cite{Ooguri:2017njy}. The AdS-WGC is a powerful statement, which e.g. has subsequently been applied to derive novel constraints on particle physics \cite{Ibanez:2017kvh,Ibanez:2017oqr,Gonzalo:2018tpb,Gonzalo:2018dxi}.
In this paper we propose a generalization of the conjecture, which we dub the {\em local AdS-WGC}. It states that certain warped throat backgrounds, which are AdS locally in the radial direction but have a slow variation of the local 5d value of the cosmological constant, are not consistent in quantum gravity, except for supersymmetric cases. The precise formulation will be manifest from the derivation below.
The derivation follows the strategy of \cite{Ooguri:2016pdq} for AdS fluxed backgrounds, by taking a near horizon limit of D-brane systems. In our case, we apply the near horizon description to systems of regular and fractional D3-branes at singularities, in particular the toric CY singularities of section \ref{sec:dimer-intro}. We note that the discussion below also applies to throats from ${\cal N}=2$ fractional branes, despite the presence of singularities in the near horizon geometry, if one accounts for the additional fields from the twisted sectors, see \ref{sec:ntwo} for extra details.
The backgrounds correspond to the holographic duals of (the UV regime of) gauge theories with cascading RG flows, like the familiar conifold example. The statements below have well-established translations to the holographic dual gauge theory on the D-branes, but we prefer to emphasize the properties of the gravity side.
Consider a system of $N$ regular and $M$ fractional D3-branes at a toric CY singularity with metric,
\begin{eqnarray}
ds_{{\bf Y}_6}^2 \,=\, dr^2 \, +\, r^2 ds_{{\bf X}_5}^2
\end{eqnarray}
The near horizon geometry is a solution of the kind considered in \cite{Klebanov:2000nc} for the conifold and generalized in \cite{Grana:2000jj,Franco:2004jz}, as a particular class of the supersymmetric warped compactification ansatz in \cite{Dasgupta:1999ss,Giddings:2001yu},
\begin{eqnarray}
ds^2 \,=\, Z(r)^{-1/2}\, \eta_{\mu\nu}\, dx^\mu \,dx^\nu\, +\, Z(r)^{1/2} \, [\, dr^2 \, +\, r^2 ds_{{\bf X}_5}^2\, ].
\end{eqnarray}
One obtains a warped version of the singular manifold, which can be regarded as the 5d horizon ${\bf X}_5$ fibered over the 5d space given by 4d Minkowski space and the radial direction $r$.
There are $M$ units of RR 3-form flux along a non-trivial 3-cycle $\Sigma_3$ (topologically an ${\bf S}^3$ or a Lens space) in ${\bf X}_5$, and a corresponding NSNS 3-form flux, such that the combination (setting the 10d RR axion to zero for simplicity) $G_3=F_3 - \frac{i}{g_s}\, H_3$ is a harmonic (2,1)-form, so that the flux is supersymmetric. This $H_3$ flux can be described as a variation in $r$ of the 5d scalar arising from the axion $\phi$ given by the component of the NSNS 2-form $B_2$ along the harmonic 2-form $\omega_2$ Poincar\'{e} dual to $\Sigma_3$ (equivalently, the period of $B_2$ over the 2-cycle $\Sigma_2$ dual to $\Sigma_3$ in ${\bf X}_5$), specifically
\begin{eqnarray}
H_3\, =\, g_s\, M \, \frac {dr}r\, \wedge \omega_2(\Sigma_3)
\label{scalar-variation}
\end{eqnarray}
The combination of fluxes is a source of the RR 5-form $dF_5=F_3\wedge H_3$, such that its flux $N$ over ${\bf X}_5$ varies logarithmically as
\begin{eqnarray}
N \sim g_s\, M^2 \ln (r/r_0)\,
\label{n-variation}
\end{eqnarray}
where $r_0$ is a cutoff distance. The fluxes also backreact on the geometry, via the warp factor, which obeys
\begin{eqnarray}
(\nabla^2_{{\bf Y}} Z)\, {\rm vol} ({\bf Y}_6)\, =\, g_s\, F_3\wedge H_3
\end{eqnarray}
leading to
\begin{eqnarray}
Z(r)\,=\, \frac{4 \pi g_s^2}{r^4} \, M^2 \, \left( \, \ln \left(\frac{r}{r_0}\right) \, + 1 \,\right)
\label{warp-variation}
\end{eqnarray}
The whole of ${\bf X}_5$ shrinks at $r=0$, but the $F_5$ flux has disappeared by then, so there is no topological obstruction to the shrinking from this side. However, the 3-cycle $\Sigma_3$ in ${\bf X}_5$ also collapses, and it supports the $F_3$ flux, which is constant. This leads to a naked singularity at the tip of the throat.
The 5d part of the above solution describes what we refer to as a {\em local AdS solution}. It corresponds to a background which locally in $r$ is an AdS$_5$ background, but whose AdS curvature changes in $r$, as in (\ref{warp-variation}). This variation is controlled by that of a 5d scalar, which in the earlier flux throat is $\phi=\int_{\Sigma_2} B_2$, changing from (\ref{scalar-variation}). In purely 5d terms, the defining property for this scalar is that (from the 10d topological coupling $F_3\wedge B_2 \wedge F_5$) it has a 5d topological coupling
\begin{eqnarray}
S_{\rm CS} \, =\, M \,\phi\, F_5
\end{eqnarray}
This is the 5d version of the topological couplings \cite{Dvali:2005an,Kaloper:2008fb}, arising in flux compactifications as described in \cite{Marchesano:2014mla,McAllister:2014mpa}. Upon integrating out the non-dynamical $F_5$, the resulting potential for $\phi$ controls the local (in $r$) value of the vacuum energy. The background value for this 5d field, following from (\ref{scalar-variation}) is
\begin{eqnarray}
d\phi\, =\, g_s\, M \, \frac {dr}r
\label{variantion-5d}
\end{eqnarray}
Alternatively, its boundary condition is fixed by the asymptotic behavior
\begin{eqnarray}
\phi \sim M\, \ln (r/r_0)
\end{eqnarray}
The local AdS solution can thus be described as a (in this case, 5d) AdS solution modified by the backreaction of a (5d) scalar $\phi$ with topological coupling to a non-dynamical field strength top-form and obeying (\ref{variantion-5d}). The coupling to the top-form can be replaced by equivalent dual formulations, e.g. the explicit $r$-dependence of the 5d vacuum energy.
The local AdS backgrounds we have described contain a naked singularity at the origin, which in fact is known to admit a smooth deformation (preserving supersymmetry) in certain singularities, starting from the celebrated conifold example \cite{Klebanov:2000hb} and generalized in \cite{Franco:2005fd}. Thus, the local AdS solution should be regarded as defining the asymptotics of certain very general class of warped throats, in principle with or without supersymmetry, and imposing swampland constraints on the possible existence of such throats in quantum gravity. This brings us to the precise formulation of a new swampland conjecture.
\medskip
{\underline {Local AdS-WGC swampland criterion}}:\\
{\em In a consistent theory of quantum gravity, there are no stable non-supersymmetric solutions with asymptotics given by local AdS backgrounds, as defined above}.
\subsection{Evidence from deformation and DSB fractional brane systems}
\label{sec:deformation-dsb}
Besides the direct derivation in the spirit of the AdS-WGC, we now present additional support for the local AdS-WGC. Although the following results are known in the literature, their re-interpretation in terms of a swampland constraint is new and provides an interesting insight into the structure of the underlying warped throats and supersymmetry breaking, which we further exploit in later sections.
As mentioned in section \ref{sec:dimer-intro}, there is a large class of local AdS backgrounds arising as holographic duals of (the UV regime of) systems of regular and fractional D3-branes at singularities, specifically, fractional branes of the deformation or DSB kinds (${\cal N}=2$ fractional branes are discussed in section \ref{sec:ntwo}). We discuss their interplay with the local AdS-WGC in turn.
Toric CY singularities admitting a complex deformation can support deformation branes. The gauge theory on their worldvolumes has an UV RG flow whose holographic dual is given by a supersymmetric local AdS background supported by $M$ units of RR flux on the 3-cycle $\Sigma_3$ associated to the complex deformation. Thus the naked singularity at the origin in the local AdS background can be smoothed out by giving this 3-cycle a finite size. The resulting configuration is a smooth supergravity solution described by a warped version of the deformed CY threefold, preserving supersymmetry, and with asymptotics given by a local AdS background; this is thus in agreement with the local AdS-WGC statement. The field theory counterpart of this deformation process was described in \cite{Klebanov:2000hb,Franco:2005fd}.
Toric CY singularities can also support DSB fractional branes which are not associated to complex deformations. Still, the gauge theory on their worldvolume has a UV RG flow whose holographic dual is a supersymmetric local AdS background supported by $M$ units of RR flux on a 3-cycle $\Sigma_3$. The latter, however, cannot be given a finite size while preserving supersymmetry. Naively, one may think that the infrared region is smoothed out to an alternative configuration breaking supersymmetry, either in the form of a supergravity background beyond the warped CY ansatz (in the spirit of e.g. \cite{Butti:2004pk} in the supersymmetric case), or perhaps involving stringy ingredients, such as explicit sources from branes or other singular objects. However, if such re-stabilization would indeed be possible, it would contradict our local AdS-WGC statement.
The actual answer is that the warped throats created by DSB fractional branes actually do not admit any such stable non-supersymmetric smooth version, in agreement with the local AdS-WGC conjecture. This has actually been already studied in the literature, from the gauge theory side. The complex cone over dP$_1$ is the prototypical case of a duality cascade triggered by a DSB brane, and the lack of a supersymmetric vacuum in this dP$_1$ theory was discussed in \cite{Berenstein:2005xa,Franco:2005zu,Bertolini:2005di}. This however does not imply the existence of a non-supersymmetric stable vacuum, rather \cite{Franco:2005zu} already established that the theory shows a runaway behaviour, as follows. By keeping the $U(1)$ factors in the description of the gauge theory, the system has a supersymmetry breaking minimum only if the Fayet-Iliopoulos terms are kept fixed, due to the constraints from the D-term potential. However, the FI terms are actually field dependent, and are controlled by the vevs of closed string twisted sectors. When they are taken as dynamical, the D-term potential can relax in new directions leading to the runaway. The same physics was reinterpreted in \cite{Intriligator:2005aw} as a baryonic runaway direction in the gauge theory with the (massive) $U(1)$'s integrated out. In either of these descriptions, the runaway direction corresponds to a dynamical blow-up of the singularity, since FI terms, or baryonic vevs, are related to blow-up modes. The fractional brane remains as a D5-brane wrapped on a 2-cycle in the dP$_1$ exceptional divisor.
The gravity dual of this runaway has not been determined in the literature, but its structure should correspond to a time-dependent solution, in which the geometry is resolved by growing a finite size dP$_1$ itself, with $M$ explicit D5-branes wrapped on one of its 2-cycles. The latter plays the role of sourcing the $M$ units of RR 3-form, peeling it off the 3-cycle and allowing it to shrink to zero size at the bottom of the (disappearing) throat.
It is interesting to point out that this system provides an interesting link between two seemingly unconnected swampland criteria. On one hand, the statement that in theories of quantum gravity all FI terms should be field-dependent, and thus dynamical \cite{Komargodski:2009pc}; on the other hand, our newly proposed local AdS-WGC. We expect other connections of the local AdS-WGC constraint with other swampland criteria.
We thus see that the class of throats obtained from the different kinds of fractional branes provide illustrative examples of the local AdS-WGC constraint. In later sections we illustrate the power of this conjecture to exclude candidates to non-supersymmetric throats proposed in the literature.
\subsection{Meta-stable throats}
\label{sec:metastable}
It is important to emphasize that the present form of the local AdS-WGC still allows for certain forms of non-supersymmetric warped throats. For instance,
$\bullet$ The conjecture poses no conflict so far with the existence of supersymmetry breaking meta-stable throats with local AdS asymptotics. For instance the systems of anti-D3-branes at the bottom of conifold-like warped throats (i.e. created by deformation fractional branes), extensively used since \cite{Kachru:2003aw}, are in principle allowed \footnote{For discussions on asymptotics and stability of these throats, there is a long-standing debate, see e.g. \cite{Bena:2018fqc} for a recent work, and references therein.}. See also \cite{Kachru:2009kg}, where non-supersymmetric orbifolds are considered and shown to be unstable through nucleation of bubbles of nothing. In contrast with the AdS-WGC, in local AdS throats there is no isometry in the radial direction introducing an infinite volume factor multiplying the decay probability, rather instabilities tend to nucleate near the tip of the throat. Hence, a finite and potentially small decay amplitude is in principle feasible, although this point deserves further study\footnote{We thank M. Montero for raising this point.}.
$\bullet$ Similarly for the nilpotent Goldstino scenario realized in terms of a single anti-D3-brane on top of an O3-plane \cite{Kallosh:2015nia}, for which the stability remarks of \cite{Polchinski:2015bea} specially apply.
$\bullet$ Finally, global compactifications including warped throats may contain ingredients in the CY bulk which modify non-trivially the boundary conditions in the UV region of the throat, thus changing its asymptotics, and allowing it to evade the local AdS-WGC constraint. For instance, this may well be the case if one introduces euclidean D3-brane instantons on 4-cycles intersecting the underlying DSB D3-brane system (thus, stretching in the radial direction of the throat) to stop their runaway, as proposed in \cite{Florea:2006si} (see also \cite{Blumenhagen:2006xt,Ibanez:2007rs} for related tools). Also, if one includes D7-branes introducing new flavours in DSB D-brane systems, to allow for metastable supersymmetry breaking vacua \cite{Franco:2006es,GarciaEtxebarria:2007vh} in the ISS spirit \cite{Intriligator:2006dd}. For a recent discussion of orientifolded throats, see \cite{Argurio:2017upa}.
\medskip
In the following discussions, we consider several large classes of non-supersymmetric warped throats, and reconcile them with the local AdS-WGC by looking for decay channels. Whether these decay channels render the configurations unstable or just meta-stable is not constrained by the conjecture in its present form, hence we loosely refer to them as instabilities of the configuration, even in cases where they could host meta-stable backgrounds.
\section{Warped throats with Dynamical Supersymmetry Breaking}
\label{sec:dsb-throat}
In the previous discussion, the system of D3-branes breaking supersymmetry had a fairly manifest runaway behaviour. There are however other systems of D3-branes at singularities which trigger genuine dynamical supersymmetry breaking, rather than runaway. In this section we explore the proposal of embedding such systems in warped throats \cite{Retolaza:2015nvh}, and how they face the local AdS-WGC.
Again, there are systematic tools for the construction of such theories in terms of D3-branes at toric singularities (possibly in the presence of orientifold quotients), producing ${\cal N}=1$ supersymmetric gauge theories with supersymmetry broken only by non-perturbative dynamics. As explained in \cite{Retolaza:2015nvh}, dimer diagram tools moreover allow to realize them as the theories arising in the infrared of duality cascades of systems of further (deformation) fractional D3-branes at singularities. The gravity dual description of these configurations would correspond to a locally AdS supersymmetric warped throat supported by 3-form fluxes on a 3-cycle associated to a complex deformation, and at whose tip we have the supersymmetry breaking D-brane sector.
If stable, such configurations would lead to a supersymmetry breaking warped throat violating the local AdS-WGC. In this section, we provide a detailed analysis of an illustrative example and show that the configurations are actually unstable. Concretely, although the DSB D3-brane system is consistent in isolation, its embedding into a warped throat contains an instability against bubble nucleation of certain D-brane domain walls. The latter are however more involved than just D3-brane domain walls peeling off the 5-form flux, and provide a novel kind of decay for warped throats. The system also relates to warped throats from (orientifolds of) ${\cal N}=2$ fractional branes, which we discuss as well.
\subsection{The DSB D-brane system}
\label{sec:dsb-gauge}
To make the discussion concrete, we consider an illustrative explicit example given by the DSB theory introduced in \cite{Franco:2007ii}. We start with the ${\bf C}^3/{\bf Z}_6'$ geometry, where the ${\bf Z}_6'$ generator $\theta$ acts as
\begin{eqnarray}
\theta: \;\; z_i\to e^{2\pi iv_i} z_i
\label{DSB-z6-generator}
\end{eqnarray}
with $v=(1,2,-3)/6$. We consider the quotient by an orientifold group $(1+\theta+\ldots +\theta^5) (1+\Omega \alpha (-1)^{F_L})$, where $\alpha$ acts as
\begin{eqnarray}
(z_1,z_2,z_3)\to (e^{2i\pi/12}\, z_1,\; e^{4i\pi /12}\, z_2,\; e^{-6i\pi /12}\, z_3).
\end{eqnarray}
Equivalently, we may introduce invariant coordinates
\begin{eqnarray}
x=z_1^{\, 6}, ~y=z_2^{\,3}, ~z=z_3^{\, 2}.
\end{eqnarray}
in terms of which the orientifold corresponds to the geometric action
\begin{eqnarray}
x\to -x,~ y\to -y, ~z\to -z.
\label{DSB-z6-orientifold-action}
\end{eqnarray}
We consider sets of D3-branes at this orientifold singularity. The resulting gauge theory can be determined from its dimer diagram, shown in Figure~\ref{dsbZ6}. As discussed in the introduction, there are different choices of orientifold signs, which lead to different results of $SO$ or $Sp$ gauge factors and of $\asymm / \symm$ matter fields. For our choice of interest, corresponding to orientifold signs $(a,b,c,d)=(++--)$, the resulting gauge theory is
\begin{eqnarray}
&SO(n_0)\times U(n_1)\times U(n_2)\times USp(n_3) & \nonumber \\
& (~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_0,~\overline{\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}}~_1) + (~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_1,~\overline{\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}}~_2) + (~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_2,~\overline{\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}}~_3)+& \nonumber \\
& + (~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_0,~\overline{\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}}~_2) + (~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_1,~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_3) + \asymm_2+\antisymm_1 +& \nonumber \\
&+ [\, (~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_0,~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_3) + (~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_1,~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_2) + (~\overline{\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}}~_1,~\overline{\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}}~_2) \,]&.
\label{DSB-z6-general}
\end{eqnarray}
\begin{figure}[!htp]
\begin{center}
\includegraphics[scale=0.6]{dsbZ6-eps-converted-to.pdf}
\caption{Dimer diagram for an orientifold of the ${\bf C}^3/{\bf Z}_6'$ theory, from \cite{Franco:2007ii}.}
\label{dsbZ6}
\end{center}
\end{figure}
As is familiar \cite{Ibanez:1998qp}, cancellation of non-abelian gauge anomalies is equivalent to the requirement of cancellation of compact RR tadpoles, which leads to
\begin{eqnarray}
-n_0 + n_2+n_3-n_1-4=0.
\end{eqnarray}
We consider the solution $n_1=n_3=0$, $n_0=k$, $n_2=k+4$, which yields the gauge group $SO(k)\times U(k+4)$ with
matter $(~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~,~\overline{\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}}~)+(1,\asymm)$. The $U(1)$ gauge factor is anomalous, with anomaly canceled by Green-Schwarz couplings, which make it massive and remove it from the massless spectrum. Focusing on $k=1$, we have an $SU(5)$ theory with chiral multiplets in the $10+{\overline 5}$ and no superpotential. This theory has been argued to show dynamical
supersymmetry breaking \cite{Affleck:1983vc,Poppitz:1995fh}. Since there is no moduli space, there is an isolated non-supersymmetric vacuum, which however lies at strong coupling and is non-calculable. Nevertheless, the vacuum energy should scale with the strong dynamics scale $\Lambda$ as
\begin{eqnarray}
V\, \sim \, |\Lambda|^4
\label{DSB-energy}
\end{eqnarray}
This provides a consistent configuration displaying supersymmetry breaking localized at the tip of the corresponding singularity.
It is natural to consider its embedding into warped throats, as a possible source of tunable uplifting energy to be used in attempts to build de Sitter string vacua. In the following we argue this not to be possible.
\subsection{The DSB AdS throat}
\label{sec:dsb-ads}
As a warm-up towards such throats, we may consider the simple addition of a large number of dynamical D3-branes to the earlier system, and take the near horizon limit. This corresponds to increasing the rank of all gauge factors in (\ref{DSB-z6-general}) by the same amount, namely
\begin{eqnarray}
n_0=N+1\quad ,\quad n_1=n_3= N \quad ,\quad n_2=N+ 5
\label{DSB-z6-general-rank}
\end{eqnarray}
For consistency with the $USp$ factor, $N$ should be taken even, but is otherwise unconstrained.
Since the DSB D-brane system (including the orientifold and the $k=1$ $SU(5)$ D-brane set) is subleading in $1/N$, standard arguments show that in the large $N$ limit we obtain a gravity dual given by AdS$_5\times {\bf X}$, where ${\bf X}$ corresponds to an orientifold of the ${\bf Z}_6'$ orbifold of ${\bf S}^5$. Note that since the ${\bf Z}_6'$ orbifold contains fixed complex planes in ${\bf C}^3$, there are fixed circles in the action on ${\bf S}^5$. This leads to circles of ${\bf C}^2/{\bf Z}_2$ and ${\bf C}^2/{\bf Z}_3$ singularities, which are however well understood \cite{Hanany:1998it,Gukov:1998kk}. The orientifold action (\ref{DSB-z6-orientifold-action}) has instead the origin as only fixed point, hence it is freely acting on ${\bf S}^5$.
At leading order in $1/N$, which corresponds to the classical gravity level, we have a supersymmetric AdS configuration, associated to the near horizon limit of a D-brane system saturating the WGC bound, hence satisfying the AdS-WGC. In the exact configuration, however, the DSB D-brane sector breaks supersymmetry, and implies that at the quantum level the gravitational background becomes non-supersymmetric, hence according to the AdS-WGC, the system should exhibit an instability.
Naively, it would seem that the instability corresponds, as suggested in \cite{Ooguri:2016pdq}, to the emission of shells of D3-branes peeling off the 5-form flux background from the AdS solution. This would correspond, in the underlying picture of D-branes at singularities, to the DSB D-brane system repelling dynamical D3-branes off the origin towards generic points in the transverse space. This actually turns out to be incorrect, as can be shown using the field theory description, via standard supersymmetric field theory arguments. Expelling the dynamical D3-branes corresponds to Higgsing down the gauge theory with the rank assignment (\ref{DSB-z6-general-rank}) to the original $N=0$ $SU(5)$ theory, by giving vevs to suitable mesonic operators. To make the point, it suffices to turn on a vev for the gauge invariant operator involving fields in the first line in (\ref{DSB-z6-general})
\begin{eqnarray}
\langle\; (~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_0,~\overline{\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}}~_1) \cdot (~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_1,~\overline{\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}}~_2) \cdot (~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_2,~\overline{\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}}~_3)\; \rangle \equiv \Phi^3
\end{eqnarray}
Here $\Phi$ is the dimension 1 order parameter for this vev. The superpotential involves only triples of fields from the three different lines in (\ref{DSB-z6-general}), hence it is an F-flat direction. As follows from the D-brane picture, there are more general choices, allowing for three independent vevs --- for similar mesonic operators built from fields in the three different lines in (\ref{DSB-z6-general}) --- for each of the dynamical D3-branes. But for our present purposes it suffices to consider only this overall position vev $\Phi$.
From the viewpoint of the infrared $SU(5)$ theory this corresponds to a Higgsing of the UV $SU(N+5)$ theory by the $N$ flavours acquiring vevs involved in $\Phi$. Denoting $\Lambda$ and $\Lambda_{\rm UV}$ the dynamical scales of the $SU(5)$ and $SU(N+5)$ theories, the potential for $\Phi$ would follow from (\ref{DSB-energy}) through the implicit dependence of the IR scale $\Lambda$ on $\Phi$. However, taking the $SU(5)$ theory, with a $10+{\overline 5}$ matter content, and the UV $SU(N+5)$ theory, with matter content $(3N+1)~\overline{\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}}~ + 2N\, ~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~ + \asymm$, the matching relation is just $\Lambda=\Lambda_{\rm UV}$, with no dependence on $\Phi$. This implies that the DSB D-brane system does not exert forces on dynamical D3-branes, which are thus not repelled from the origin. The non-supersymmetric AdS configuration is not unstable towards the emission of such D3-brane shells peeling off the 5-form flux.
Actually, the contradiction with the AdS-WGC statement is avoided by a novel mechanism, related to a different kind of instability, which we explain as follows. Let us return to the picture of D3-branes at the orientifold of the ${\bf C}^3/{\bf Z}_6'$ singularity, i.e. the rank assignment (\ref{DSB-z6-general-rank}). The ${\bf Z}_6'$ quotient does not actually define an isolated singularity; indeed, the generator (\ref{DSB-z6-generator}) has the origin as only fixed point, but $\theta^3$ leaves invariant the complex plane parametrized by $z_2$, and $\theta^2$ leaves $z_3$ invariant. This implies that there is a complex plane (along $z_2$) of ${\bf C}^2/{\bf Z}_2$ singularities, and a complex plane (along $z_3$) of ${\bf C}^2/{\bf Z}_3$ singularities. In the field theory, there are flat directions corresponding to splitting some of the dynamical D3-branes into fractional D3-branes (of the ${\cal N}=2$ kind, i.e. D5-branes wrapped on the collapsed cycles of the ${\bf C}^2/{\bf Z}_n$) which can slide off the origin along the corresponding complex plane. Once the non-perturbative supersymmetry breaking kicks in, these flat directions can turn into runaway, providing an instability, bringing back agreement with the AdS-WGC.
The existence of this instability can again be analyzed in terms of the field theory, by Higgsing and scale matching. Consider for concreteness the splitting of dynamical D3-branes into fractional D3-branes of the ${\bf C}^2/{\bf Z}_2$ singularity associated to $\theta^3$, and moving the latter along $z_2$. A similar analysis could be performed using the fractional branes of the ${\bf C}^2/{\bf Z}_3$ curve of singularities. Motion in $z_2$ corresponds to mesonic vevs for fields in the second line in (\ref{DSB-z6-general}). Denoting the fields $(~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_0,~\overline{\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}}~_2)$ and $\asymm_2$ by $Q_{A}{}^{i}$ and $A_{ij}$, respectively, and $(~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_1,~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~_3)$, $\antisymm_1$ by $Q'_{j',B'}$, $S^{i'j'}$, respectively, the vevs for the two kinds of fractional branes have the structure
\begin{eqnarray}
v \, =\, \langle\, \epsilon_{AB} Q_A{}^i Q_B{}^j A_{ij}\, \rangle\quad ;\quad v'\, =\, \langle\, S^{i'j'} Q_{i'A} Q_{j'B} \delta^{AB}\, \rangle
\end{eqnarray}
For simplicity we have assumed all fractional D3-branes of the same kind to be located at the same position. The fact that the two different fractional branes are related to vevs of fields Higgsing the combinations of gauge factors (0,2) and (1,3), respectively, is manifest in the dimer diagram in Figure \ref{dsbZ6}, where the above combinations correspond to two sets of faces forming two different strips in the $z_2$ mesonic direction.
Let us compute the scale matching. Considering for instance $v\gg v'$ (eventually shown to be the realistic regime), the Higgsing pattern is
\begin{eqnarray}
&& SO(N+1) \times SU(N) \times SU(N+5) \times USp(N)\, \stackrel{v}{\longrightarrow} \\
&& \stackrel{v}{\longrightarrow} SO(1) \times SU(N) \times SU(5)\times USp(N)\, \stackrel{v'}{\longrightarrow} \,SO(1) \times SU(5) \nonumber
\end{eqnarray}
where the $SO(1)$ factor is kept for bookkeeping purposes.
In the first step, the $SU(N+5)$ is Higgsed down to $SU(5)$. In the second step, the $SU(5)$ theory maintains the number of colors, but $2N$ flavours become massive. The scale matching between the IR and UV scales $\Lambda$, $\Lambda_{\rm UV}$ is
\begin{eqnarray}
\Lambda^{13}\, = \, \Lambda_{\rm UV}^{13} \, v'{}^{2N} \, v^{-2N}
\label{scale-match}
\end{eqnarray}
Replacing in (\ref{DSB-energy}), the vev $v$ runs away to infinity, while the vev $v'$ is attracted to zero.
Note that, although the two kinds of fractional branes have similar features in isolated ${\bf C}^2/{\bf Z}_2$, they have a very different behavior in the presence of the orientifold action. This is in fact manifest already in the orientifold projection on the gauge group and matter content.
The resulting configuration is given by a set of D-branes describing the $SO(1) \times SU(N) \times SU(5)\times USp(N)$ gauge theory. The $SU(5)$ gauge factor still has the antisymmetric matter, but it has extra vector-like flavours, and the theory has supersymmetric vacua \cite{Poppitz:1995fh}. This fits nicely with the vacuum energy from (\ref{DSB-energy}), (\ref{scale-match}) going to zero as $v'\to 0$. Note that the final configuration can be described as a quotient (a ${\bf Z}_3$ orbifold of an orientifold of) a set of $N$ ${\cal N}=2$ fractional branes at ${\bf C}^2/{\bf Z}_2$. This configuration has a supersymmetric gravity dual given by a locally AdS throat of the kind studied in \cite{Bertolini:2000dk,Polchinski:2000mx}. These can be regarded as ${\cal N}=2$ versions of the ${\cal N}=1$ Klebanov-Strassler throats, with the singularity at the origin resolved by a stringy phenomenon, the so-called enhan\c{c}on configuration \cite{Johnson:1999qt}. The fact that the final end point is a supersymmetric local AdS background avoids conflicts with the local AdS-WGC.
In the gravity picture of the initial configuration, the instability of the non-susy AdS corresponds to the nucleation of bubbles defined by suitable fractional D3-branes, namely D5-branes wrapped on a collapsed ${\bf P}_1$ on the ${\bf S}^1$ of ${\bf C}^2/{\bf Z}_2$ singularities, and with spatial topology ${\bf S}^3$ in the non-compact dimensions, expanding outwards with time. In the interior, we are left with a supersymmetric locally AdS throat induced by the $N$ fractional branes stabilized at the origin, and with the singularity at its tip smoothed out presumably by an enhan\c{c}on configuration.
In contrast with other examples in the literature, this is neither a bubble of nothing nor a bubble removing the 5-form flux completely. It thus corresponds to a novel decay channel for non-supersymmetric warped throats.
\medskip
The ${\bf C}^3/{\bf Z}_6'$ orientifold singularity can be embedded in a locally AdS warped throat associated to a complex deformation, as discussed in section \ref{sec:dsb-throat}. In this setup, supersymmetry breaking on the infrared gauge theory would lead to contradiction with our proposed local AdS-WGC. However, our above analysis of the AdS case shows that the locally AdS throat is already unstable due to D5-brane bubble nucleation (on top of other possible decay channels related to the deformation fractional branes). Hence, the conflict with the local AdS-WGC is solved by the decay channel already solving the potential conflict with the AdS-WGC.
\subsection{Non-supersymmetric warped throats for ${\cal N}=2$ fractional branes}\label{sec:ntwo}
In this section we exploit the previous configuration to obtain a non-trivial example of non-supersymmetric warped throat induced by ${\cal N}=2$ fractional branes. The discussion is straightforward and the arguments should be familiar by now.
Consider the previous orientifold singularity, with D-branes corresponding to the rank assignment
\begin{eqnarray}
n_0=M+1\quad ,\quad n_1=n_3=0\quad ,\quad n_2=M+5
\end{eqnarray}
with $M$ even, for consistency of the (hidden) $USp$ factor.
This leads to a gauge theory with group $SO(M+1)\times SU(M+5)$, with matter $(~\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}~,~\overline{\raisebox{-.5pt}{\drawsquare{6.5}{0.4}}}~)+(1,\asymm)$. In the limit of large $M$, at leading order we have a gravity dual given by a quotient of the supersymmetric ${\cal N}=2$ warped throats in \cite{Bertolini:2000dk,Polchinski:2000mx}. The configuration is of the local AdS kind, hence the local AdS-WGC constraints should apply.
On the other hand, the gauge theory does not have a supersymmetric vacuum. The $SU(N)$ theory with odd $N$, antisymmetric matter, and no extra flavours, breaks supersymmetry, as shown in \cite{Affleck:1983vc,Affleck:1984xz,Poppitz:1995fh}. Actually, these references argued for an isolated supersymmetry breaking vacuum for the theory with Yukawa couplings, which remove the classical flat direction. In our present example, such superpotential couplings are absent, and the classical flat direction can turn into a runaway one. This is precisely the conclusion from matching of scales, as in the previous section, which we skip.
This means that, on the gravity side, the classical background has a decay channel given by nucleation of bubbles of fractional branes, exactly as in the previous section. In this case, however, since there are no fractional branes of the supersymmetry preserving kind, the bubbles completely peel off the 5-form flux background of the configuration leading to a complete decay of the local AdS throat.
This example thus provides an explicit example of the application of the local AdS-WGC constraint to non-supersymmetric warped throats induced by ${\cal N}=2$ fractional branes.
\section{Supersymmetry breaking orientifolds in warped throats}
\label{sec:anti-oplanes}
In the previous sections, we have focused on warped throats whose underlying D-brane configuration is supersymmetric in perturbation theory, with supersymmetry breaking arising from non-perturbative strong dynamics effects. It is interesting to check the behavior of warped throats with more dramatic supersymmetry breaking patterns. In this section, we explore a class of warped throats, where supersymmetry breaking is induced by orientifold planes not preserving the supersymmetry preserved by the CY geometry and the 3-form fluxes. In fact, they correspond to the CPT conjugates of the familiar supersymmetric orientifold planes, so we refer to them as anti-orientifold planes. Systems of anti-orientifold planes in the presence of D-branes are identical to systems of anti-D-branes in the presence of orientifold planes, which have been considered in many non-supersymmetric string constructions, pioneered in \cite{Sugimoto:1999tx,Aldazabal:1999jr,Antoniadis:1999xk,Uranga:1999ib,Angelantonj:2000xf,Rabadan:2000ma}.
\subsection{Non-supersymmetric throats from anti-O3-planes}
\label{sec:anti-oplanes-throats}
We focus on anti-O3-planes in the presence of a large number $N$ of D3-branes, possibly at singularities and with extra $M$ fractional branes. In the underlying D-brane construction, they lead to an explicitly non-supersymmetric spectrum, which can be easily determined using open string techniques and (non-supersymmetric projections of) dimer diagrams. For $M=0$, the systems of anti-O3-planes with $N$ D3-branes behave as ``supersymmetric'' and conformal in the leading large $N$ approximation, in the sense that the effects of orientifold planes (noticed via crosscaps) are subleading in the large $N$ limit. This implies that the gravity dual description corresponds to AdS backgrounds which behave as supersymmetric in the classical supergravity approximation, but have supersymmetry breaking effects at 1-loop. Similarly, in systems in the presence of $M$ additional deformation branes, we obtain locally AdS warped throats which are supersymmetric in the leading approximation, but break supersymmetry at the 1-loop order. These AdS and locally AdS configurations thus correspond to classically stable backgrounds, which, if stable in the full theory, would violate the AdS-WGC or the local AdS-WGC, respectively. Our purpose is thus to test the stability of these configurations, providing a check of these conjectures at the quantum level.
Concrete examples are easy to build. For instance, \cite{Kallosh:2015nia} provided tools to embed a single (anti-)O3-plane at the bottom of a warped throat with 3-form fluxes, for instance based on the $xy=z^3w^3$ singularity, a ${\bf Z}_3$ orbifold of the conifold. The deformed conifold itself $xy-zw=t^2$ also admits an involution $(x,y,z,w)\to (y,x,-z,-w)$ leading to O3-planes (in fact, two, located at $z=w=0, x=y=\pm i\,t$) \cite{Garcia-Etxebarria:2015lif}. Considering any of these geometries, we may just replace the O3-planes by anti-O3-planes and obtain explicit locally AdS warped throats with supersymmetry broken by anti-orientifold planes.
\subsection{Dynamics of D3-branes and anti-O3-planes}
\label{sec:anti-oplanes-o3}
It is useful to start considering anti-O3-planes in flat space, in the presence of $N$ D3-branes. In the large $N$ limit, the near horizon limit leads to gravity duals of the form AdS$_5\times {\bf RP}_5$, which behave as supersymmetric at leading order and feel the absence of supersymmetry at order $1/N$. The configuration is the CPT conjugate of O3-planes in the presence of anti-D3-branes (denoted by $\overline{\rm D3}$'s), which was studied in \cite{Uranga:1999ib} following the analysis in \cite{Witten:1998xy} for the supersymmetric O3-D3 system. We now revisit the main points, in anti-O3-plane language.
An anti-O3-plane is a fixed plane of the ${\bf Z}_2$ orientifold action on ${\bf R}^6$, preserving the 16 supersymmetries broken by D3-branes. There are four kinds of anti-O3-planes, classified according to the (discretized) values 0, $\frac 12$ for the NSNS and RR 2-form backgrounds on the ${\bf RP}_2$ (twisted) 2-cycles on the ${\bf RP}_5={\bf S}^5/{\bf Z}_2$ surrounding the origin in ${\bf R}^6$. In short, comparing with \cite{Witten:1998xy}, the tension of an anti-O3-plane equals that of the corresponding O3-plane, while they have opposite RR charge. The tensions and charges, measured in D3-brane units, for the anti-O3-planes are in the following table.
\begin{center}
\begin{tabular}{|l|c|c|c|}
\hline
D-brane description & $(\theta_{NS},\theta_R)$ & Tension & RR charge \\
\hline\hline
anti-(O3$^-$) & $(0,0)$ & -1/2 & $+1/2$ \\
\hline
anti-(O3$^-$) + 1 ${\overline{\rm D3}}$ & $(0,1/2)$ & +1/2 & $-1/2$ \\
\hline
anti-(O3$^+$) & $(1/2,0)$ & +1/2 & $-1/2$ \\
\hline
anti-(${\widetilde{\rm O3}}^+$) & $(1/2,1/2)$ & +1/2 & $-1/2$ \\
\hline
\end{tabular}
\end{center}
Just like for O3-planes, the O3$^-$ is a singlet under the type IIB $SL(2,{\bf Z})$ and the three remaining ones transform into each other under it.
The stability of the throats built using these anti-O3-planes can be heuristically understood by considering the dynamics of D3-branes in the presence of these anti-O3-planes. Namely, we can consider the previous anti-O3-planes with $N$ D3-branes on top (as counted in the double cover), and study the stability properties of the system.
The corresponding analysis can in fact be borrowed from \cite{Uranga:1999ib} (in its CPT conjugate version). It is straightforward to obtain the spectrum of the non-supersymmetric gauge theories on D3-branes in the presence of the different anti-O3-planes. The stability properties of the system can be assessed from the open string perspective, by the computation of the Coleman-Weinberg potential. We instead focus on the dynamics in the dual closed string channel, by comparing the interaction between D3-branes and anti-O3-planes due to exchange in the NSNS and RR channels.
We consider the different cases in turn:
$\bullet$ Consider $N=2p$ D3-branes in the presence of the anti-(O3$^-$). They have opposite sign tensions and equal sign RR charges, hence the gravitational and Coulomb interactions are both repulsive. Thus, D3-branes are expelled away from the anti-(O3$^-$) and the configuration is unstable.
$\bullet$ Take $N=2p$ D3-branes in the presence of the anti-(O3$^-$) + 1 ${\overline{\rm D3}}$. The D3-branes are attracted to the origin, but when they reach below sub-stringy distances, a tachyon arises from open strings between the stuck ${\overline{\rm D3}}$- and the dynamical D3-branes. The result is a configuration of the anti-(O3$^-$) with one stuck D3-brane at the origin, and $(2p-2)$ dynamical D3-branes. The system at the origin has tension +1/2 and charge +3/2, so the Coulomb repulsion overcomes the gravitational attraction and D3-branes are repelled. The result is a (CPT conjugate) of the nilpotent Goldstino configuration \cite{Kallosh:2015nia}.
$\bullet$ Consider $N=2p$ D3-branes in the presence of the anti-(O3$^+$). The gravitational and Coulomb interactions are both attractive, so the D3-branes are driven to the origin. Contrary to the previous case, however, there is no obvious annihilation between the anti-(O3$^+$) and the D3-branes. This would suggest that the non-supersymmetric AdS$_5\times {\bf RP}_5$ gravity dual is stable, in conflict with the AdS-WGC. Happily, as we will discuss later on, a non-perturbative instability will come to the rescue.
$\bullet$ For $N=2p$ D3-branes in the presence of the anti-(${\widetilde {\rm O3}}^+$) we have a similar situation. The D3-branes are driven to the origin, and no obvious decay channel seems to be available. This perturbatively stable configuration is however again rendered unstable by a non-perturbative process described later on, thus solving the potential conflict with the AdS-WGC and the local AdS-WGC constraints.
\subsection{Instabilities in throats with anti-O3-planes}
\label{sec:anti-oplanes-o3-throats}
The large $N$ limit of the above configurations of D3-branes on top of anti-O3-planes leads to near horizon geometries classically given by AdS$_5\times{\bf RP}_5$, with $N$ units of RR 5-form flux (as counted in the covering space) and the corresponding discrete NSNS and RR 2-form backgrounds on ${\bf RP}_2\subset{\bf RP}_5$. Absence of supersymmetry is only detectable at the 1-loop (i.e. $1/N$ order), namely via string diagrams involving crosscaps and thus noticing the underlying non-supersymmetric orientifold. Thus, the AdS-WGC condition implies such AdS backgrounds should have instabilities.
The same statement applies in more general local AdS warped throats with anti-O3-planes. For any local AdS warped throat admitting a supersymmetric orientifold involution introducing O3-planes, it is possible to consider the non-supersymmetric version obtained by the introduction of any of the different anti-O3-planes. The resulting gravitational background remains the same at the level of classical supergravity, but subleading corrections encode the breaking of supersymmetry. Thus, the local AdS-WGC conditions imply such local AdS backgrounds should be unstable.
We now analyze the instabilities in these AdS backgrounds, and the same conclusions clearly apply to local AdS configurations. The analysis follows the discussion in the previous section.
$\bullet$ In the case of the anti-(O3$^-$) orientifold projection, the repulsion exerted by the anti-O3-plane on D3-branes translates into a decay channel of the corresponding non-supersymmetric AdS$_5\times {\bf RP}_5$ background, by nucleation of D3-brane bubbles, which discharge the $N$ units of RR 5-form flux, much along the lines suggested in \cite{Ooguri:2016pdq}.
$\bullet$ In the case of the anti-(O3$^-$) with an extra anti-D3-brane, the decay channel of the corresponding non-supersymmetric AdS$_5\times {\bf RP}_5$ background is identical to the previous one, since the two configurations simply differ in the value mod 2 of the RR 5-form flux $N$. Notice that the decay does not change the values of the NSNS and RR 2-form backgrounds, since the anti-(O3$^-$) with either the initial stuck anti-D3-brane or the final stuck D3-brane, both have vanishing NSNS background and non-trivial RR 2-form background.
$\bullet$ In the case of the anti-(O3$^+$) projection, the flat space configuration seems stable. However, the S-dual of the anti-(O3$^+$) is given by the configuration of an anti-(O3$^-$) + 1 ${\overline{\rm D3}}$ of the previous paragraph. This suggests that the anti-(O3$^+$) can turn into an anti-(O3$^-$) via strong coupling processes. Indeed, notice that if one considers an NS5-brane (whose core is inherently non-perturbative) stretching along three of the anti-O3 directions and three directions transverse to it, the NS5-brane splits the anti-O3 in two halves, which actually have opposite signs for the orientifold plane charge, with one extra half anti-D3-brane on top of the anti-(O3$^-$) half to provide a continuous O3-plane charge across the NS5-brane (see \cite{Elitzur:1997hc} for a review including such brane constructions). This allows to nucleate holes in the anti-(O3$^+$), in whose interior the stuck ${\overline{\rm D3}}$ on the anti-(O3$^-$) can annihilate against one of the D3-branes around it, leading to repulsion of the remaining D3-branes, and thus, to instability. This suggests that, in the AdS$_5\times{\bf RP}_5$ gravity dual language, there is a decay channel via the nucleation of bubbles bounded by a domain wall given by an NS5-brane wrapped on a maximal ${\bf RP}_2$. From the analysis of topological constraints on wrapped branes in \cite{Witten:1998xy} (derived in the supersymmetric setup, but valid in general), this is indeed allowed. The NS5-brane may moreover carry arbitrarily large D3-brane charge, thus discharging dynamically the RR 5-form flux and rendering the AdS unstable.
$\bullet$ Similar conclusions hold in the case of the anti-($\widetilde{\rm O3}^+$) projection, where now the required domain wall involves a bound state of one NS5- and one D5-brane (aka a (1,1)-fivebrane) wrapped on ${\bf RP}_2\subset {\bf RP}_5$, thus changing both the NSNS and RR 2-form backgrounds. The fivebrane can carry D3-brane charge, so it can peel off the RR 5-form flux of the AdS compactification triggering its instability.
The instabilities of the above non-supersymmetric orientifolds of AdS backgrounds generalize straightforwardly to non-supersymmetric orientifolds of local AdS warped throats. Hence, in this class of examples, the local AdS-WGC is closely related to the ordinary AdS-WGC constraint.
\section{Discussion}
\label{sec:conclu}
In this paper we have proposed a new swampland conjecture forbidding stable non-supersymmetric locally AdS warped throats. This {\em local AdS-WGC} statement generalizes the analogous statement for stable non-supersymmetric AdS vacua. We have illustrated its application, which allows us to reinterpret several known results about warped throats from fractional branes, and to derive new results on the (in)stability of large classes of non-supersymmetric throats, with supersymmetry breaking triggered by strong dynamics in infrared D-brane sectors, or by the presence of stringy sources like anti-O3-planes.
Although the local AdS-WGC forbids stable non-supersymmetric throats, it has no direct bearing on meta-stable non-supersymmetric throats. In contrast with the AdS-WGC, there is no isometry in the radial direction introducing an infinite volume factor multiplying the decay probability, so a finite and potentially small decay amplitude is in principle feasible.
The question of whether swampland criteria can impose further restrictions on the meta-stable throats used in dS uplifts is a very interesting one, to which we plan to return in the future.
Several of the instabilities of the non-supersymmetric throats we have discussed are of the runaway kind. In actual 4d compactifications, this corresponds to shortening the throat, thus moderating the hierarchies between the bulk and the throat. Hence, even if the dynamics of the global compactification eventually stabilizes the runaway and renders such configurations more stable, there may remain a question on the tunability of scale hierarchies in the final states. The possibility that swampland criteria directly constrain such hierarchies is a tantalizing direction we hope to explore in the future.
We have made some interesting progress, and provided yet another hint that the body of knowledge on swampland criteria on effective theories is paving the way towards an era of Quantum Gravitational String Phenomenology.
\section*{Acknowledgments}
We are pleased to thank S. Franco, M. Montero and L. Ib\'anez for useful discussions. E.G. would like to thank The City College at CUNY for its hospitality during part of this work and S. Franco in particular. This work is partially supported by the grants FPA2015-65480-P from the MINECO/FEDER, the ERC Advanced Grant SPLE under contract ERC-2012-ADG-20120216-320421 and the grant SEV-2016-0597 of the ``Centro de Excelencia Severo Ochoa" Programme.
\newpage
\bibliographystyle{JHEP}
\section{\label{sec: intro} Introduction}
A central question in the underdoped cuprate superconductors is the origin of the pseudogap phase. This phase was originally thought to be a precursor phase to
superconductivity with spin-singlet pairs, no phase coherence, and no broken symmetries \cite{eme95,lee06}. However, more recent measurements suggest broken symmetries.
Specifically, polarized elastic neutron scattering observes intraunit cell magnetic order \cite{sid13} at a temperature close to the onset of a polar Kerr
effect \cite{xia08,he11} (see also Ref.~\onlinecite{kam02}). This suggests broken time-reversal symmetry \cite{var97,cha01}. Also, static quasi-long-range charge
density wave (CDW) order has been observed through x-ray scattering \cite{ghi12,com13,sil13} and through nuclear magnetic resonance \cite{wu14}. This order appears at
the incommensurate wavevectors $2{\bm Q}_x=(2Q,0)$ and $2{\bm Q}_y=(0,2Q)$ \cite{com13}. In addition, there exists evidence for superconducting (SC) correlations in the
pseudogap phase. Diamagnetism is observed much above $T_c$ \cite{li10} and also at fields that far exceed the estimated mean-field SC upper critical field \cite{yu14}.
To explain the prevalence of SC correlations and CDW order, pair density wave (PDW) order has been suggested as an order parameter for the pseudogap phase
\cite{lee14,yu14}. This proposal was bolstered by a demonstration that PDW order accounts for anomalous quasi-particle (qp) properties observed by angle-resolved
photoemission (ARPES) \cite{lee14}. PDW superconductivity is a spatially varying SC state similar to Fulde Ferrell Larkin Ovchinnikov (FFLO) states \cite{ful64,lar65}.
It has been discussed in a variety of contexts for the cuprates \cite{ber09,agt08,cor14,zel11,lee14}.
Here we show that PDW order can naturally induce a translational invariant secondary order parameter that breaks both time-reversal and parity symmetries, but is
invariant under the product of the two. Similar order parameters with this symmetry have appeared in the context of the cuprates under the name magnetoelectric (ME)
order \cite{ore11} and as ME loop current order \cite{sim02}. Here we name such order ME loop current order. We further show that there exists a mean-field PDW ground
state with ME loop current order that accounts for the Kerr effect and for intracell magnetic order, with CDW order at the observed wavevectors, and which accounts for
qp properties observed by ARPES \cite{he11}. This PDW ground state has continuous $U(1)$ degeneracies (associated with broken SC gauge and translational symmetries)
together with a discrete degeneracy associated with the ME loop current order. Fluctuations of the $U(1)$ degeneracies suppress both the SC and CDW order, allowing for
a state with spatial long-range ME loop current order and short-range SC and CDW orders (Fig.~\ref{fig1}). We propose that this state is responsible for behavior that
emerges at the pseudogap temperature $T^*$ \cite{he11}. Such a ME loop current state is conceptually similar to the nematic phase that arises due to magnetic
fluctuations proposed for the pnictides \cite{fer12} and to a translational invariant broken time-reversal symmetry state stemming from CDW and modulated bond current
orders \cite{wan14}.
Since it is closely related to the ME loop current PDW state we find, and has been used to explain the anomalous qp properties observed through ARPES experiments, we
highlight the recent PDW proposal of Lee \cite{lee14}. In particular, this proposal has its origin in a gauge theory description of the resonating valence bond phase.
Here, pairing occurs through a transverse gauge field and leads to an incommensurate checkerboard PDW state for which the PDW order can be qualitatively expressed as
$\Delta({\bm x})=\Delta_Q[\cos({\bm Q}_x\cdot {\bm x})+i\cos({\bm Q}_y\cdot {\bm x})]$. This state has secondary CDW order at wavevectors $2{\bm Q}_x$ and $2{\bm Q}_y$,
in agreement with experiment. This state cannot account for the observed signatures of translational invariant broken time-reversal symmetry.
In the following, we begin with a summary of the symmetry properties of PDW order and introduce the translational invariant loop current order parameter. This is followed
by the relevant PDW action for tetragonal symmetry. For tetragonal symmetry, it is not possible to analytically find all possible ground states. For this reason we then
turn to an analysis of PDW order for a theory with orthorhombic symmetry. This theory allows for a complete understanding of all allowed PDW ground states and can be used
to establish the existence of a phase which has long-range translation invariant loop current order but no long-range superconducting or CDW order. We then return to
tetragonal symmetry and examine a loop current phase that is a natural generalization of that found for orthorhombic symmetry. After this we show there exists a PDW state
that shares the same symmetry properties as the recent tilted loop current phase discussed by Yakovenko \cite{yak14}. This phase is consistent with all observations of
broken time-reversal symmetry in the underdoped cuprates. Finally, we examine the quasi-particle (qp) properties relevant to ARPES measurements for the tetragonal ME
PDW phase. We show that while the qp properties of the ME PDW phase are similar to those found by Lee \cite{lee14} for a PDW phase without loop current order, there are
observable differences that will allow these two phases to be distinguished.
\begin{figure}[t]
\begin{center}
\includegraphics[width=2.55in]{phase_diagram.eps}
\end{center}
\caption{Qualitative temperature ($T$) versus hole doping ($p$) phase diagram. Here LC represents the ME loop current phase, PDW represents the pair density wave phase,
AF represents antiferromagnetism, and d-SC represents $d$-wave superconductivity.}
\label{fig1}
\vspace{-4mm}
\end{figure}
\vspace{-6mm}
\section{\label{sec: PDW-loop} PDW induced translational invariant loop current order}
\vspace{-2mm}
PDW order originates when paired fermions have a finite center of mass momentum. It is characterized by order parameter components $\dq$ which, under a translation
$\bm{T}$, transform as $\dq\rightarrow e^{i\bm{T}\cdot \bm{Q}}\dq$. Key here are the transformation properties under time-reversal $\mathcal{T}$
and parity symmetries $\mathcal{P}$:
\begin{equation}
\dq\xrightarrow{\mathcal{T}} \Delta^*_{-\bm Q} \qquad \qquad \dq\xrightarrow{\mathcal{P}} \ndq.
\end{equation}
These symmetries suggest a consideration of the secondary ME loop current order parameter $l= (|\dq[i]|^2-|\ndq[i]|^2)$. This order parameter has translational
invariance, is odd under both $\mathcal{T}$ and $\mathcal{P}$, and invariant under the product $\mathcal{T}\mathcal{P}$. If a PDW ground state satisfies
$|\dq[i]| \ne |\ndq[i]|$, then the state will have non-zero $l$. This condition is not satisfied by any of the PDW states proposed in the context of the cuprates
\cite{ber09,cor14,lee14,yu14}. This motivates the question, are there stable PDW ground states that do exhibit loop current order? Below we show there are.
We find that there exists a PDW ground state that can qualify as a pseudogap mean-field order parameter. We impose the following four criteria on such a state:
\vspace{0.1cm}
\noindent 1- It is a mean-field ground state of a Ginzburg-Landau-Wilson (GLW) action (for parameters that are not a set of measure zero in the GLW action parameter
space).\vglue 0.2 cm
\noindent 2- It has finite $l$ and accounts for the Kerr effect and intracell magnetic order.\vglue 0.2 cm
\noindent 3- It has CDW correlations at the observed momenta.\vglue 0.2 cm
\noindent 4- It can account for ARPES spectra. \vspace{0.1cm}
Prior to defining the PDW order parameter we consider in more detail, it is useful to point out that there are two previously found PDW ground states that should have
finite $l$. The first is the well-known Fulde-Ferrell (FF) phase for which $\Delta({\bm x})=e^{i{\bm Q}\cdot {\bm x}}$. This state has no CDW order and therefore cannot
represent a pseudogap order parameter. The second state is found in Ref.~\onlinecite{agt08}, for which the gap can qualitatively be represented as
$\Delta({\bm x})=\dq{[e^{i{\bm Q}_x\cdot {\bm x}}+e^{i{\bm Q}_y\cdot {\bm x}}]}$. This state has CDW order, but this order is not at a wavevector that matches
experiment and, consequently, cannot be a pseudogap order parameter.
Criterion 4 strongly restricts our search for a pseudogap order parameter. Specifically, we require that the Fermi arc is reproduced, the low energy bands near the
anti-nodal point are reproduced (which has a gap minimum at momentum $k_G\ne k_F$, where $k_F$ is Fermi momentum) \cite{he11}, and the Fermi arc is derived
from occupied states moving up towards the Fermi energy \cite{he11,lee14}. The PDW state discussed in Ref.~\onlinecite{lee14} gives rise to these properties, and
it is natural to use this as a starting point. However, the GLW theory based on the PDW momenta chosen in Ref.~\onlinecite{lee14} does not produce a ground state that
satisfies the above four criteria and we must therefore consider generalizations of this state. To identify such a generalization, we note that
a key feature of Ref.~\onlinecite{lee14} that allows the ARPES spectra to be
reproduced is the choice of the momenta about which fermions are paired. In particular, the mean-field pairing Hamiltonian for PDW order is
\begin{equation}
H=\sum_{{\bm p},s} \epsilon_{{\bm p}}c^{\dagger}_{{\bm p}s} c_{{\bm p}s}+ \sum_{{\bm Q}_i,{\bm p}}[\Delta_{{\bm Q}_i}({\bm p})c^{\dagger}_{{\bm p}+{\bm K}_i,\uparrow}
c^{\dagger}_{-{\bm p}+{\bm K}_i,\downarrow}\\+h.c.],
\end{equation}
where $c_{{\bm k}s}$ is the fermion destruction operator with momentum ${\bm k}$ and spin $s$, $\epsilon_{\bm k}$ is the bare dispersion, and $h.c.$ means Hermitian
conjugate. The momenta about which the fermions are paired are the ${\bm K}_i$, leading to PDW order at ${\bm Q}_i=2{\bm K}_i$. In the following we examine PDW order
that stems from the ${\bm K}_i$ shown in Fig.~2.
\begin{figure}[t]
\begin{center}
\includegraphics[width=2.55in]{low_sym_tetragonal.eps}
\end{center}
\caption{\label{fig: PDW-momenta}The positions of the momenta $\bm {K}_i$ about which PDW Cooper pairs are formed. The corresponding eight PDW order parameter
components $\dq[i]$ have momenta $\bm{Q}_i=2\bm{K}_i$. The solid line momenta apply only to the theory with orthorhombic symmetry, and all the momenta (solid and
dashed) are included for tetragonal symmetry. The displacement $\delta K_y$ denotes the shift of the momenta $\bm{K}_i$ from the zone edge. When $\delta K_y=0$, the
theory of Ref.~\onlinecite{lee14} is reproduced.}
\vspace{-4mm}
\end{figure}
In the limit that $\delta K_y=0$, the theory of Ref.~\onlinecite{lee14} is reproduced. Consequently, for sufficiently
small $\delta K_y$, the PDW states examined here should be able to reproduce the ARPES spectra. We show that this is indeed the case in Section \ref{sec: quasi}.
\vspace{-4mm}
\section{\label{sec: GLW-tetra} GLW Action: tetragonal symmetry}
The momenta specified in Fig.~\ref{fig: PDW-momenta} lead to a PDW order parameter with eight complex degrees of freedom: $(\dq[1], \dq[2], \dq[3],\dq[4],\ndq[1],
\ndq[2], \ndq[3], \ndq[4])$. To construct the GLW free energy, the transformation properties of this order parameter under rotations are required. The point group
symmetry is $D_{4h}$ with generators $\{ C_4, \sigma_{x}, \sigma_z \}$ where $C_4$ is a 4-fold rotation about the $c$-axis and $\sigma_x$ ($\sigma_z$) is a mirror
reflection through $y$-$z$ ($x$-$y$) plane. Under these generators, the PDW order $(\dq[1], \dq[2], \dq[3],\dq[4],\ndq[1], \ndq[2],
\ndq[3], \ndq[4])$ transforms as
\begin{equation}
\begin{aligned}
C_4:& (\dq[3], \dq[4], \ndq[1], \ndq[2], \ndq[3], \ndq[4], \dq[1], \dq[2]),\\
\sigma_{x}:& (\dq[2], \dq[1], \ndq[4], \ndq[3], \ndq[2], \ndq[1], \dq[4], \dq[3]),\\
\sigma_z:& (\dq[1], \dq[2], \dq[3],\dq[4], \ndq[1], \ndq[2], \ndq[3], \ndq[4]).
\end{aligned}
\end{equation}
Considering invariance under translations, rotations, time-reversal, parity and gauge symmetries, the
corresponding GLW action can be written as: $S_{0, \text{tet}} = S_{0, \text{hom}} + S_{0, \text{grad}}$. Here, $S_{0, \text{hom}}$ and $S_{0, \text{grad}}$ are
\begin{widetext}
\begin{multline}\label{eq: tetra-free1_hom}
S_{0,\text{hom}} = r_0 \sum\nolimits_i |\dq[i]|^2 + \beta_1 {\left( \sum\nolimits_i |\dq[i]|^2 \right)}^2 \\
+ \beta_2 \left( |\dq[1]|^2 |\ndq[1]|^2 + |\dq[2]|^2 |\ndq[2]|^2 + |\dq[3]|^2 |\ndq[3]|^2 + |\dq[4]|^2 |\ndq[4]|^2\right)\\
+ \beta_3 \left( |\dq[1]|^2 |\dq[2]|^2 + |\dq[3]|^2 |\dq[4]|^2 + |\ndq[1]|^2 |\ndq[2]|^2 + |\ndq[3]|^2 |\ndq[4]|^2 \right) \\
+ \beta_4 \left( |\dq[1]|^2 |\dq[3]|^2 + |\dq[2]|^2 |\dq[4]|^2 + |\dq[3]|^2 |\ndq[1]|^2 + |\dq[4]|^2 |\ndq[2]|^2 \phantom{~~~~.} \right.\\
\shoveright{ \left. + \phantom{.}|\ndq[1]|^2 |\ndq[3]|^2 + |\ndq[2]|^2 |\ndq[4]|^2 + |\ndq[3]|^2 |\dq[1]|^2 + |\ndq[4]|^2 |\dq[2]|^2 \right)} \\
+ \beta_5 \left( |\dq[1]|^2 |\dq[4]|^2 + |\ndq[1]|^2 |\ndq[4]|^2 + |\dq[2]|^2 |\ndq[3]|^2 + |\dq[3]|^2 |\ndq[2]|^2 \right)\\
+ \beta_6 \left( |\dq[2]|^2 |\dq[3]|^2 + |\dq[4]|^2 |\ndq[1]|^2 + |\ndq[2]|^2 |\ndq[3]|^2 + |\ndq[4]|^2 |\dq[1]|^2 \right) \\
+ \beta_7 \left( |\dq[1]|^2 |\ndq[2]|^2 + |\dq[2]|^2 |\ndq[1]|^2 + |\dq[3]|^2 |\ndq[4]|^2 + |\dq[4]|^2 |\ndq[3]|^2 \right) \\
+ \beta_{c_1} \left\{ \left[\dq[1]\ndq[1](\dq[2]\ndq[2])^* + \dq[3]\ndq[3](\dq[4]\ndq[4])^* \right ] + c.c. \right\} \phantom{~~~~~~~~~~~..} \\
+ \beta_{c_2} \left\{ \left[\dq[1]\ndq[1](\dq[3]\ndq[3])^* + \dq[2]\ndq[2](\dq[4]\ndq[4])^* \right ] + c.c. \right\} \phantom{~~~~~~~~~~~..} \\
+ \beta_{c_3} \left\{ \left[\dq[1]\ndq[1](\dq[4]\ndq[4])^* + \dq[2]\ndq[2](\dq[3]\ndq[3])^* \right ] + c.c. \right\}, \phantom{~~~~~~~~~~~~~~~~~~~~~~~~~~~.....}
\end{multline}
\begin{multline}\label{eq: tetra-free1_grad}
S_{0, \text{grad}} = \kappa_1 \sum\nolimits_i |{\bm D}_{\perp}\dq[i]|^2 + \kappa_2 \left[ \phantom{~~~}\sum_{\makebox[0pt]{$\scriptstyle \bm{Q}_j = \pm \bm{Q}_{1,2}$}} \phantom{} \left( |\Dxdq[j]|^2 - |\Dydq[j]|^2 \right) \phantom{~~} - \phantom{\bm{Q}} \sum_{\makebox[0pt]{$\scriptstyle \bm{Q}_k = \pm \bm{Q}_{3,4}$}} \phantom{} \left( |\Dxdq[k]|^2 - |\Dydq[k]|^2\right) \right] \\
\shoveright{ + \kappa_3 \left[\phantom{~~~}\sum_{\makebox[0pt]{$\scriptstyle \bm{Q}_l = \pm \bm{Q}_{1,4}$}} \phantom{} [(\Dxdq[l])\Cydq[l] + c.c.] \phantom{~~} - \phantom{\bm{Q}} \sum_{\makebox[0pt]{$\scriptstyle \bm{Q}_m = \pm \bm{Q}_{2,3}$}} \phantom{} [(\Dxdq[m])\Cydq[m] + c.c.]\right]}\\
+ \kappa_4 \sum\nolimits_{i} |\Dzdq[i]|^2 + \frac{1}{2}(\bm{\nabla} \times \bm{A})^2,
\end{multline}
\end{widetext}
where ${\bm D}=-i\nabla- 2e {\bm A}$, ${\bm D}_{\perp}=(D_x,D_y)$, and ${\bm B}=\bm{\nabla}\times{\bm A}$.
In the spatially homogeneous case (for which spatial variations of the order parameter are ignored), the possible ground states depend upon nine unknown
phenomenological constants. This parameter space is too large to carry out a complete analysis of all the possible ground states. However, with the above action,
it is straightforward to find the conditions under which a particular state is a local minimum. In the following, we therefore consider a simplified theory that
applies to a material with orthorhombic symmetry (such as YBCO). For this orthorhombic theory, a complete analysis can be carried out. This analysis yields a PDW state
that is compatible with experiment, this state we generalize to tetragonal symmetry. Prior to the discussion of the solvable orthorhombic theory,
we first consider the secondary order parameters that are relevant for PDW order.
\vspace{2mm}
\section{\label{sec: secondary} Secondary order parameters}
Different PDW ground states are distinguished by the secondary order parameters that are induced by the PDW order. These secondary order parameters play a central role
in situations in which the original PDW order does not appear either due to impurities or due to fluctuations. In some circumstances, these secondary order parameters
have also been named vestigial order \cite{fra14}. These secondary order parameters are identified by examining all possible bi-linear products
of the $\dq[i]$. This leads to five distinct kinds of secondary order: CDW \cite{agt08,ber09}, orbital density wave order (ODW) \cite{agt08} (with spatially
modulated orbital currents), translational invariant charge-4 superconductivity (4SC) \cite{ber09-2,rad09} (we do not consider finite-momentum charge-4
superconductivity), strain \cite{ber09-2,rad09}, and translational invariant loop current (LC) order. Specifically, the CDW order is given by
$\rho_{2{\bm Q}}\propto (\dq \ndq^* + \ndq \dq^*)$ or $\rho_{{\bm Q}_1-{\bm Q}_2} \propto (\dq[1] \dq[2]^* + \ndq[2] \ndq[1]^*)$, the ODW
order is given by $L^z_{{\bm Q}_1-{\bm Q}_2} \propto i(\dq[1] \ndq[2]^* - \dq[2] \ndq[1]^*)$, the 4SC order is given by $\Delta_4\propto \dq \ndq$, strain order is
given by $\epsilon_i \propto (|\dq[1]|^2 + |\ndq[1]|^2 - |\dq[2]|^2 -|\ndq[2]|^2)$ \cite{ber09,rad09}, and the loop current order, which was discussed above,
by $l_i \propto (|\dq[i]|^2-|\ndq[i]|^2)$.
\vspace{6mm}
\section{\label{sec: GLW-ortho} GLW action: orthorhombic symmetry}
Here we consider the orthorhombic variant of Fig.~\ref{fig: PDW-momenta}. The GLW action in this case allows all possible ground states to be found and further allows
for an analysis of preemptive loop current order discussed in the next section. The order parameter has four complex degrees of freedom and is represented by the
momenta given by the solid arrows in Fig.~\ref{fig: PDW-momenta}. The same symmetry considerations as above lead to the partition function
$Z\propto \int \Pi_i \mathcal{D} \Delta_i e^{-S_{0}}$ with GLW action $S_{0}$ given by
\begin{widetext}
\begin{multline}
S_0 = r_0\sum\nolimits_{i}|\dq[i]|^2 + \frac{\beta_1}{2} \left (\sum\nolimits_{i}|\dq[i]|^2 \right)^2 + \frac{\beta_2}{2} \left( |\dq[1]|^2+|\ndq[1]|^2 -|\dq[2]|^2-|\ndq[2]|^2 \right)^2 \\
\label{free} \shoveright{+\frac{\beta_3}{2} \left( |\dq[1]|^2-|\ndq[1]|^2 -|\dq[2]|^2+|\ndq[2]|^2 \right)^2
+\frac{\beta_4}{2} \left(|\dq[1]|^2-|\ndq[1]|^2 + |\dq[2]|^2 - |\ndq[2]|^2 \right)^2} \\
\shoveright{+ \beta_5 \left[ \dq[1]\ndq[1](\dq[2]\ndq[2])^* +\dq[2]\ndq[2](\dq[1]\ndq[1])^* \right]
+\kappa_1\sum\nolimits_i|{\bm D}_{\perp} \Delta_i|^2
+ \kappa_2\sum\nolimits_i \left(|D_x\Delta_i|^2-|D_y\Delta_i|^2\right)}\\
\shoveright{+ \kappa_3 \left[\left((D_x \dq[1])(D_y\dq[1])^* + (D_x \ndq[1])(D_y\ndq[1])^* - (D_x \dq[2])(D_y\dq[2])^*-(D_x \ndq[2])(D_y\ndq[2])^* \right) + c.c.\right]} \\
+ \kappa_4\sum\nolimits_i|\Dzdq[i]|^2+\frac{1}{2}(\bm{\nabla} \times \bm{A})^2.
\end{multline}
\end{widetext}
\begin{table*}[t]
\caption{\label{tbl: ortho}{\bf Properties of PDW Ground States for orthorhombic symmetry in Fig.~\ref{fig: PDW-momenta}.} All possible PDW ground states and
accompanying CDW and ODW order. The second column shows the parameter regions for which these phases are stable. In the third and fourth columns: $2{\bm Q}_x=(2Q, 0)$,
$2{\bm Q}_y=(0, 2{Q})$, other modes can be found by using the relationships $\rho_{\bm Q}=(\rho_{-{\bm Q}})^*$ and $L^z_{\bm Q}=(L^z_{-{\bm Q}})^*$. The fifth
column gives all translational invariant order parameters with $l_x\propto$ $ |\dq[1]|^2-|\ndq[1]|^2-|\dq[2]|^2+|\ndq[2]|^2$,
$l_y\propto |\dq[1]|^2-|\ndq[1]|^2+|\dq[2]|^2-|\ndq[2]|^2$, $\Delta_{4e,s}\propto \dq[1]\ndq[1]+ \dq[2]\ndq[2]$, $\Delta_{4e,d}\propto \dq[1]\ndq[1]-\dq[2]\ndq[2]$, and
$\epsilon_{xy}\propto |\dq[1]|^2+|\ndq[1]|^2-|\dq[2]|^2-|\ndq[2]|^2$. The sixth column gives the degeneracy of the ground state.}
\begin{ruledtabular}
\begin{tabular}{c r c c c r}
$(\dq[1],\dq[2],\ndq[1],\ndq[2])$& Stability & CDW modes & ODW modes & Q=0 Order &Degeneracy Manifold \\\hline
\multirow{3}*{$(1,~0,~0,~0)$} & $\beta_2+\beta_3<0,\beta_2+\beta_4<0$ & \multirow{3}*{none} & \multirow{3}*{none} & \multirow{3}*{\begin{tabular}{@{}c@{}}$\epsilon_{xy}$ \\ $l_x$, $l_y$\end{tabular}} & \multirow{3}*{$U(1)\times Z_2\times Z_2$} \\
& $\beta_3+\beta_4<0$ & & & & \\
& $\beta_2+\beta_3+\beta_4<-|\beta_5|/4$& & & & \\\cline{2-2}
\multirow{2}*{$(1,~1,~0,~0)$} & $\beta_2+\beta_3>0,\beta_4<\beta_2$ & \multirow{2}*{$\rhoqx$} & \multirow{2}*{$L^z_{2{\bm Q}_x}$}& \multirow{2}*{$l_y$}& \multirow{2}*{$U(1)\times U(1) \times Z_2$} \\
& $\beta_4<\beta_3,\beta_4<-|\beta_5|/4$& & & & \\\cline{2-2}
\multirow{2}*{$(1,~0,~0,~1)$} & $\beta_2+\beta_4>0,\beta_3<\beta_2$ & \multirow{2}*{$\rhoqy$} & \multirow{2}*{$L^z_{2{\bm Q}_y}$}& \multirow{2}*{ $l_x$}& \multirow{2}*{$U(1)\times U(1) \times Z_2$} \\
& $\beta_3<\beta_4,\beta_3<-|\beta_5|/4$& & & & \\\cline{2-2}
\multirow{2}*{$(1,~0,~1,~0)$} & $\beta_3+\beta_4>0,\beta_2<\beta_3$ & \multirow{2}*{$\rho_{2{\bm Q}_1}$} & \multirow{2}*{none} &$\epsilon_{xy}$ & \multirow{2}*{$U(1)\times U(1)\times Z_2$} \\
& $\beta_2<\beta_4,\beta_2<-|\beta_5|/4$& & &$\Delta_{4e,s}$, $\Delta_{4e,d}$ & \\\cline{2-2}
\multirow{3}*{$(1,~1,~1,~1)$} & $\beta_5<0,\beta_5<4\beta_2$ & \multirow{3}*{\begin{tabular}{@{}c@{}}$\rho_{2{\bm Q}_1},~\rho_{2{\bm Q}_2}$ \\ $\rhoqx,~\rhoqy$ \end{tabular}} & \multirow{3}*{none} & \multirow{3}*{$\Delta_{4e,s}$} & \multirow{3}*{$U(1)\times U(1)\times U(1)$} \\
& $\beta_5<4\beta_3,\beta_5<4\beta_4$ & & & & \\
& $\beta_5/4<\beta_2+\beta_3+\beta_4$ & & & & \\\cline{2-2}
\multirow{3}*{$(1,~i,~1,~i)$} & $\beta_5>0,-\beta_5<4\beta_2$ & \multirow{3}*{$\rho_{2{\bm Q}_1},~\rho_{2{\bm Q}_2}$} & \multirow{3}*{$L^z_{2{\bm Q}_y},L^z_{2{\bm Q}_x}$} & \multirow{3}*{$\Delta_{4e,d}$} & \multirow{3}*{$U(1)\times U(1)\times U(1)$} \\
&$-\beta_5<4\beta_3,-\beta_5<4\beta_4$ & & & & \\
&$-\beta_5/4<\beta_2+\beta_3+\beta_4$ & & & & \\
\end{tabular}
\end{ruledtabular}
\end{table*}
\subsection{\label{subsec: ground}Ground states}
For this action, it is possible to find all homogeneous mean-field ground states analytically. These are listed in Table~\ref{tbl: ortho} together with the
corresponding conditions that the ground state represents a global minimum, secondary order parameters, and degeneracy manifold (degeneracy manifold specifies the
number of states with the same ground state energy). Of the ground states listed in Table \ref{tbl: ortho}, only one state (named the ME PDW state) has the potential to
represent a pseudogap mean-field order parameter when generalized to tetragonal symmetry. This ME PDW state has the order parameter
$(\dq[1],\dq[2],\ndq[1],\ndq[2])=\Delta(1,~1,~0,~0)$ and is depicted in Fig.~\ref{fig: varma}. It is stable when $\beta_1+\beta_2>0,~\beta_2+\beta_3>0,~\beta_4<\beta_2,
~\beta_4<\beta_3$, and $\beta_4<-|\beta_5|/4$. This state can be characterized by the secondary orders that it induces: loop current order $l_y=|\dq[1]|^2-|\ndq[1]|^2+
|\dq[2]|^2-|\ndq[2]|^2$; CDW order $\rho_{2{\bm Q}_x} = \dq[1]\dq[2]^*+\ndq[2]\ndq[1]^*$; and orbital density wave (ODW) order at the same wavevector as the CDW order
$L^z_{2{\bm Q}_x}=i(\dq[1]\ndq[2]^*-\dq[2]\ndq[1]^*)$ ($L^z$ is the $z$-component of angular momentum). The ground state manifold of the ME PDW state has a
$U(1)\times U(1)\times Z_2$ degeneracy. The two $U(1)$ degeneracies arise from the usual SC phase symmetry breaking and from the breaking of translational invariance.
The $Z_2$ symmetry denotes the degeneracy between the $(\dq[1],\dq[2],\ndq[1],\ndq[2])=\Delta(1,~1,~0,~0)$ and $\Delta(0,~0,~1,~1)$ states and is associated with the
ME loop current order (which is of opposite sign for these two degenerate states). In the next section we discuss how this ground state manifold can give rise to a
preemptive transition for which there is only ME loop current long-range order.
\begin{figure}[t]
\begin{center}
\includegraphics[width=3.0in]{varma.eps}
\end{center}
\caption{The ME PDW state for orthorhombic symmetry. The arrows ${\bm K}_i$ depict the non-zero components of the PDW order parameter (which order at
${\bm Q}_i=2{\bm K}_i$). Together with the PDW order at the two wavevectors ${\bm Q}_i$, this state has CDW order at the wavevector $2{\bm Q}_x={\bm Q}_1-{\bm Q}_2$,
ODW order at the same wavevector, and ME loop current order.}
\label{fig: varma}
\end{figure}
\subsection{\label{subsec: preemp-ortho} Emergent loop current order - Orthorhombic symmetry }
Fluctuations can lead to a preemptive transition in which the $U(1)\times U(1)$ symmetry is not broken, but the $Z_2$ symmetry is. Such a state will exhibit spatial
long-range ME loop current order and short-range SC and CDW order. To examine this possibility, we consider the partition function given by the effective action in
Eq.~(\ref{free}) in two dimensions (2D), ignore the vector potential, and focus on the parameter regime for which the ME PDW state is stable. We decouple the quartic
terms through Hubbard-Stratonovich (HS) transformations. In particular, we introduce the field $\psi$ to decouple the $(\sum_i|\Delta_i|^2)^2$ term, $\epsilon_{xy}$
to decouple the $(|\dq[1]|^2 + |\ndq[1]|^2-|\dq[2]|^2 - |\ndq[2]|^2)^2$ term, $l_x$ to decouple the $ (|\dq[1]|^2-|\ndq[1]|^2-|\dq[2]|^2+|\ndq[2]|^2)^2$ term, $l_y$ to
decouple the $( |\dq[1]|^2-|\ndq[1]|^2+|\dq[2]|^2-|\ndq[2]|^2)^2$ term, and two complex fields $\Delta_{4e,s}$ and $\Delta_{4e,d}$ to decouple the
$\left[ \dq[1]\ndq[1](\dq[2]\ndq[2])^* +\dq[2]\ndq[2](\dq[1]\ndq[1])^* \right ]$ term. The resultant action is quadratic in the fields $\dq[i]$ and these fields can
be integrated out. For the parameter regime we examine, the phases with non-zero $\Delta_{4e,s}$ and $\Delta_{4e,d}$ are energetically unfavorable. Consequently we
set these fields to zero. Additionally, the remaining fields have Ising symmetry, so it is reasonable to treat these at a mean-field level. This leads to the following
effective action
\setlength\multlinegap{0pt}
\begin{multline}
\frac{S_{\text{eff}}}{A}=\frac{l_x^2}{2|\beta_3|}+\frac{l_y^2}{2|\beta_4|}-\frac{\psi^2}{2\beta_1}-\frac{\epsilon_{xy}^2}{2\beta_2}\\
+ \int \frac{d^2 q}{4 \pi^2} \ln \Big [(\chi_{1,\bm{q}}^{-1}+\epsilon_{xy}+l_x+l_y)(\chi_{1,\bm{q}}^{-1}+\epsilon_{xy}-l_x-l_y)\\
(\chi_{2,\bm{q}}^{-1}-\epsilon_{xy}+l_x-l_y)(\chi_{2,\bm{q}}^{-1}-\epsilon_{xy}-l_x+l_y)\Big],
\end{multline}
where $A$ is the area, $\chi_{1,\bm{q}}^{-1}=r_0+\psi+\kappa_1q^2+\kappa_2(q_x^2-q_y^2)+2\kappa_3 q_xq_y$,
$\chi_{2,\bm{q}}^{-1} = r_0+\psi+\kappa_1q^2+\kappa_2(q_x^2-q_y^2)-2\kappa_3 q_xq_y$.
The anisotropy due to $\kappa_2$ and $\kappa_3$ can be removed by rotating and re-scaling $q_x$ and $q_y$, yielding $(\tilde{q}_x^2+\tilde{q}_y^2)/ \tilde{\kappa}$
with $\tilde{\kappa}=\sqrt{\kappa_1^2-\kappa_2^2-\kappa_3^2}$, and the integrals over momenta can then be carried out. Treating $S_{\text{eff}}$ within a mean field
approximation leads to the following self-consistency equations
\begin{multline}
r^* = \bar{r}_0-\tilde{\beta}_1 \ln \big\{ \big[(r^*+\epsilon_{xy}^*)^2-(l_x^* + l_y^*)^2]\\
[(r^* -\epsilon_{xy}^*)^2-(l_x^* -l_y^*)^2 \big\rbrack \big\rbrace, \nonumber
\end{multline}
\begin{equation}
\begin{gathered} \nonumber
\epsilon_{xy}^* = - \tilde{\beta}_2 \ln\left [\frac{(r^*+\epsilon_{xy}^*)^2-(l_x^* + l_y^*)^2}{(r^*-\epsilon_{xy}^*)^2-(l_x^* - l_y^*)^2}\right ],\\
l_x^* = - \tilde{\beta}_3\ln\left[\frac{(r^*+l_x^*)^2 - (\epsilon_{xy}^* + l_y^*)^2}{(r^* - l_x^*)^2 - (\epsilon_{xy}^* - l_y^*)^2}\right],\\
l_y^* = \ln\left[\frac{(r^*+l_y^*)^2 - (\epsilon_{xy}^* + l_x^*)^2}{(r^* - l_y^*)^2 - (\epsilon_{xy}^* - l_x^*)^2}\right],
\end{gathered}
\end{equation}
where $r^*=r_0^*+\psi^*$, the $^*$ denotes a rescaling by a factor
$4\pi\tilde{\kappa}/|\beta_4|$, $\tilde{\beta}_i=\beta_i/|\beta_4|$, $\bar{r}_0=r_0^*+8 \tilde{\beta}_1 \ln \Lambda + 4\tilde{\beta}_1\ln(4\pi\tilde{\kappa}/|\beta_4|)$
and $\Lambda$ is the momentum cutoff. We find that for parameters $\beta_i$ such that the ME PDW state is stable, the mean field solution is given by
$\epsilon_{xy}=l_x=0$ and $l_y\ne 0$. The mathematical analysis of this solution is the same as that used to examine preemptive nematic order in
Ref.~\onlinecite{fer12}. This work implies that there is a second-order transition into a ME loop current state when $\tilde{\beta}_1>2$ (this becomes a first-order
transition if $\tilde{\beta}_1<2$). This analysis can be extended to three dimensions and, provided $\kappa_4/\tilde{\kappa}$ is sufficiently small, a second order
transition into a loop current phase will occur \cite{fer12}. Such a preemptive ME loop current phase will exhibit: SC and CDW correlations consistent with experiment
\cite{ghi12,com13,sil13,li10,yu14}; broken time-reversal symmetry; broken parity symmetry; and is invariant under the product of time-reversal and parity symmetry.
\begin{figure}[t]
\begin{center}
\subfloat[]{\label{subfig: varma2}
\includegraphics[width=1.62in]{varma_phase21.eps}} \hfill
\subfloat[]{\label{subfig: varma3}
\includegraphics[width=1.62in]{varma_loop21.eps}}
\caption{(Color online) The ME PDW state for tetragonal symmetry. (a) The arrows ${\bm K}_i$ depict the non-zero components of the PDW order parameter in the ME PDW
state (which order at ${\bm Q}_i=2{\bm K}_i$). This state has the same symmetry properties as the ME loop current phase discussed in Ref.~\onlinecite{sim02}.
(b) ME Loop current state introduced in Ref.~\onlinecite{sim02}. Here the larger dark circles are Cu sites, the smaller circles are O sites, the arrows represent the
direction of the current, and the arrow heads and tails give the direction of the magnetic moments induced by the currents. \label{fig: var}}
\vspace{-5mm}
\end{center}
\end{figure}
\section{\label{sec: in-plane-tetra}In-plane loop current order - tetragonal symmetry}
\vspace{-2mm}
The ME PDW state found in Section~\ref{subsec: ground} has a natural generalization to tetragonal symmetry. In particular,
$(\dq[1], \dq[2], \dq[3], \dq[4], \ndq[1], \ndq[2], \ndq[3], \ndq[4]) = (\Delta_1, \Delta_2, 0, 0, 0, 0, \Delta_2, \Delta_1)$
is a stable state of the tetragonal GLW action (this will become apparent in the analysis that follows). This state is depicted in
Fig.~\ref{fig: var}\subref{subfig: varma2}; it shares the same symmetries as the ME loop current state shown in Fig.~\ref{fig: var}\subref{subfig: varma3}, which has been
discussed in Refs.~\onlinecite{sim02, aji13}. Note that $\Delta_1 \ne \Delta_2$; however, when $\delta K_y = 0$, we recover the state examined in Ref.~\onlinecite{lee14} for which
$\Delta_1=\Delta_2$, so for sufficiently small $\delta K_y$, we expect that $\Delta_1\approx \Delta_2$. To carry out an analysis of this phase, we follow the approach used
in Section V for orthorhombic symmetry. In particular, we re-write the free energy terms denoted by $\beta_1$ to $\beta_7$ as squares of basis functions of irreducible
invariants for tetragonal symmetry. This allows for a straightforward HS transformation. While we can also introduce HS fields for the terms $\beta_{ci}$, for the loop
current phases we are interested in, these fields vanish (as they did in the orthorhombic case); consequently, we will not include these terms in the following. To
reformulate the quartic portion of the effective action, we set $l_i = |\dq[i]|^2 - |\ndq[i]|^2$ and $ \epsilon_i = |\dq[i]|^2 + |\ndq[i]|^2 $. Basis functions for
irreducible representations of $D_{4h}$ are then $p_{1x} = - l_3 - l_4$, $p_{1y} = l_1 + l_2$, $p_{2x} = l_1 - l_2$, $p_{2y} = l_3 - l_4$ ($\bm{p}_1$ and $\bm{p}_2$ are
both bases for the $E_u$ representation), $\psi=\sum_i \epsilon_i$ (corresponding to the $A_{1g}$ representation), $\gamma = \epsilon_1-\epsilon_2+\epsilon_3-\epsilon_4$
(corresponding to the $A_{2g}$ representation), $\epsilon_{x^2-y^2}=\epsilon_1+\epsilon_2-\epsilon_3-\epsilon_4$ (corresponding to the $B_{1g}$ representation),
$\epsilon_{xy}=\epsilon_1-\epsilon_2-\epsilon_3+\epsilon_4$ (corresponding to the $B_{2g}$ representation). In terms of these basis functions Eq.~(\ref{eq: tetra-free1_hom})
can be rewritten as
\begin{widetext}
\begin{multline} \label{eq: tetra-free2_hom}
S_{0, \text{hom}} = r_0 \sum\nolimits_i |\dq[i]|^2 + \tilde\beta_1 \psi^2 + \tilde\beta_2 \bm{p}_{1}^2 + \tilde\beta_3 \bm{p}_{2}^2 + \tilde\beta_4 \bm{p}_1 \cdot \bm{p}_2
+ \tilde\beta_5 \gamma^2 + \tilde\beta_6 \epsilon_{x^2 -y^2}^2 + \tilde\beta_7 \epsilon_{xy}^2\\
\phantom{ABCDE} + \beta_{c_1} \left\{ \left[\dq[1]\ndq[1](\dq[2]\ndq[2])^* + \dq[3]\ndq[3](\dq[4]\ndq[4])^* \right] + c.c. \right\}\\
\phantom{ABCDE} + \beta_{c_2} \left\{ \left[\dq[1]\ndq[1](\dq[3]\ndq[3])^* + \dq[2]\ndq[2](\dq[4]\ndq[4])^* \right] + c.c. \right\}\\
+ \beta_{c_3} \left\{ \left[\dq[1]\ndq[1](\dq[4]\ndq[4])^* + \dq[2]\ndq[2](\dq[3]\ndq[3])^* \right] + c.c. \right\},
\end{multline}
where $\tilde\beta_1 = \beta_1 + (1/8)(\beta_4 + \beta_6 + \beta_7 - \beta_2)$, $ \tilde\beta_2 = (1/8)(\beta_3 - \beta_2 - \beta_7)$,
$\tilde\beta_3 = (1/8)(\beta_7 - \beta_2 - \beta_3)$, $\tilde\beta_4 = (1/4)(\beta_6 - \beta_5)$, $\tilde\beta_5 = (1/8)(\beta_4 - \beta_3 - \beta_6)$,
$\tilde\beta_6 = ({\beta_2}/4) + (1/8)(\beta_3 - \beta_4 - \beta_6)$, $\tilde\beta_7 = (1/8)(\beta_2 + \beta_6 - \beta_4 - \beta_7)$. In the above expression,
all terms except $\beta_{ci}$ and $\tilde{\beta}_4$ are squares of basis functions. To account for $\tilde{\beta}_4$, we rotate
$l_{1i}=\cos\theta ~p_{1i}+\sin\theta ~p_{2i}$ and $l_{2i}= -\sin\theta ~p_{1i}+\cos\theta ~p_{2i}$ with
$\cos\theta = \frac{\sqrt{\left(\tilde\beta_2 - \tilde\beta_3 + \sqrt{(\tilde\beta_2 - \tilde\beta_3)^2 + {\tilde\beta_4}^2}\right)^2 +
{\tilde\beta_4}^2}}{2\sqrt{(\tilde\beta_2 - \tilde\beta_3)^2 + {\tilde\beta_4}^2}}$
and
$\sin\theta = \frac{\sqrt{\left(\tilde\beta_2 - \tilde\beta_3 - \sqrt{(\tilde\beta_2 - \tilde\beta_3)^2 + {\tilde\beta_4}^2}\right)^2 +
{\tilde\beta_4}^2}}{2\sqrt{(\tilde\beta_2 - \tilde\beta_3)^2 + {\tilde\beta_4}^2}}.$
In terms of these new parameters Eq.~(\ref{eq: tetra-free2_hom}) can be expressed as
($\tilde\beta $\textquotesingle s and $\lambda$\textquotesingle s have been rescaled by a factor of one half for convenience)
\begin{comment}
\begin{multline}
S_{0, \text{hom}} = r_0 \sum\nolimits_i |\dq[i]|^2 + \tilde\beta_1 {\left( \sum\nolimits_i |\dq[i]|^2 \right)}^2 \\
+ \tilde\beta_2\left[\left(|\dq[1]|^2 - |\ndq[1]|^2 + |\dq[2]|^2 - |\ndq[2]|^2\right)^2 + \left( |\dq[3]|^2 - |\ndq[3]|^2 + |\dq[4]|^2 - |\ndq[4]|^2\right)^2 \right] \\
+ \tilde\beta_3 \left[\left( |\dq[1]|^2 - |\ndq[1]|^2 - |\dq[2]|^2 + |\ndq[2]|^2 \right)^2 + \left(|\dq[3]|^2 - |\ndq[3]|^2 - |\dq[4]|^2 + |\ndq[4]|^2 \right)^2 \right] \\
+ \tilde\beta_4 \left(2 |\dq[3]|^2 |\dq[2]|^2 - 2 |\dq[3]|^2 |\ndq[2]|^2 - 2 |\ndq[3]|^2 |\dq[2]|^2 + 2 |\ndq[3]|^2 |\ndq[2]|^2 \phantom{ABDEFGH~} \right. \\
\left.\phantom{ABDEFGHIJKLMN..}-\phantom{}2 |\dq[4]|^2 |\dq[1]|^2 + 2 |\dq[4]|^2 |\ndq[1]|^2 + 2 |\ndq[4]|^2 |\dq[1]|^2 - 2 |\ndq[4]|^2 |\ndq[1]|^2 \right)\\
+ \tilde\beta_5 \left( |\dq[1]|^2 + |\ndq[1]|^2 -|\dq[2]|^2 - |\ndq[2]|^2 + |\dq[3]|^2 + |\ndq[3]|^2 - |\dq[4]|^2 - |\ndq[4]|^2 \right)^2 \phantom{GG~~} \\
+ \tilde\beta_6 \left( |\dq[1]|^2 + |\ndq[1]|^2 +|\dq[2]|^2 + |\ndq[2]|^2 - |\dq[3]|^2 - |\ndq[3]|^2 - |\dq[4]|^2 - |\ndq[4]|^2 \right)^2 \phantom{GG~~} \\
+ \tilde\beta_7 \left( |\dq[1]|^2 + |\ndq[1]|^2 -|\dq[2]|^2 - |\ndq[2]|^2 - |\dq[3]|^2 - |\ndq[3]|^2 + |\dq[4]|^2 + |\ndq[4]|^2 \right)^2 \phantom{GG~~} \\
+ \beta_{c_1} \left[ \left(\dq[1]\ndq[1](\dq[2]\ndq[2])^* + \dq[3]\ndq[3](\dq[4]\ndq[4]) \right ) + c.c. \right] \phantom{GGGGHHHHKKKKL~~...} \\
+ \beta_{c_2} \left[ \left(\dq[1]\ndq[1](\dq[3]\ndq[3])^* + \dq[2]\ndq[2](\dq[4]\ndq[4]) \right ) + c.c. \right] \phantom{GGGGHHHHKKKKL~~...} \\
+ \beta_{c_3} \left[ \left(\dq[1]\ndq[1](\dq[4]\ndq[4])^* + \dq[2]\ndq[2](\dq[3]\ndq[3]) \right ) + c.c. \right]
\end{multline}
\end{comment}
\begin{comment}
\begin{equation}
\begin{aligned}
&
\lambda_2 &= \frac{\tilde\beta_2 + \tilde\beta_3 - \sqrt{\left(\tilde\beta_2 - \tilde\beta_3 \right)^2 + {\tilde\beta_4}^2}}{2} \\
\cos\theta &\equiv \frac{\sqrt{\left(\tilde\beta_2 - \tilde\beta_3 + \sqrt{\left(\tilde\beta_2 - \tilde\beta_3 \right)^2 + {\tilde\beta_4}^2}\right)^2 + {\tilde\beta_4}^2}}{2\sqrt{\left(\tilde\beta_2 - \tilde\beta_3 \right)^2 + {\tilde\beta_4}^2}} &
\sin\theta &\equiv \frac{\sqrt{\left(\tilde\beta_2 - \tilde\beta_3 - \sqrt{\left(\tilde\beta_2 - \tilde\beta_3 \right)^2 + {\tilde\beta_4}^2}\right)^2 + {\tilde\beta_4}^2}}{2\sqrt{ \left(\tilde\beta_2 - \tilde\beta_3 \right)^2 + {\tilde\beta_4}^2}}
\end{aligned}
\end{equation}
\end{comment}
\begin{multline} \label{eq: tetra-free3_hom}
S_{0,\text{hom}} = r_0 \sum\nolimits_i |\dq[i]|^2 + \frac{\tilde\beta_1}{2} \psi^2 + \frac{\lambda_1}{2} \left( l_{1x}^2 + l_{1y}^2 \right) + \frac{\lambda_2}{2} \left( l_{2x}^2 + l_{2y}^2 \right)
+ \frac{\tilde\beta_5}{2} \gamma^2 + \frac{\tilde\beta_6}{2} \epsilon_{x^2 -y^2}^2 + \frac{\tilde\beta_7}{2} \epsilon_{xy}^2\\
\phantom{ABCD} + \beta_{c_1} \left\{ \left[\dq[1]\ndq[1](\dq[2]\ndq[2])^* + \dq[3]\ndq[3](\dq[4]\ndq[4])^* \right] + c.c. \right\}\\
\phantom{ABCD} + \beta_{c_2} \left\{ \left[\dq[1]\ndq[1](\dq[3]\ndq[3])^* + \dq[2]\ndq[2](\dq[4]\ndq[4])^* \right] + c.c. \right\}\\
\shoveright{ + \beta_{c_3} \left\{ \left[\dq[1]\ndq[1](\dq[4]\ndq[4])^* + \dq[2]\ndq[2](\dq[3]\ndq[3])^* \right] + c.c. \right\}} \\
\shoveleft{\phantom{ABC}= r_0 \sum\nolimits_i |\dq[i]|^2 + \frac{\tilde\beta_1}{2}{\left( \sum\nolimits_i |\dq[i]|^2 \right)}^2} \\
\phantom{~~~}+ \frac{\lambda_1}{2}\left[\left(- |\dq[3]|^2 + |\ndq[3]|^2 - |\dq[4]|^2 + |\ndq[4]|^2\right)\cos\theta + \left(|\dq[1]|^2 - |\ndq[1]|^2 - |\dq[2]|^2 + |\ndq[2]|^2\right)\sin\theta \right]^2\\
\phantom{~~~}+ \frac{\lambda_1}{2}\left[\left(|\dq[1]|^2 - |\ndq[1]|^2 + |\dq[2]|^2 - |\ndq[2]|^2\right)\cos\theta + \left(|\dq[3]|^2 - |\ndq[3]|^2 - |\dq[4]|^2 + |\ndq[4]|^2\right)\sin\theta \right]^2 \phantom{~~} \\
\phantom{~~~}+ \frac{\lambda_2}{2}\left[\left(|\dq[3]|^2 - |\ndq[3]|^2 + |\dq[4]|^2 - |\ndq[4]|^2\right)\sin\theta + \left(|\dq[1]|^2 - |\ndq[1]|^2 - |\dq[2]|^2 + |\ndq[2]|^2\right)\cos\theta \right]^2 \phantom{~~} \\
\phantom{~~~}+ \frac{\lambda_2}{2}\left[\left(- |\dq[1]|^2 + |\ndq[1]|^2 - |\dq[2]|^2 + |\ndq[2]|^2\right)\sin\theta + \left(|\dq[3]|^2 - |\ndq[3]|^2 - |\dq[4]|^2 + |\ndq[4]|^2\right)\cos\theta \right]^2 \\
\phantom{~~~}+ \frac{\tilde\beta_5}{2} \left( |\dq[1]|^2 + |\ndq[1]|^2 -|\dq[2]|^2 - |\ndq[2]|^2 + |\dq[3]|^2 + |\ndq[3]|^2 - |\dq[4]|^2 - |\ndq[4]|^2 \right)^2 \\
\phantom{~~~}+ \frac{\tilde\beta_6}{2} \left( |\dq[1]|^2 + |\ndq[1]|^2 +|\dq[2]|^2 + |\ndq[2]|^2 - |\dq[3]|^2 - |\ndq[3]|^2 - |\dq[4]|^2 - |\ndq[4]|^2 \right)^2 \\
\phantom{~~~}+ \frac{\tilde\beta_7}{2} \left( |\dq[1]|^2 + |\ndq[1]|^2 -|\dq[2]|^2 - |\ndq[2]|^2 - |\dq[3]|^2 - |\ndq[3]|^2 + |\dq[4]|^2 + |\ndq[4]|^2 \right)^2 \\
\phantom{~~~~~~~.}+ \beta_{c_1} \left\{ \left[\dq[1]\ndq[1](\dq[2]\ndq[2])^* + \dq[3]\ndq[3](\dq[4]\ndq[4])^* \right ] + c.c. \right\} \\
\phantom{~~~~~~~.}+ \beta_{c_2} \left\{ \left[\dq[1]\ndq[1](\dq[3]\ndq[3])^* + \dq[2]\ndq[2](\dq[4]\ndq[4])^* \right ] + c.c. \right\} \\
+ \beta_{c_3} \left\{ \left[\dq[1]\ndq[1](\dq[4]\ndq[4])^* + \dq[2]\ndq[2](\dq[3]\ndq[3])^* \right ] + c.c. \right\},
\end{multline}
where $\lambda_1 =\frac{\tilde\beta_2 + \tilde\beta_3 + \sqrt{\left(\tilde\beta_2 - \tilde\beta_3 \right)^2 + {\tilde\beta_4}^2}}{2}$ and $\lambda_2 = \frac{\tilde\beta_2 + \tilde\beta_3 - \sqrt{\left(\tilde\beta_2 - \tilde\beta_3 \right)^2 + {\tilde\beta_4}^2}}{2}$. Notice that if $\lambda_1<0$, $\beta_{ci}$ are sufficiently small, and all other quartic terms are positive, then the ME loop current phase will be the mean-field ground state. This is the limit that we will examine further. In particular, in the next paragraph, we examine preemptive loop current order emerging from this ME PDW phase.
We decouple the quartic terms of Eq.~(\ref{eq: tetra-free3_hom}) through HS transformations. In particular, we introduce $\psi$, $l_{1x}$, $l_{1y}$, $l_{2x}$, $l_{2y}$,
$\gamma$, $\epsilon_{x^2-y^2}$ and $\epsilon_{xy}$ to decouple the second ($(\sum_i|\Delta_i|^2)^2$), third, fourth, fifth, sixth, seventh, eighth and ninth terms,
respectively. The resultant action is quadratic in the fields $\dq[i]$ and these fields can be integrated out. As in the orthorhombic case, the terms with
$\beta_{ci}$ do not contribute to the effective action in the ME PDW phase, so we do not include these terms (the HS decomposition of these terms can proceed through
charge-4e superconducting fields, ignoring these terms is equivalent to setting these fields to zero). The remaining fields have discrete symmetries, so it is
reasonable to treat these at a mean-field level. This leads to the following effective action (note we have set $\lambda_1<0$ and all other quartic terms are positive)
\begin{multline} \label{eq: tetra-eff}
\frac{S_{\text{eff},\text{tet}}}{A} = \frac{l_{1x}^2 + l_{1y}^2}{2|\lambda_1|} - \frac{l_{2x}^2 + l_{2y}^2}{2 \lambda_2}
- \frac{\psi^2}{2\tilde\beta_1} - \frac{\gamma^2}{2\tilde\beta_5} - \frac{\epsilon_{x^2 - y^2}^2}{2\tilde\beta_6} - \frac{\epsilon_{xy}^2}{2\tilde\beta_7} \\
+ \int \frac{d^2 q}{4 \pi^2} \ln \left[(\chi_{1,\bm{q}}^{-1} + \gamma + \epsilon_{x^2 - y^2} + \epsilon_{xy} - l_{1x}\sin\theta - l_{1y}\cos\theta
+ l_{2x}\cos\theta - l_{2y}\sin\theta) \right.\\
\left. \phantom{ABCDEFG}(\chi_{1,\bm{q}}^{-1} + \gamma + \epsilon_{x^2 - y^2} + \epsilon_{xy} + l_{1x}\sin\theta + l_{1y}\cos\theta
- l_{2x}\cos\theta + l_{2y}\sin\theta) \right.\\
\left. \phantom{ABCDEFG}(\chi_{2,\bm{q}}^{-1} - \gamma + \epsilon_{x^2 - y^2} - \epsilon_{xy} + l_{1x}\sin\theta - l_{1y}\cos\theta
- l_{2x}\cos\theta - l_{2y}\sin\theta) \right.\\
\left. \phantom{ABCDEFG}(\chi_{2,\bm{q}}^{-1} - \gamma + \epsilon_{x^2 - y^2} - \epsilon_{xy} - l_{1x}\sin\theta + l_{1y}\cos\theta
+ l_{2x}\cos\theta + l_{2y}\sin\theta) \right.\\
\left. \phantom{ABCDEFG}(\chi_{3,\bm{q}}^{-1} + \gamma - \epsilon_{x^2 - y^2} - \epsilon_{xy} + l_{1x}\cos\theta - l_{1y}\sin\theta
+ l_{2x}\sin\theta + l_{2y}\cos\theta)\right.\\
\left. \phantom{ABCDEFG}(\chi_{3,\bm{q}}^{-1} + \gamma - \epsilon_{x^2 - y^2} - \epsilon_{xy} - l_{1x}\cos\theta + l_{1y}\sin\theta
- l_{2x}\sin\theta - l_{2y}\cos\theta)\right.\\
\left. \phantom{ABCDEFG}(\chi_{4,\bm{q}}^{-1} - \gamma - \epsilon_{x^2 - y^2} + \epsilon_{xy} + l_{1x}\cos\theta + l_{1y}\sin\theta
+ l_{2x}\sin\theta - l_{2y}\cos\theta)\right.\\
\left. \phantom{ABCDEFG}(\chi_{4,\bm{q}}^{-1} - \gamma - \epsilon_{x^2 - y^2} + \epsilon_{xy} - l_{1x}\cos\theta - l_{1y}\sin\theta
- l_{2x}\sin\theta + l_{2y}\cos\theta)\right],
\end{multline}
where $\chi_{1,\bm{q}}^{-1} = r_0 + \psi + \kappa_1(q_x^2 + q_y^2) + \kappa_2(q_x^2 - q_y^2) + 2\kappa_3q_xq_y$,
$\chi_{2,\bm{q}}^{-1} = r_0 + \psi + \kappa_1(q_x^2 + q_y^2) + \kappa_2(q_x^2 - q_y^2) - 2\kappa_3q_xq_y$,
$\chi_{3,\bm{q}}^{-1} = r_0 + \psi + \kappa_1(q_x^2 + q_y^2) - \kappa_2(q_x^2 - q_y^2) - 2\kappa_3q_xq_y$, and
$\chi_{4,\bm{q}}^{-1} = r_0 + \psi + \kappa_1(q_x^2 + q_y^2) - \kappa_2(q_x^2 - q_y^2) + 2\kappa_3q_xq_y$.
\begin{comment}
\begin{multline} \label{eq: r1}
r = \bar{r}_0 - \frac{4\tilde\beta_1}{\pi \tilde\kappa} \ln \left\{ \left[ \left( r + \gamma + \epsilon_{x^2-y^2} + \epsilon_{xy} \right)^2 - \left( l_{1x}\sin\theta + l_{1y}\cos\theta - l_{2x}\cos\theta + l_{2y}\sin\theta \right)^2 \right] \right. \\
\left. \left[ \left( r - \gamma + \epsilon_{x^2-y^2} - \epsilon_{xy} \right)^2 - \left( l_{1x}\sin\theta - l_{1y}\cos\theta - l_{2x}\cos\theta - l_{2y}\sin\theta \right)^2 \right] \right. \\
\left. \left[ \left( r + \gamma - \epsilon_{x^2-y^2} - \epsilon_{xy} \right)^2 - \left( l_{1x}\cos\theta - l_{1y}\sin\theta + l_{2x}\sin\theta + l_{2y}\cos\theta \right)^2 \right] \right. \\
\left. \left[ \left( r - \gamma - \epsilon_{x^2-y^2} + \epsilon_{xy} \right)^2 - \left( l_{1x}\cos\theta + l_{1y}\sin\theta + l_{2x}\sin\theta - l_{2y}\cos\theta \right)^2 \right] \right\}
\end{multline}
\begin{multline} \label{eq: gamma}
\gamma = - \frac{\tilde\beta_5}{4\pi\tilde\kappa} \left\{ \ln \left[ \frac{\left( r + \gamma + \epsilon_{x^2-y^2} + \epsilon_{xy} \right)^2 - \left( l_{1x}\sin\theta + l_{1y}\cos\theta - l_{2x}\cos\theta + l_{2y}\sin\theta \right)^2}
{\left( r - \gamma - \epsilon_{x^2-y^2} + \epsilon_{xy} \right)^2 - \left( l_{1x}\cos\theta + l_{1y}\sin\theta + l_{2x}\sin\theta - l_{2y}\cos\theta \right)^2} \right] \right. \\
\left. + \ln \left[ \frac{\left( r + \gamma - \epsilon_{x^2-y^2} - \epsilon_{xy} \right)^2 - \left( l_{1x}\cos\theta - l_{1y}\sin\theta + l_{2x}\sin\theta + l_{2y}\cos\theta \right)^2}
{\left( r - \gamma + \epsilon_{x^2-y^2} - \epsilon_{xy} \right)^2 - \left( l_{1x}\sin\theta - l_{1y}\cos\theta - l_{2x}\cos\theta - l_{2y}\sin\theta \right)^2} \right] \right\}
\end{multline}
\begin{multline} \label{eq: x2-y2}
\epsilon_{x^2-y^2} = - \frac{\tilde\beta_6}{4\pi\tilde\kappa} \left\{ \ln \left[ \frac{\left( r + \gamma + \epsilon_{x^2-y^2} + \epsilon_{xy} \right)^2 - \left( l_{1x}\sin\theta + l_{1y}\cos\theta - l_{2x}\cos\theta + l_{2y}\sin\theta \right)^2}
{\left( r - \gamma - \epsilon_{x^2-y^2} + \epsilon_{xy} \right)^2 - \left( l_{1x}\cos\theta + l_{1y}\sin\theta + l_{2x}\sin\theta - l_{2y}\cos\theta \right)^2} \right] \right. \\
\left. + \ln \left[ \frac{\left( r - \gamma + \epsilon_{x^2-y^2} - \epsilon_{xy} \right)^2 - \left( l_{1x}\sin\theta - l_{1y}\cos\theta - l_{2x}\cos\theta - l_{2y}\sin\theta \right)^2}
{\left( r + \gamma - \epsilon_{x^2-y^2} - \epsilon_{xy} \right)^2 - \left( l_{1x}\cos\theta - l_{1y}\sin\theta + l_{2x}\sin\theta + l_{2y}\cos\theta \right)^2} \right] \right\}
\end{multline}
\begin{multline} \label{eq: xy}
\epsilon_{xy} = - \frac{\tilde\beta_7}{4\pi\tilde\kappa} \left\{ \ln \left[ \frac{\left( r + \gamma + \epsilon_{x^2-y^2} + \epsilon_{xy} \right)^2 - \left( l_{1x}\sin\theta + l_{1y}\cos\theta - l_{2x}\cos\theta + l_{2y}\sin\theta \right)^2}
{\left( r + \gamma - \epsilon_{x^2-y^2} - \epsilon_{xy} \right)^2 - \left( l_{1x}\cos\theta - l_{1y}\sin\theta + l_{2x}\sin\theta + l_{2y}\cos\theta \right)^2} \right] \right. \\
\left. + \ln \left[ \frac{\left( r - \gamma - \epsilon_{x^2-y^2} + \epsilon_{xy} \right)^2 - \left( l_{1x}\cos\theta + l_{1y}\sin\theta + l_{2x}\sin\theta - l_{2y}\cos\theta \right)^2}
{\left( r - \gamma + \epsilon_{x^2-y^2} - \epsilon_{xy} \right)^2 - \left( l_{1x}\sin\theta - l_{1y}\cos\theta - l_{2x}\cos\theta - l_{2y}\sin\theta \right)^2} \right] \right\}
\end{multline}
\begin{multline}
l_{1x} = \frac{|\lambda_1|}{4\pi\tilde\kappa} \left\{ \sin\theta \ln \left[ \frac{\left( r + \epsilon_{x^2-y^2} + l_{1x}\sin\theta - l_{2x}\cos\theta \right)^2 - \left( \gamma + \epsilon_{xy} + l_{1y}\cos\theta + l_{2y}\sin\theta \right)^2}
{\left( r + \epsilon_{x^2-y^2} - l_{1x}\sin\theta + l_{2x}\cos\theta \right)^2 - \left( \gamma + \epsilon_{xy} - l_{1y}\cos\theta - l_{2y}\sin\theta \right)^2} \right] \right.\\
\left. + \cos\theta \ln \left[ \frac{\left( r - \epsilon_{x^2-y^2} + l_{1x}\cos\theta + l_{2x}\sin\theta \right)^2 - \left( \gamma - \epsilon_{xy} - l_{1y}\sin\theta + l_{2y}\cos\theta \right)^2}
{\left( r - \epsilon_{x^2-y^2} - l_{1x}\cos\theta - l_{2x}\sin\theta \right)^2 - \left( \gamma - \epsilon_{xy} + l_{1y}\sin\theta - l_{2y}\cos\theta \right)^2} \right] \right\}
\end{multline}
\begin{multline}
l_{1y} = \frac{|\lambda_1|}{4\pi\tilde\kappa} \left\{ \cos\theta \ln \left[ \frac{\left( r + \epsilon_{x^2-y^2} + l_{1y}\cos\theta + l_{2y}\sin\theta \right)^2 - \left( \gamma + \epsilon_{xy} + l_{1x}\sin\theta - l_{2x}\cos\theta \right)^2}
{\left( r + \epsilon_{x^2-y^2} - l_{1y}\cos\theta - l_{2y}\sin\theta \right)^2 - \left( \gamma + \epsilon_{xy} - l_{1x}\sin\theta + l_{2x}\cos\theta \right)^2} \right] \right.\\
\left. + \sin\theta \ln \left[ \frac{\left( r - \epsilon_{x^2-y^2} + l_{1y}\sin\theta - l_{2y}\cos\theta \right)^2 - \left( \gamma - \epsilon_{xy} - l_{1x}\cos\theta - l_{2x}\sin\theta \right)^2}
{\left( r - \epsilon_{x^2-y^2} - l_{1y}\sin\theta + l_{2y}\cos\theta \right)^2 - \left( \gamma - \epsilon_{xy} + l_{1x}\cos\theta + l_{2x}\sin\theta \right)^2} \right] \right\}
\end{multline}
\begin{multline}
l_{2x} = -\frac{\lambda_2}{4\pi\tilde\kappa} \left\{ \cos\theta \ln \left[ \frac{\left( r + \epsilon_{x^2-y^2} - l_{1x}\sin\theta + l_{2x}\cos\theta \right)^2 - \left( \gamma + \epsilon_{xy} - l_{1y}\cos\theta - l_{2y}\sin\theta \right)^2}
{\left( r + \epsilon_{x^2-y^2} + l_{1x}\sin\theta - l_{2x}\cos\theta \right)^2 - \left( \gamma + \epsilon_{xy} + l_{1y}\cos\theta + l_{2y}\sin\theta \right)^2} \right] \right.\\
\left. + \sin\theta \ln \left[ \frac{\left( r - \epsilon_{x^2-y^2} + l_{1x}\cos\theta + l_{2x}\sin\theta \right)^2 - \left( \gamma - \epsilon_{xy} - l_{1y}\sin\theta + l_{2y}\cos\theta \right)^2}
{\left( r - \epsilon_{x^2-y^2} - l_{1x}\cos\theta - l_{2x}\sin\theta \right)^2 - \left( \gamma - \epsilon_{xy} + l_{1y}\sin\theta - l_{2y}\cos\theta \right)^2} \right] \right\}
\end{multline}
\begin{multline}
l_{2y} = -\frac{\lambda_2}{4\pi\tilde\kappa} \left\{ \sin\theta \ln \left[ \frac{\left( r + \epsilon_{x^2-y^2} + l_{1y}\cos\theta + l_{2y}\sin\theta \right)^2 - \left( \gamma + \epsilon_{xy} + l_{1x}\sin\theta - l_{2x}\cos\theta \right)^2}
{\left( r + \epsilon_{x^2-y^2} - l_{1y}\cos\theta - l_{2y}\sin\theta \right)^2 - \left( \gamma + \epsilon_{xy} - l_{1x}\sin\theta + l_{2x}\cos\theta \right)^2} \right] \right.\\
\left. + \cos\theta \ln \left[ \frac{\left( r - \epsilon_{x^2-y^2} - l_{1y}\sin\theta + l_{2y}\cos\theta \right)^2 - \left( \gamma - \epsilon_{xy} + l_{1x}\cos\theta + l_{2x}\sin\theta \right)^2}
{\left( r - \epsilon_{x^2-y^2} + l_{1y}\sin\theta - l_{2y}\cos\theta \right)^2 - \left( \gamma - \epsilon_{xy} - l_{1x}\cos\theta - l_{2x}\sin\theta \right)^2} \right] \right\}
\end{multline}
\end{comment}
To carry out the integrals, the anisotropy in $\chi_{i, \bm q}^{-1}$ due to $\kappa_2$ and $\kappa_3$ can again be removed by rotating and re-scaling $q_x$ and $q_y$,
yielding $(\tilde{q}_x^2+\tilde{q}_y^2)/ \tilde{\kappa}$ with $\tilde{\kappa}=\sqrt{\kappa_1^2-\kappa_2^2-\kappa_3^2}$. We find the self-consistency equations by
setting the first derivatives with respect to the fields equal to zero. The relevant solution that minimizes the action satisfies $\gamma = 0$, $\epsilon_{x^2-y^2} = 0$,
$l_{1x} = l_{1y} \equiv \ell_1$ and $l_{2x} = l_{2y} \equiv \ell_2$ and the self consistency equations become (here $r = r_0 + \psi$ and
$\bar{r}_0 = r_0 + (4\tilde\beta_1/\pi \tilde\kappa)\ln\Lambda$)
\begin{multline}\label{eq: r2}
r = \bar{r}_0 - \frac{8\tilde\beta_1}{\pi \tilde\kappa} \ln \left\{ \left[ \left( r + \epsilon_{xy} \right)^2 - \left( \ell_1\cos\theta + \ell_1\sin\theta + \ell_2\sin\theta - \ell_2\cos\theta \right)^2 \right] \right. \\
\left. \left[ \left( r - \epsilon_{xy} \right)^2 - \left( \ell_1\cos\theta - \ell_1\sin\theta + \ell_2\sin\theta + \ell_2\cos\theta \right)^2 \right] \right\},
\end{multline}
\begin{multline}
\epsilon_{xy} = - \frac{\tilde\beta_7}{4\pi\tilde\kappa} \left\{ \ln \left[ \frac{\left( r + \epsilon_{xy} \right)^2 - \left( \ell_1\sin\theta + \ell_1\cos\theta - \ell_2\cos\theta + \ell_2\sin\theta \right)^2}
{\left( r - \epsilon_{xy} \right)^2 - \left( \ell_1\cos\theta - \ell_1\sin\theta + \ell_2\sin\theta + \ell_2\cos\theta \right)^2}\right] \right. \\
\left. + \ln \left[ \frac{\left( r + \epsilon_{xy} \right)^2 - \left( \ell_1\cos\theta + \ell_1\sin\theta + \ell_2\sin\theta - \ell_2\cos\theta \right)^2}
{\left( r - \epsilon_{xy} \right)^2 - \left( \ell_1\sin\theta - \ell_1\cos\theta - \ell_2\cos\theta - \ell_2\sin\theta \right)^2}\right] \right\},
\end{multline}
\begin{multline}
\ell_1 = \frac{|\lambda_1|}{4\pi\tilde\kappa} \left\{ \cos\theta \ln \left[ \frac{\left( r + \ell_1\cos\theta + \ell_2\sin\theta \right)^2 - \left( \epsilon_{xy} + \ell_1\sin\theta - \ell_2\cos\theta \right)^2}
{\left( r - \ell_1\cos\theta - \ell_2\sin\theta \right)^2 - \left( \epsilon_{xy} - \ell_1\sin\theta + \ell_2\cos\theta \right)^2} \right] \right.\\
\left. + \sin\theta \ln \left[ \frac{\left( r + \ell_1\sin\theta - \ell_2\cos\theta \right)^2 - \left( \epsilon_{xy} + \ell_1\cos\theta + \ell_2\sin\theta \right)^2}
{\left( r - \ell_1\sin\theta + \ell_2\cos\theta \right)^2 - \left( \epsilon_{xy} - \ell_1\cos\theta - \ell_2\sin\theta \right)^2} \right] \right\},
\end{multline}
\begin{multline}
\ell_2 = -\frac{\lambda_2}{4\pi\tilde\kappa} \left\{ \cos\theta \ln \left[ \frac{\left( r - \ell_1\sin\theta + \ell_2\cos\theta \right)^2 - \left( \epsilon_{xy} - \ell_1\cos\theta - \ell_2\sin\theta \right)^2}
{\left( r + \ell_1\sin\theta - \ell_2\cos\theta \right)^2 - \left( \epsilon_{xy} + \ell_1\cos\theta + \ell_2\sin\theta \right)^2} \right] \right.\\
\left. + \sin\theta \ln \left[ \frac{\left( r + \ell_1\cos\theta + \ell_2\sin\theta \right)^2 - \left( \epsilon_{xy} + \ell_1\sin\theta - \ell_2\cos\theta \right)^2}
{\left( r - \ell_1\cos\theta - \ell_2\sin\theta \right)^2 - \left( \epsilon_{xy} - \ell_1\sin\theta + \ell_2\cos\theta \right)^2} \right] \right\}.
\end{multline}
\begin{comment}
Let $a = \cos\theta + \sin\theta$ and $b = \cos\theta - \sin\theta$, then
\begin{equation}
r = \bar{r}_0 - \frac{8\tilde\beta_1}{\pi \tilde\kappa} \ln \left\{ \left[ \left( r + \epsilon_{xy} \right)^2 - \left( \ell_1 a - \ell_2 b \right)^2 \right]
\left[ \left( r - \epsilon_{xy} \right)^2 - \left( \ell_1 b + \ell_2 a \right)^2 \right] \right\}
\end{equation}
\begin{equation}
\gamma = - \frac{\tilde\beta_5}{4\pi\tilde\kappa} \left\{ \ln \left[ \frac{\left( r + \epsilon_{xy} \right)^2 - \left( \ell_1 a - \ell_2 b \right)^2}
{\left( r + \epsilon_{xy} \right)^2 - \left( \ell_1 a - \ell_2 b \right)^2} \right]
+ \ln \left[ \frac{\left( r - \epsilon_{xy} \right)^2 - \left( \ell_1 b + \ell_2 a \right)^2}
{\left( r - \epsilon_{xy} \right)^2 - \left( \ell_1 b + \ell_2 a \right)^2} \right] \right\} = 0
\end{equation}
\begin{equation}
\epsilon_{x^2-y^2} = - \frac{\tilde\beta_6}{4\pi\tilde\kappa} \left\{ \ln \left[ \frac{\left( r + \epsilon_{xy} \right)^2 - \left( \ell_1 a - \ell_2 b \right)^2}
{\left( r + \epsilon_{xy} \right)^2 - \left( \ell_1 a - \ell_2 b \right)^2} \right]
+ \ln \left[ \frac{\left( r - \epsilon_{xy} \right)^2 - \left( \ell_1 b + \ell_2 a \right)^2}
{\left( r - \epsilon_{xy} \right)^2 - \left( \ell_1 b + \ell_2 a \right)^2} \right] \right\} = 0
\end{equation}
\begin{equation}
\epsilon_{xy} = - \frac{\tilde\beta_7}{4\pi\tilde\kappa} \left\{ \ln \left[ \frac{\left( r + \epsilon_{xy} \right)^2 - \left( \ell_1 a - \ell_2 b \right)^2}
{\left( r - \epsilon_{xy} \right)^2 - \left( \ell_1 b + \ell_2 a \right)^2} \right]
+ \ln \left[ \frac{\left( r + \epsilon_{xy} \right)^2 - \left( \ell_1 a - \ell_2 b \right)^2}
{\left( r - \epsilon_{xy} \right)^2 - \left( \ell_1 b + \ell_2 a \right)^2} \right] \right\}
\end{equation}
\begin{multline}
\ell_1 = \frac{|\lambda_1|}{4\pi\tilde\kappa} \left\{ \cos\theta \ln \left[ \frac{\left( r + \ell_1 a - \ell_2 b + \epsilon_{xy} \right) \left( r + \ell_1 b + \ell_2 a - \epsilon_{xy} \right)}
{\left( r - \ell_1 a + \ell_2 b + \epsilon_{xy} \right) \left( r - \ell_1 b - \ell_2 a - \epsilon_{xy} \right)} \right] \right.\\
\left. + \sin\theta \ln \left[ \frac{\left( r + \ell_1 a - \ell_2 b + \epsilon_{xy} \right) \left( r - \ell_1 b - \ell_2 a - \epsilon_{xy} \right)}
{\left( r - \ell_1 a + \ell_2 b + \epsilon_{xy} \right) \left( r + \ell_1 b + \ell_2 a - \epsilon_{xy} \right)} \right] \right\}
\end{multline}
\begin{multline}
\ell_2 = -\frac{\lambda_2}{4\pi\tilde\kappa} \left\{ \cos\theta \ln \left[ \frac{\left( r - \ell_1 a + \ell_2 b + \epsilon_{xy} \right) \left( r + \ell_1 b + \ell_2 a - \epsilon_{xy} \right)}
{\left( r + \ell_1 a - \ell_2 b + \epsilon_{xy} \right) \left( r - \ell_1 b - \ell_2 a - \epsilon_{xy} \right)^2} \right] \right.\\
\left. + \sin\theta \ln \left[ \frac{\left( r + \ell_1 a - \ell_2 b + \epsilon_{xy} \right) \left( r + \ell_1 b + \ell_2 a - \epsilon_{xy} \right)}
{\left( r - \ell_1 a + \ell_2 b + \epsilon_{xy} \right) \left( r - \ell_1 b - \ell_2 a - \epsilon_{xy} \right)^2} \right] \right\}
\end{multline}
\noindent $\Longrightarrow$
\begin{multline} \label{eq: r}
r = \bar{r}_0 - \frac{8\tilde\beta_1}{\pi \tilde\kappa} \left\{ 4 \ln r + \ln \left( 1 + \tilde \ell_1 a - \tilde \ell_2 b + \tilde \epsilon_{xy} \right) + \ln \left( 1 - \tilde \ell_1 a + \tilde \ell_2 b + \tilde \epsilon_{xy} \right) \right.\\
\left. + \ln \left( 1 + \tilde \ell_1 b + \tilde \ell_2 a - \tilde \epsilon_{xy} \right) + \ln \left( 1 - \tilde \ell_1 b - \tilde \ell_2 a - \tilde \epsilon_{xy} \right) \right\}
\end{multline}
\begin{gather}
- \frac{2\pi\tilde\kappa}{\tilde\beta_7}\epsilon_{xy} = \ln \left[ \frac{\left( r + \epsilon_{xy} + \ell_1 a - \ell_2 b \right) \left( r + \epsilon_{xy} - \ell_1 a + \ell_2 b \right)}
{\left( r - \epsilon_{xy} + \ell_1 b + \ell_2 a \right) \left( r - \epsilon_{xy} - \ell_1 b - \ell_2 a \right)} \right] \\
\Rightarrow \phantom{ABCDEFGHIJKLMNOPQRSTUVWXYZABCD} \nonumber \\
- \frac{2\pi\tilde\kappa}{\tilde\beta_7}\epsilon_{xy} = \ln \left( 1 + \tilde \epsilon_{xy} + \tilde \ell_1 a - \tilde \ell_2 b \right) + \ln \left( 1 + \tilde \epsilon_{xy} - \tilde \ell_1 a + \tilde \ell_2 b \right)
- \ln \left( 1 - \tilde \epsilon_{xy} + \tilde \ell_1 b + \tilde \ell_2 a \right) - \ln \left( 1 - \tilde \epsilon_{xy} - \tilde \ell_1 b - \tilde \ell_2 a \right) \phantom{ABCDE}
\end{gather}
\begin{multline}
\frac{4\pi\tilde\kappa}{|\lambda_1|}\ell_1 = \cos\theta \left[ \ln \left( 1 + \tilde \ell_1 a - \tilde \ell_2 b + \tilde \epsilon_{xy} \right) + \ln \left( 1 + \tilde \ell_1 b + \tilde \ell_2 a - \tilde \epsilon_{xy} \right) \right.\\
\shoveright{\left. - \ln \left( 1 - \tilde \ell_1 a + \tilde \ell_2 b + \tilde \epsilon_{xy} \right) - \ln \left( 1 - \tilde \ell_1 b - \tilde \ell_2 a - \tilde \epsilon_{xy} \right) \right]} \\
+\phantom{.}\sin\theta \left[ \ln \left( 1 + \tilde \ell_1 a - \tilde \ell_2 b + \tilde \epsilon_{xy} \right) + \ln \left( 1 - \tilde \ell_1 b - \tilde \ell_2 a - \tilde \epsilon_{xy} \right) \phantom{ABCD..}\right.\\
\left. - \ln \left( 1 - \tilde \ell_1 a + \tilde \ell_2 b + \tilde \epsilon_{xy} \right) - \ln \left( 1 + \tilde \ell_1 b + \tilde \ell_2 a - \tilde \epsilon_{xy} \right) \right]
\end{multline}
\begin{multline}
-\frac{4\pi\tilde\kappa}{\lambda_2}\ell_2 = \cos\theta \left[ \ln \left( 1 - \tilde \ell_1 a + \tilde \ell_2 b + \tilde \epsilon_{xy} \right) + \ln \left( 1 + \tilde \ell_1 b + \tilde \ell_2 a - \tilde \epsilon_{xy} \right) \right.\\
\shoveright{\left. - \ln \left( 1 + \tilde \ell_1 a - \tilde \ell_2 b + \tilde \epsilon_{xy} \right) - \ln \left( 1 - \tilde \ell_1 b - \tilde \ell_2 a - \tilde \epsilon_{xy} \right) \right]} \\
+\phantom{.}\sin\theta \left[ \ln \left( 1 + \tilde \ell_1 a - \tilde \ell_2 b + \tilde \epsilon_{xy} \right) + \ln \left( 1 + \tilde \ell_1 b + \tilde \ell_2 a - \tilde \epsilon_{xy} \right) \phantom{ABCD..} \right.\\
\left. - \ln \left( 1 - \tilde \ell_1 a + \tilde \ell_2 b + \tilde \epsilon_{xy} \right) - \ln \left( 1 - \tilde \ell_1 b - \tilde \ell_2 a - \tilde \epsilon_{xy} \right) \right]
\end{multline}
\end{widetext}
where $\epsilon_{xy} = \tilde \epsilon_{xy} r$, $\ell_1 = \tilde \ell_1 r \text{~and~} \ell_2 = \tilde \ell_2 r$.
\end{comment}
\end{widetext}
To address whether or not there can be a second order transition into a phase with loop current order, we expand in powers of $\ell_1$. To cubic order in $\ell_1$ we
find
\begin{multline}
\epsilon_{xy} = - \frac{\tilde \beta_7^*}{2(2 \tilde \beta_7^* + r) r} \left[ 4 \cos2\theta ~ \ell_1 \ell_2 \right.\\ \left. + 2 \sin2\theta (-\ell_1^2 + \ell_2^2)\right],
\end{multline}
\begin{equation}
\ell_2 \sim \mathcal{O}(\ell_1^3),
\end{equation}
\begin{multline}
4 r^2(r - \left|\lambda_1^* \right|) \ell_1 = - 4\left|\lambda_1^* \right| \frac{\tilde \beta_7^*}{2(2 \tilde \beta_7^* + r)} \sin^22\theta~\ell_1^3 \\ - \frac{2}{3}\left|\lambda_1^* \right| \left( \cos4\theta - 3\right) \ell_1^3,
\end{multline}
where $^*$ denotes that the coefficients are scaled by $\pi \tilde\kappa$. Thus to leading order in $\ell_1$, $r = \left|\lambda_1^* \right|$. Going to next higher
order, let $r = r_{\delta = 0} + \delta = \left|\lambda_1^* \right| + \delta$, where $\delta$ is a small correction such that $\delta/\left|\lambda_1^* \right| \ll 1$,
then the previous equation becomes
\begin{multline}\label{eq: delta}
\frac{\delta}{\left|\lambda_1^* \right|} = \left(- \frac{\alpha_7}{2 \alpha_7 + 1} + \frac{1}{6} \right) \sin^22\theta~{\ell_1^*}^2 - \frac{1}{6}\cos^22\theta~{\ell_1^*}^2 \\ + \frac{1}{2} {\ell_1^*}^2
\end{multline}
and Eq.~(\ref{eq: r2}) leads to
\begin{equation}\label{eq: rbarbar}
\bar{\bar{r}}_0 = 1 + \left(1 + 32 \alpha_1 \right) \frac{\delta}{\left|\lambda_1^* \right|} - 16 \alpha_1 {\ell_1^*}^2,
\end{equation}
where $\bar{\bar{r}}_0 = (\bar r_0 / \left|\lambda_1^* \right|) - 32 \alpha_1 \ln\left|\lambda_1^* \right| $, $\alpha_1 = \tilde \beta_1 / \left| \lambda_1 \right|$,
$ \alpha_7 = \tilde \beta_7 / \left| \lambda_1 \right| $ and $\ell_1^* = \ell_1 / \left| \lambda_1^* \right|$. Eliminating $\delta$ between Eqs.~(\ref{eq: delta}) and
(\ref{eq: rbarbar}), we obtain
\begin{multline}\label{eq: trans}
\bar{\bar{r}}_0 = 1 + (1 + 32 \alpha_1) \left[ \left(-\frac{\alpha_7}{1 + 2 \alpha_7} + \frac{1}{6} \right)\sin^22\theta \right.\\ \left. - \frac{1}{6}\cos^22\theta \right]{\ell_1^*}^2 + \frac{1}{2} {\ell_1^*}^2.
\end{multline}
Equation~(\ref{eq: trans}) shows that a local maximum $\bar{\bar{r}}_0 = 1$ occurs if the quadratic term in $\ell_1^*$ is negative. Since $\bar{\bar{r}}_0$ is monotonically
increasing with temperature, this maximum gives the highest possible transition temperature (provided there are no other local maxima at higher $\bar{\bar{r}}_0$ -- here
we note that no such maxima occurred in a related model \cite{fer12}) and the corresponding transition is second order. However, if the quadratic term in $\ell_1^*$ is
positive, then the largest value of $\bar{\bar{r}}_0$ will occur at non-zero $\ell_1^*$, indicating a first order transition. This emergent loop current phase shares
the same symmetry properties as the ME loop current state discussed in Refs.~\onlinecite{sim02, aji13}. While such a phase captures much of the physics associated with broken
time-reversal symmetry, it does not provide a complete explanation of all the signatures of broken time-reversal symmetry in the pseudogap phase \cite{yak14}. We
address this in the next section.
\begin{comment}
This emergent loop current phase shares the same symmetry properties as the ME loop current state discussed in Ref.~\onlinecite{aji13}. Such a state is consistent with polarized neutron scattering observations of broken time-reversal symmetry \cite{sid13},
the observation of dichroic ARPES order \cite{kam02}, and can account for the ultrasound observation of a second order phase transition in the pseudogap phase
\cite{she13}. Note that this phase cannot explain broken time-reversal symmetry.
\end{comment}
\section{\label{sec: tilted-loop}Tilted loop current order}
It has been argued that the Kerr effect \cite{xia08,kar14} is zero for the ME loop current state discussed above and a non-vanishing Kerr effect requires additional
physics (such as a structural transition \cite{she13} or ordering along the $c$-axis). This has been discussed in detail by Yakovenko \cite{yak14} and he has
identified a modified loop current state consistent with all experiments of broken time-reversal symmetry. This tilted loop current state is shown in
Fig.~\ref{fig: yako}\subref{subfig: yakovenko_loop}.
It is possible to find a PDW state that shares the same symmetry properties as the tilted loop current state (once the SC and CDW orders
are removed through fluctuations). The simplest way to find such a state is to allow for the pairing momenta to have a $c$-axis component. The corresponding PDW order
parameter has sixteen complex degrees of freedom (eight for momenta $\vQ_i+Q_z\hat{z}$ and eight for PDW momenta $\vQ_i-Q_z\hat{z}$ where the $\vQ_i$ are the momenta
considered in Section \ref{sec: in-plane-tetra}). Here we do not present a complete analysis of this order parameter. However, it is possible to show that the state
depicted in Fig.~\ref{fig: yako}\subref{subfig: yakovenko_pairing} is a mean-field ground state and thus represents a viable order parameter. In this state only four of
the PDW momenta have non-zero order parameter components. As depicted in Fig.~\ref{fig: yako}\subref{subfig: yakovenko_pairing}, two of these momenta lie below the $x$-$y$
plane and two lie above the $x$-$y$ plane. When the SC and CDW order are removed through fluctuations, this state will have the same symmetry properties as the tilted
loop-current phase and is therefore also consistent with all existing experiments that show broken time-reversal symmetry.
\vspace{-2mm}
\begin{figure}[h]
\begin{center}
\subfloat[]{\label{subfig: yakovenko_loop}
\includegraphics[width=1.62in]{tilted_loop21.eps}} \hfill
\subfloat[]{\label{subfig: yakovenko_pairing}
\includegraphics[width=1.62in]{tilted1.eps}}
\caption{(Color online) (a) Tilted loop current state proposed by Yakovenko \cite{yak14}. The arrows on the bonds depict the direction of the current, the longer arrows
depict the associated magnetic moments.
(b) PDW state with the same symmetry properties as the tilted loop current state. The arrows ${\bm K}_i$ depict the non-zero components of the PDW order parameter.
Wavevectors labeled \textquotedblleft+\textquotedblright are above the $x$-$y$ plane and those labeled \textquotedblleft\textendash\textquotedblright are below the $x$-$y$
plane. \label{fig: yako}}
\vspace{-5mm}
\end{center}
\end{figure}
\section{\label{sec: quasi} Quasi-particle properties of loop current PDW phases}
In this Section we examine whether the broken time-
\newpage
\onecolumngrid
\begin{figure}[t]
\centering
\subfloat[$\Delta_1 = \Delta_2;~k_y = \pi$]{\label{subfig: banda}
\includegraphics[width=0.29\textwidth]{fig2_a2}}
\subfloat[$\Delta_1 = \Delta_2;~k_y = \pi - 0.7$]{\label{subfig: bandb}
\includegraphics[width=0.29\textwidth]{fig2_b2}}
\subfloat[$\Delta_1 \ne \Delta_2;~k_y = \pi$]{\label{subfig: bandc}
\includegraphics[width=0.29\textwidth]{fig2_c2}}
\caption{\label{fig: bands}(Color online) Quasi particle spectrum for the ME PDW state with $\delta K_y=0.1$. Shown are the bare electron dispersion
(the white parabola) and the PDW bands weighted by $|u(k)|^2$ (the negative energy portion is observable by ARPES). (a) $\Delta_1=\Delta_2=75$ meV and $k_y=\pi$.
(b) $\Delta_1=\Delta_2=75$ meV and $k_y=\pi-0.7$, here occupied bands have moved up to $\epsilon_F$ to create the Fermi arcs.
(c) $\Delta_1=85$ meV, $\Delta_2=65$ meV, and $k_y=\pi$. Notice the asymmetry in $k_x$ about $k_x=0$.}
\vspace{-5mm}
\end{figure}
\twocolumngrid
\noindent reversal symmetric PDW states are consistent with ARPES measurements. Here we focus our analysis on the tetragonal ME PDW state discussed in Section VI
(qualitatively similar results will appear for the PDW state discussed in Section VII). To examine the qp properties, we consider the Hamiltonian
\begin{equation}
H=\sum_{{\bm k},s} \epsilon_{{\bm k}}c^{\dagger}_{{\bm k}s} c_{{\bm k}s}+ \sum_{{\bm Q}_i,{\bm k}}[\Delta_{{\bm Q}_i}({\bm k})c^{\dagger}_{{\bm k}+\frac{{\bm Q}_i}{2},\uparrow} c^{\dagger}_{-{\bm k}+\frac{{\bm Q}_i}{2},\downarrow}\\+h.c.], \label{H}
\end{equation}
\noindent where $c_{{\bm k}s}$ is the fermion destruction operator with momentum ${\bm k}$ and spin $s$, $\epsilon_{\bm k}$ is the bare dispersion, and $h.c.$ means
Hermitian conjugate. We compute the eigenstates of Eq.~(\ref{H}) and the spectral weight using
\begin{figure}[t]
\begin{center}
\includegraphics[width=3.1in]{spw.eps}
\end{center}
\caption{(Color online) Spectral weight showing Fermi arcs for ME PDW state. Here $\Delta_1=\Delta_2=75$ meV and $\Gamma=10$ meV.}
\label{fig3}
\vspace{-1mm}
\end{figure}
\vspace{-5mm}
\begin{equation}
I(\omega,{\bm k})= \mathrm{Im}\sum_{\alpha} \frac{|u_{\alpha,{\bm k}}|^2}{\omega-E_{\alpha,{\bm k}}-i\Gamma},
\end{equation}
where $E_{\alpha,{\bm k}}$ are the eigenenergies of Eq.~(\ref{H}), $u_{\alpha,{\bm k}}$ is the weight of the fermion with momentum ${\bm k}$ in the band $\alpha$,
and the damping factor $\Gamma$ models short-range order in the PDW phase. In our calculations we use the bare dispersion $\epsilon_{\bm k}$ given in
Ref.~\onlinecite{he11} and set $\Gamma=0.1$ eV. In addition, we set $\Delta_{{\bm Q}_i}({\bm k})=\Delta_i f_i({\bm k}-{\bm K}_i)$ which localizes the pairing in
${\bm k}$ space as described in \cite{lee14} (for $\dq[1]$, $f_1({\bm k}-{\bm K}_1)=e^{-(k_y-K_y)^2/k_0^2}$, the other $f_i$ are determined by tetragonal symmetry).
Figures~\ref{fig: bands}\subref{subfig: banda} and \ref{fig: bands}\subref{subfig: bandb} show the bands weighted by a factor $|u_{\alpha,{\bm k}}|^2$ for fixed $k_y=\pi$
and $k_y=\pi-0.7$ as a function of $k_x$ (with $\Delta_1=\Delta_2$). These first two figures show that the Fermi arc results from occupied states moving towards the Fermi
level, a point emphasized in Ref.~\onlinecite{lee14}. In Fig.~\ref{fig: bands}\subref{subfig: bandc} we illustrate the role of $\Delta_1\ne\Delta_2$. Notice that the ARPES
bands become asymmetric about $k_x=0$. This asymmetry is consistent with existing ARPES measurements and it would be of interest to examine this experimentally. We note
that this asymmetry does not exist in the PDW phase proposed in Ref.~\onlinecite{lee14}. Fig.~\ref{fig3} shows the spectral weight for $\Delta_1=\Delta_2=75$ meV revealing
the Fermi arcs.
\vspace{-4mm}
\section{\label{sec: conclude} Conclusions}
\vspace{-2mm}
We have shown that PDW order can generate translational invariant ME loop current order as a secondary order parameter. We further show that there exists a PDW ground
state with ME loop current order, CDW correlations, and qp properties consistent with ARPES. When phase fluctuations are included, a state appears in which only the ME
loop current order has long-range spatial correlations. We predict that this state will exhibit short-range incommensurate angular momentum correlations at the same
wavevector as the CDW correlations. We also show that this state gives rise to an asymmetry in the qp properties that may be observed by ARPES.
\vspace{-4mm}
\begin{acknowledgments}
\vspace{-2mm}
We thank Egor Babaev, Andrey Chubukov, Julien Garaud, Marc-Henri Julien, Patrick Lee, and Yuxuan Wang for fruitful discussions. We acknowledge support from NSF grant
No. DMR-1335215.
\end{acknowledgments}
|
1,314,259,995,021 | arxiv | \section{Notation and definitions} \label{sec:notation}
We denote the set of probability distributions on $\{1,\dotsc,n\}$ by
\[
\mathcal{P} := \left\{ p = (p_1,\dotsc,p_n)\in\mathbb{R}^n : p_i \geq 0 \text{ for }i=1,\dotsc,n,\, \sum_{i=1}^n p_i = 1 \right\}.
\]
The set of probability vectors with strictly positive entries is denoted $\mathcal{P}_+$. For a vector $r\in \mathbb{R}^n$, $r_+$ denotes its largest entry, and $r_-$ denotes its smallest entry.
A function $f: \mathcal{P} \to \mathbb{R}$ is $\kappa$-\emph{Lipschitz} (with respect to the total variation distance) if for all $p,q \in \mathcal{P}$,
\begin{equation}\label{eq:def_Lipschitz}
|f(p) - f(q)| \leq \kappa\, \TV(p,q)
\end{equation}
and when \eqref{eq:def_Lipschitz} holds, $\kappa$ is called a Lipschitz constant for $f$.
The smallest $\kappa>0$ such that $f$ is $\kappa$-Lipschitz is called the \emph{optimal Lipschitz constant} for $f$. The function $f$ is said to be \emph{Lipschitz continuous} if it is $\kappa$-Lipschitz for some $\kappa>0$.
Given $x\in \mathbb{R}^n$, write $x^\downarrow = (x^\downarrow_j)_{j=1}^n$ for the permutation of $x$ such that $x^\downarrow_1 \geq x^\downarrow_2 \geq \dotsm \geq x^\downarrow_n$. For $x,y\in \mathbb{R}^n$, we say $x$ \emph{majorizes} $y$, written $x \succ y$, if
\begin{equation} \label{def:majorize}
\sum_{j=1}^k x^\downarrow_j \geq \sum_{j=1}^k y^\downarrow_j \quad \forall k=1,\dotsc,n-1, \quad \text{and}\quad \sum_{j=1}^n x^\downarrow_j = \sum_{j=1}^n y^\downarrow_j.
\end{equation}
We say a function $\varphi: \mathcal{P} \to \mathbb{R}$ is Schur convex if for $p,q\in \mathcal{P}$, $p\prec q\implies \varphi(p) \leq \varphi(q)$. We say $\varphi$ is \emph{Schur concave} if $-\varphi$ is Schur convex. One useful characterization of Schur convex functions is if $\varphi : \mathcal{P} \to \mathbb{R}$ is differentiable and symmetric, then it is Schur convex if and only if
\begin{equation} \label{eq:S-convex-condition}
(p_i - p_j) \left[ \partial_{p_i}\varphi(p) - \partial_{p_j} \varphi(p) \right] \geq 0 \qquad \forall i,j
\end{equation}
for each $p \in \mathcal{P}$ \cite[Section 3.A, Equation (10)]{marshall2011inequalities}.
\section{Continuity bounds for Schur concave functions}\label{sec:cty-bounds}
The quantity $E_C$ defined in \eqref{eq:def_EC} respects the majorization preorder, in that if $p, q \in \mathcal{P}$ satisfy $p\prec q$, then
\[
E_C(p) \geq E_C(q)
\]
as was shown in \cite[Proposition 1]{Ros81}.
In other words, $E_C$ is Schur concave. Recently \cite{HY10,HD17,HOS18}, it has been shown that the majorization preorder interacts well with respect to total variation distance, in the sense that in any total variation ball
\[
B_\varepsilon(r) = \left\{p \in \mathcal{P} : \TV(p,r) \leq \varepsilon \right\}
\]
there exists a minimal $r_\varepsilon^* \in B_\varepsilon(r)$ and maximal $r_{*,\varepsilon} \in B_\varepsilon(r)$ element:
\[
r_\varepsilon^* \prec p \prec r_{*,\varepsilon} \quad \forall p \in B_\varepsilon(r).
\]
Moreover, in \cite{HD17}, the following so-called semigroup property was established:
\begin{equation}\label{eq:semigroup}
r_{\varepsilon_1 + \varepsilon_2}^* =(r_{\varepsilon_1}^*)_{\varepsilon_2}^* \qquad \forall r\in \mathcal{P}, \, \varepsilon_1,\varepsilon_2 > 0.
\end{equation}
In \cite{HD19a}, this semigroup property was used to construct uniform continuity bounds for Schur concave functions with respect to the total variation distance. This construction begins by noting that if $p,q\in \mathcal{P}$ satisfy $\TV(p,q) \leq \varepsilon$, then for any Schur concave function $f$,
\begin{equation}\label{eq:bound-H-by-Delta-eps}
|f(p) - f(q)| \leq \max\left\{ f(q_\varepsilon^*) - f(q), f(p_\varepsilon^*) - f(p) \right\}.
\end{equation}
The semigroup property then allows the analysis of the quantity $q \mapsto f(q_\varepsilon^*) - f(q)$ to proceed infinitesimally:
\[
f(q_\varepsilon^*) - f(q) = \int_0^\varepsilon \partial_s f(q_s^*) \d s = \int_0^\varepsilon \Gamma_f(q_s^*) \d s
\]
for $\Gamma_f(r) := \left.\partial_t^+ f(r_t^*) \right|_{t=0}$, where $\partial_t^+$ denotes the derivative from above. Here, the path $(q_s^*)_{0\leq s \leq \varepsilon}$ is the so-called path of \emph{majorization flow}. Hence, if $\Gamma_f$ is bounded above by some $k > 0$, then
\[
f(q_\varepsilon^*) - f(q) \leq \varepsilon k
\]
which then yields a Lipschitz continuity bound for $f$ by using \eqref{eq:bound-H-by-Delta-eps}.
Moreover, the particular structure of $r_\varepsilon^*$ then can be used to show that for Schur concave $f$, the quantity $\Gamma_f$ is simply a difference of two partial derivatives. We refer to \cite{HD19a} for the details of this technique, which yields the following result.
\begin{theorem}[Corollary 3.2, \cite{HD19a}] \label{thm:HD19}
Let $f: \mathcal{P}\to \mathbb{R}$ be a Schur concave function which is continuously differentiable on $\mathcal{P}_+$. We write $f(r_1,\dotsc,r_n) \equiv f(r)$ for $r\in \mathcal{P}$. Next, for $r \in \mathcal{P}$, let $i_+ \in \{1,\dotsc,n\}$ be an index such that $r_+ = r_{i_+}$, and similarly $i_- \in \{1,\dotsc,n\}$ such that $r_- = r_{i_-}$.
Define
\begin{equation}
\begin{aligned}
\Gamma_f : \quad \mathcal{P}_+ &\to \mathbb{R}\\
r &\mapsto (\partial_{r_{i_+}} - \partial_{r_{i_-}})f(r_1,\dotsc, r_n).
\end{aligned}
\end{equation}
Note that this definition does not depend on the choice of $i_\pm$ since $f$ is permutation invariant. Then $f$ is Lipschitz continuous if and only if
\[
k := \sup_{r \in \mathcal{P}_+} \Gamma_f(r)
\]
satisfies $k < \infty$. Moreover, in the latter case $k$ is the optimal Lipschitz constant for $f$.
\end{theorem}
\section{Proof of the upper bound \eqref{eq:Lip-upper-bound}}\label{sec:upper-bound}
In this section, we use \Cref{thm:HD19} to establish \Cref{thm:main}. Note that by \eqref{eq:expected_num_connected_components}, $E_C : \mathcal{P} \to \mathbb{R}$ is a polynomial in the components of the probability vector $p$ and in particular is continuously differentiable.
In \cite[Proposition 1]{Ros81}, the author proves that $p \mapsto E_C(p)$ is Schur concave using the criterion \eqref{eq:S-convex-condition}, by showing that
\[
\partial_{p_i}E_C(p) - \partial_{p_j} E_C(p) = (p_j - p_i) \sum_{S^*} (|S^*|+1)! \prod_{k\in S^*} p_k
\]
where $S^*$ ranges over nonempty sets of $\{1,\dotsc,n\}\setminus\{i,j\}$. Hence,
\[
\Gamma_{E_C}(r) = (r_+ - r_-) \sum_{S^*} (|S^*|+1)! \prod_{k\in S^*} r_k
\]
where $S^*$ ranges over nonempty sets of $I :=\{1,\dotsc,n\}\setminus\{i_+,i_-\}$, where $i_\pm$ are indices such that $r_{i_\pm} = r_{\pm}$. We can use the criterion \eqref{eq:S-convex-condition} again by repeating the proof of \cite[Proposition 1]{Ros81} to show that for
\[
S(\{r_i\}_{i\in I}) := \sum_{S^*} (|S^*|+1)! \prod_{j\in S^*} r_j,
\]
we have
\[
\partial_{r_i}S(\{r_i\}_{i\in I}) - \partial_{r_j} S(\{r_i\}_{i\in I}) = (r_j - r_i) \sum_{S^*} (|S^*|+3)! \prod_{k\in S^*} r_k
\]
and hence $S$ is Schur concave on the set $\left\{ p \in \mathbb{R}^{n-2} : p_i \geq 0, \sum_i p_i = 1-r_+ - r_- \right\}$. For such $p$,
\[
S(p) \leq S\left( \left\{ \frac{1-r_- - r_+}{n-2} \right\}_{i \in I}\right)
\]
and thus
\begin{equation} \label{eq:EC-bound-1}
\Gamma_{E_C}(r) \leq (r_+ - r_-)\sum_{k=1}^{n-2} {n-2 \choose k} (k+1)! (1-r_- - r_+)^{k} (n-2)^{-k}.
\end{equation}
To obtain a Lipschitz bound on $E_C(r)$, it suffices to bound $\Gamma_{E_C}(r)$ independently of $r\in \mathcal{P}$. We upper bound \eqref{eq:EC-bound-1} by taking $r_-=0$. For the simplicity of notation, let $s = r_+$ and $m = n-2$. Then we aim to bound
\begin{equation} \label{eq:def_Bns}
B_m(s) := s \sum_{k=1}^m{m \choose k} (k+1)! (1-s)^{k}m^{-k}
\end{equation}
for $s\in \left[\frac{1}{m+2}, 1\right]$, using that $r_+ \in \left[\frac{1}{n},1\right]$ which follows from $r \in \mathcal{P}$.
Let
$$
S_m(s) := \sum_{k=1}^m c_{k,m} (1-s)^{k-1}
$$
with
$$
c_{k,m} := {m \choose k} \frac{(k+1)!}{m^k} = (k+1)\prod_{j=1}^{k-1} \left(1-\frac{j}{m}\right),
$$
then
\begin{equation}\label{eq:Bm-Sm-relationship}
B_m(s) = s(1-s) S_m(s).
\end{equation}
Applying the inequality $1-x\le\exp(-x)$ to every factor in $c_{k,m}$ gives the simple upper bound
\begin{eqnarray*}
c_{k,m}
&\le& (k+1) \prod_{j=1}^{k-1} \exp(-j/m) \\
&=&(k+1)\exp\left(-\sum_{j=1}^{k-1} j/m\right)
=(k+1)\exp\left(-\frac{(k-1)k}{2m}\right) \\
&\le& (k+1)\exp\left(-\frac{(k-1)^2}{2m}\right).
\end{eqnarray*}
As $c_{k,m}\ge0$, we can also use the same inequality $1-s\le \exp(-s)$ in the formula for $S_m(s)$.
This gives as a first upper bound:
\begin{eqnarray*}
S_m(s) &\le& \sum_{k=1}^m (k+1)\exp\left(-\frac{(k-1)^2}{2m} - (k-1)s\right) \\
&=& \sum_{l=0}^{m-1} (l+2)\exp\left(-\frac{l^2}{2m} - ls\right) \\
&=& 2+\sum_{l=1}^{m-1} (l+2)\exp\left(-\frac{l^2}{2m} - ls\right).
\end{eqnarray*}
We can interpret this sum as a lower Riemann sum for a certain Riemann integral.
Noting that the factor $l+2$ increases with $l$ and the factor $\exp\left(-\frac{l^2}{2m} -ls\right)$ decreases, we have
$$
(l+2)\exp\left(-\frac{l^2}{2m} -l s\right)
\le \int_{l-1}^l (u+3) \exp\left(-\frac{u^2}{2m}-u s \right) \;du.
$$
Therefore,
\begin{eqnarray*}
S_m(s)&=&2+\sum_{l=1}^{m-1} (l+2)\exp\left(-\frac{l^2}{2m}-ls \right)\\
&\le& 2+\sum_{l=1}^{m-1} \int_{l-1}^l (u+3) \exp\left(-\frac{u^2}{2m} -us\right) \;du \\
&=& 2+\int_{0}^{m-1} (u+3) \exp\left(-\frac{u^2}{2m}-us \right) \;du \\
&\le& 2+\int_{0}^{\infty} (u+3) \exp\left(-\frac{u^2}{2m}-us \right) \;du \\
&=& 2+\exp(ms^2/2) \int_{0}^{\infty} (u+3) \exp\left(-\frac{(u+ms)^2}{2m} \right) \;du.
\end{eqnarray*}
In terms of the probability density function $\phi(x)$ of the standard normal distribution, $\phi(x) = \exp(-x^2/2)/\sqrt{2\pi}$,
and making the substitution $v=(u+ms)/\sqrt{m}$,
this last expression can be written as
\begin{eqnarray*}
\frac{1}{\phi(\sqrt{m}s)} \int_0^\infty (u+3) \phi\left(\frac{u+ms}{\sqrt{m}}\right)\;du
&=& \frac{\sqrt{m}}{\phi(\sqrt{m}s)} \int_{\sqrt{m}s}^\infty (\sqrt{m}v-ms+3) \phi(v)\;dv \\
&=& \frac{\sqrt{m}}{\phi(\sqrt{m}s)} \left(\sqrt{m} \int_{\sqrt{m}s}^\infty v \phi(v)\;dv + (3-ms) \int_{\sqrt{m}s}^\infty \phi(v)\;dv\right).
\end{eqnarray*}
Exploiting the fact that $x\phi(x)=-\phi'(x)$, and with $\Phi(x)$ the cumulative density function of the standard normal distribution,
this last expression is equal to
$$
\frac{\sqrt{m}}{\phi(\sqrt{m}\;s)} \left(\sqrt{m}\; \phi(\sqrt{m}\;s) + (3-ms) (1-\Phi(\sqrt{m}\;s))\right)
=m+\sqrt{m}(3-ms)\frac{1-\Phi(\sqrt{m}\;s)}{\phi(\sqrt{m}\;s)},
$$
so that
$$
S_m(s) \le 2+m+\sqrt{m}(3-ms)\frac{1-\Phi(\sqrt{m}\;s)}{\phi(\sqrt{m}\;s)}.
$$
The function in the last factor,
$$M(x):=\frac{1-\Phi(x)}{\phi(x)},$$ is known as the Mills ratio, and several bounds are known for it.
A well-known upper bound valid for $x>0$ is $M(x) < 1/x$ \cite{Gor41,YC15}, which follows from the fact that $M'(x) = x M(x) - 1$ and that $M$ is a strictly decreasing function.
Therefore,
$$
3\sqrt{m} M(\sqrt{m}\;s)\le 3/s,
$$
and
$$
S_m(s) \le 2+m+\frac{3}{s}-m^{3/2}s M(\sqrt{m}\;s).
$$
Setting $\mu$ to be as in \Cref{thm:main}, we have
$$
-M(x)\le\frac{\mu-x}{x^2}.
$$
Therefore,
$$
S_m(s) \le 2+m+\frac{3}{s}+\sqrt{m}\frac{\mu-\sqrt{m}\;s}{s} = 2+\frac{3+\mu\sqrt{m}}{s},
$$
and
$$
B_m(s) \le (1-s)(2s+3+\mu\sqrt{m})=2(1-s)(1+s)+(1-s)(1+\mu\sqrt{m})\le 2+(1+\mu\sqrt{m}),
$$
over the interval $0\le s\le 1$.
Explicit numerical calculations of $B_m(s)$ for $m$ up to $10^6$ suggest that the maximal value of $B_m(s)$ is bounded below
by $\mu\sqrt{m}$ and, hence, lies within a constant not exceeding 3 of our bound, which is remarkable. In the following, we prove a slightly weaker bound, which recovers the square-root scaling at leading order.
\section{Proof of the lower bound \eqref{eq:Lip-lower-bound}}\label{sec:lower-bound}
Let $r = \left(r_+, \frac{1 - r_+}{n-2}, \dotsc, \frac{1 - r_+}{n-2}, 0\right) \in \mathcal{P}$ for some $r_+ \in \left[\frac{1}{n-1}, 1\right]$, so that $r$ is a probability distribution with largest element $r_+$. Then the start of \Cref{sec:upper-bound} establishes that
\[
\Gamma_{E_C}(r) = B_m(s)
\]
where $B_m(s)$ is defined in \eqref{eq:def_Bns}, and $s := r_+$, and $m := n-2$. By \Cref{thm:HD19}, it remains to lower bound $B_m(s)$ for some $s \in \left[ \frac{1}{m+1}, 1 \right]$. As in \eqref{eq:Bm-Sm-relationship}, we write
\begin{equation}\label{eq:lb-decompose-B}
B_m(s) = s \sum_{k=1}^m c_{k, m} (1-s)^{k}, \qquad c_{k,m} := (k+1)\prod_{j=1}^{k-1}\left(1-\frac{j}{m}\right).
\end{equation}
Then
\[
\ln \frac{c_{k,m}}{k+1} = \sum_{j=1}^{k-1}\ln \left( 1 - \frac{j}{m} \right) = \sum_{j=0}^{k-1}\ln \left( 1 - \frac{j}{m} \right) \geq \int_0^k \ln \left( 1 - \frac{u}{m} \right) \d u
\]
using that since $j \mapsto \ln \left( 1 - \frac{j}{m} \right)$ is decreasing, the integral forms an underapproximation to the sum. By changing variables to $v = u/m$, we obtain
\[
\ln \frac{c_{k,m}}{k+1}\geq m \int_0^{k/m} \ln(1-v)\d v = -k - (m-k) \ln\left( 1 - \frac{k}{m} \right) \geq - \frac{k^2}{m}
\]
using that $\ln\left( 1 - \frac{k}{m} \right) \leq - \frac{k}{m}$. Hence,
\begin{equation}\label{eq:c_km-lower-bound}
c_{k,m} \geq (k+1) \exp\left( - \frac{k^2}{m}\right).
\end{equation}
From \eqref{eq:lb-decompose-B}, defining $c_{0,m} = 1$, we have
\begin{align*}
\frac{1}{s}B_m(s) &= \sum_{k=1}^{m} c_{k,m}(1-s)^k = \sum_{k=0}^{m} c_{k,m}(1-s)^k - 1\\
&\geq \sum_{k=0}^{m}(k+1) \exp\left( - \frac{k^2}{m} + k \ln(1-s) \right) - 1\\
&\geq \int_0^{m+1} u \exp \left( - \frac{u^2}{m} + u \ln(1-s) \right) \d u - 1.
\end{align*}
using \eqref{eq:c_km-lower-bound} for the first inequality. For the second inequality, notice that the sum is of the form $\sum_{k=0}^m f(k+1) g(k)$ where $f(k)=k$ is monotone increasing, and $g(k) = \exp\left( - \frac{k^2}{m} + k \ln(1-s) \right)$ is monotone decreasing. Hence, we have $f(k+1)\geq \int_k^{k+1} f(u) \d u = \|\left.f\right|_{[k,k+1]}\|_1$ and $g(k) = \sup_{k \leq u \leq k+1} g(u) = \|\left.g\right|_{[k,k+1]}\|_\infty$, using that both functions are non-negative. H\"older's inequality gives
\[
\int_k^{k+1} f(u) g(u)\d u \leq \|\left.f\right|_{[k,k+1]}\|_1\, \|\left.g\right|_{[k,k+1]}\|_\infty \leq f(k+1)g(k)
\]
and summing over $k$ yields the inequality. Next, since
\[
- \frac{u^2}{m} + u \ln(1-s) = - \frac{1}{m} \left( \left( u - \frac{m \ln(1-s)}{2} \right)^2 - \left( \frac{m \ln(1-s)}{2} \right)^2\right),
\]
we obtain
\begin{align*}
\frac{1}{s}B_m(s) &\geq \frac{1}{\exp\left( - \frac{1}{2} \left(\sqrt{\frac{m}{2}}\frac{\ln(1-s)}{2}\right)^2\right)}\int_0^{m+1}u \exp \left( - \frac{1}{2} \left( \sqrt{\frac{2}{m}} \left(u - \frac{m}{2} \ln(1-s) \right)\right)^2\right) \d u -1\\
&= \frac{1}{\phi(b)} \int_0^{m+1} u\phi(au -b) \d u-1
\end{align*}
for $a = \sqrt{\frac{2}{m}}$, $ b = \sqrt{\frac{m}{2}}\ln(1-s)$, and $\phi(x) := \frac{1}{\sqrt{2\pi}}\mathrm{e}^{-\frac{x^2}{2}}$ is the p.d.f.~of a standard normal distribution. Changing variables to $v = au - b$, we find
\begin{align*}
\frac{1}{s}B_m(s) &\geq \frac{1}{\phi(b)a^2} \left[\int_{-b}^{a(m+1)-b} v \phi(v) \d v + b \int_{-b}^{a(m+1)-b} \phi(v) \d v\right] - 1\\
&= \frac{1}{\phi(b)a^2} \left[ \phi(-b) - \phi( a(m+1) - b) + b( \Phi(a(m+1)-b) - \Phi(-b))\right] - 1
\end{align*}
where $\Phi$ is the c.d.f~of the standard normal distribution. Since $a(m+1)-b \leq \sqrt{2m}$ and $\phi(x)$ is decreasing on $x > 0$, we have $\phi( a(m+1) - b) \leq \phi(\sqrt{2m})$. Using also that $\Phi( a(m+1)-b) \leq 1$, we obtain
\begin{equation*}
\frac{1}{s}B_m(s) \geq - \frac{\phi(\sqrt{2m})}{a^2 \phi(-b)} + \frac{1}{a^2} \left( 1 + b M(-b)\right) - 1
\end{equation*}
where $M(x) = \frac{1 - \Phi(x)}{\phi(x)}$ is the Mills ratio. Substituting in $a$, we have
\begin{equation*}
\frac{1}{s}B_m(s) \geq - \frac{m\phi(\sqrt{2m})}{2\phi(-b)} + \frac{m}{2} \left( 1 + b M(-b)\right) - 1 .
\end{equation*}
Recalling the definition of $x_0$ and $\mu$ from \Cref{thm:main}, we choose $s = 1 - \mathrm{e}^{- \sqrt{\frac{2}{m}}x_0}$ so that $b = - x_0$, and $\mu = x_0 - x_0^2 M(x_0)$. Substituting for $\mu$, we have
\[
B_m(s) \geq s\left(\frac{m\mu}{2x_0} - 1 - \frac{m\phi(\sqrt{2m})}{2\phi(x_0)} \right) .
\]
Using the bound $\mathrm{e}^{-x} \leq 1 - x + \frac{x^2}{2}$ for $x\geq 0$, we have $s \geq \sqrt{\frac{2}{m}} x_0 - \frac{x_0^2}{m}$. Hence,
\[
B_m(s) \geq \frac{\mu \sqrt{m} }{\sqrt{2}} - \frac{\mu x_0}{2} - \sqrt{\frac{2}{m}} x_0 - \frac{x_0^2}{m}- \sqrt{m}\mathrm{e}^{-m}\frac{x_0}{2\sqrt{\pi}\phi(x_0)}.
\]
|
1,314,259,995,022 | arxiv | \section{Introduction}\label{sec:1}
Singularly perturbed problems have been widely studied in the fields of
fluid mechanics, aerodynamics, convection-diffusion process, etc.
In such problems, there exist boundary layers or interior layers because a
small parameter is included in the coefficient of the highest derivative.
Consider the following singularly perturbed turning point problem
in one dimension:
\begin{equation}
\label{eq1}
\left\{
\begin{aligned}
& Lu=-\varepsilon u''+p(x)u'+b(x)u=f(x),\quad x_L<x<x_R,\\
& u(x_L) = u_L, \quad u(x_R) = u_R,\\
\end{aligned}\right.
\end{equation}
where $p(x)$ has zeros $z_1<z_2<\cdots<z_m$ on $[x_L,x_R]$. We
assume $p$,
$b$, and $f$
to be sufficiently smooth. Furthermore, we suppose
\begin{equation}
b(x)-p'(x)\geq
\gamma_0>0
\label{eq:condition}
\end{equation}
to ensure the well-posedness of the dual problems. Each zero
of $p(x)$ is presumed to be a single root, i.e., $p'(z_i)\neq0$.
From the asymptotic analysis, we know that there will be boundary/interior
layers at some of $z_i$'s.
Here we consider the following types of singularities:
\begin{enumerate}
\item[(a)] Exponential boundary layers (singularly perturbed
problems without turning points);
\item[(b)] Cusp-like interior layers (interior turning point
problems);
\item[(c)] Boundary layers of other types (boundary turning point
problems).
\end{enumerate}
Singularly perturbed elliptic equations without turning
points have been widely studied by researchers.
Various numerical methods are utilized, where finite
difference methods and finite element methods play prominent roles.
El-Mistikawy and Werle propose an
exponential box scheme \cite{el1978numerical} (EMW scheme) in order to solve
the Falkner--Skan equations.
Kellogg, Berger, and Han\cite{berger1984priori}, Riordan and
Stynes\cite{stynes1986finite}, etc., find this EMW scheme
efficient when solving singularly perturbed elliptic equations.
Fitted operator numerical methods, such as exponentially
fitted finite difference method and Petrov-Galerkin method, are developed.
Another class of methods, fitted mesh
methods\cite{guo1993uniformly, sun1995finite, hemker2000varepsilon,
li2000convergence, de2011parameter}, show good adaptivity to
different problems, while remeshing is necessary in some moving front
problems.
A turning point problem is a class of equations in which the
coefficient $p(x)$ vanishes in the domain. Compared to singularly
perturbed equations without turning points, interior
layers and other types of boundary layers might appear in the solutions to
turning point problems.
O'Malley\cite{o1970boundary}
and Abrahamson\cite{abrahamson1977priori} analyze turning point problems
in some common cases.
Kellogg, Berger, and Han\cite{berger1984priori} theoretically examine
turning point problems with single interior turning points, and they use a
modified EMW scheme, which obtains a first-order (or lower) convergence
rate.
Stynes and Riordan \cite{stynes1986finite,stynes19871} build a
numerical scheme under Petrov-Galerkin framework and prove the uniform
convergence
in $L^1$ norm and $L^{\infty}$ norm.
Farrell\cite{farrell1988sufficient}
proposes sufficient conditions for an exponentially fitting difference
scheme to be uniformly convergent for a turning point problem.
Farrell and Gartland\cite{farrell1988uniform} modify the EMW scheme and
construct a
scheme
with uniform first-order convergency, where parabolic cylinder functions
are used in the computation. For other studies of turning point problems
using fitted operator methods, please refer to \cite{roos1990global,
vulanovic1990numerical,geng2014numerical,munyakazi2019robust};
for fitted
mesh methods, please refer to \cite{sun1994finite,
natesan2003parameter,chen2008stability,o2011parameter,
o2012singularly,kumar2019parameter,yadav2021almost}.
We notice that most of the existing research
assumes turning points to be away from the boundary. If a turning point
meets an endpoint, the problem is called \textit{a boundary turning point
problem},
which has
not been thoroughly studied.
In \cite{vulanovic1987non}, Vulanovi{\'c} considers a turning point problem
with an arbitrary single turning point and obtains uniform convergence
using the finite difference method on a non-equidistant mesh.
Vulanovi{\'c} and Farrell~\cite{vulanovic1993continuous} examine a
multiple boundary turning point problem and establish a priori estimates.
However, estimates for
single boundary turning point problems and numerical methods based on a
uniform mesh have not been given yet.
To fill this gap, in this paper we estimate the derivatives of
the solution to a standard single boundary turning point problem and propose
an algorithm that requires no special mesh generation.
Petrov-Galerkin finite element method (PGFEM) is used in many
problems. Dating
back to 1979, de Groen and Hemker~\cite{de1979error} propose a method that
treats problem (1) with the Petrov-Galerkin method, where the coefficient
$p(x)$ has a positive lower bound.
lower bound. The scheme in Farrell and Gartland \cite{farrell1988uniform}
is
based on the so-called patched function method, also interpreted as
Petrov-Galerkin method. In references
\cite{broersen2014robust,chakraborty2020optimal}, Petrov-Galerkin method
and discontinuous
Petrov-Galerkin method are implemented in elliptic equations in two
dimensions, demonstrating their efficiency and convergence.
The Tailored Finite Point Method (TFPM) was proposed by Han, Huang, and
Kellogg~\cite{han2008tailored}; it is designed to solve PDEs using
properties of the solutions, especially for singularly perturbed
problems.
TFPM could handle exponential singularities well, while simple difference
methods might sometimes suffer from a low convergence rate.
TFPM is later utilized in interface problems\cite{huang2009tailored},
steady-state reaction-diffusion equations\cite{han2010tailored},
convection-diffusion-reaction equations\cite{han2013tailored}, etc.
This study presents a numerical scheme to solve problem (1) with
several types of singularities. We prove that the width of the boundary
layer of a single boundary turning point problem is
$O(\sqrt{\varepsilon})$, which is a weaker version of the result in
\cite{vulanovic1993continuous}. The derivative of the solution, $u'$, is
bounded by $C(1+\varepsilon^{-1/2})$ near the boundary layer and
bounded by $C(1+x^{-1})$ away from the layer.
The rest of this paper is organized as follows.
In section 2, a priori estimates for continuous problems will be shown in
each case. We use PGFEM to solve
problem \eqref{eq1}, where we choose (either exact or approximate)
solutions to
dual problems as test functions.
We show details related to the numerical implementation in section 3.
Numerical results demonstrate our scheme's efficiency
and uniform first-order convergence in section 4.
Finally, we give a brief conclusion in section 5.
\section{A Priori estimate}\label{sec:2}
In this section, we will present some a priori estimates for cases
(a), (b), and (c), respectively. First we briefly recall some results from previous
work for cases (a) and (b). We will prove our estimates for case (c)
later.
\subsection{Exponential boundary layer}
Suppose the velocity coefficient $p(x)\geq p_0>0$ (or otherwise, it has a
negative upper bound). Equation \eqref{eq1} is now written as:
\begin{equation}
\label{eq1a}
\left\{
\begin{aligned}
& Lu\equiv -\varepsilon u''+p(x)u'+b(x)u=f(x),\quad -1<x<1,\\
& u(-1) = u_L, \quad u(1) = u_R,\\
& p(x)\geq p_0>0,\quad b(x)\geq b_0\geq 0.\\
\end{aligned}\right.
\end{equation}
The solution to \eqref{eq1a} admits a boundary layer at $x=1$ (at $x=-1$ if
$p(x)\leq p_0<0$), and it is
shown\cite{kellogg1978analysis, berger1984priori} that the following
estimates
hold:
\begin{equation}
\label{estimate1a}
|u^{(k)}(x)|\leq C\Big(1+\varepsilon^{-k}\exp(-\frac{\eta
(1-x)}{\varepsilon})\Big),\quad x\in(-1,1),\quad k=0,1,2,\cdots,
\end{equation}
where $C,\eta$ are positive constants independent of $\varepsilon$. We have
the following property at once:
\begin{proposition}
\rm\label{prop:exp}
Suppose $u$ is the solution to \eqref{eq1a} and $p(x)$ is lower
bounded, then there exists a constant $C$
independent of $\varepsilon$, such that
\begin{equation}
\label{eq:prop:exp}
|(1-x)u'(x)|\leq C,\quad \forall x\in(-1,1).
\end{equation}
\end{proposition}
\subsection{Cusp-like interior layer}
Suppose there is only one turning point $x=0$, and equation
\eqref{eq1} reads:
\begin{equation}
\label{eq1b}
\left\{
\begin{aligned}
& Lu\equiv-\varepsilon u''+p(x)u'+b(x)u=f(x),\quad -1<x<1,\\
& u(-1) = u_L,\quad u(1) = u_R,\\
& p(0)=0, p'(0)<0,|p'(x)|\geq\frac{1}{2}|p'(0)|,\quad b(x)\geq b_0>
0.\\
\end{aligned}\right.
\end{equation}
In some papers, $x=0$ is called \emph{an attractive turning point} because
flows on both sides are toward the turning point, and \eqref{eq1b} is
called \emph{an attractive turning point problem}. Such problems are
characterized by the parameter $\lambda=-b(0)/p'(0)$. It is
shown\cite{abrahamson1977priori,berger1984priori}
that the solution has an interior layer when $\lambda\in(0,1]$, and
the following estimates hold:
\begin{equation}
\label{estimate1b}
|u^{(k)}(x)|\leq C\big(|x|+\sqrt{\varepsilon}\big)^{\lambda-k},\quad
x\in
(-1,1),\quad k=0,1,2,\cdots.
\end{equation}
Similar to the previous case, the first derivative of the solution turns
out bounded after multiplying a factor $x$:
\begin{proposition}
\rm\label{prop:cusp}
Suppose $u$ is the solution to \eqref{eq1b}, then there exists a
constant $C$ independent of $\varepsilon$, such that
\begin{equation}
\label{eq:prop:cusp}
|xu'(x)|\leq C,\quad \forall x\in(-1,1),
\end{equation}
on the assumption that $\lambda\in(0,1]$.
\end{proposition}
If $p'(0)>0$, the problem is also called \emph{a repulsive turning point
problem}, and its solution is smooth near the turning point. Thus we
need
no
additional treatment when dealing with such turning points.
\subsection{Boundary turning point problem}
Consider the turning point is positioned at an endpoint. We set the
interval as $[0,1]$, and \eqref{eq1} becomes:
\begin{equation}
\label{eq1c}
\left\{
\begin{aligned}
& Lu=-\varepsilon u''+p(x)u'+b(x)u=f(x),\quad 0<x<1,\\
& u(0) = u_L,\quad u(1) = u_R,\\
& p(0)=0,|p'(x)|\geq\frac{1}{2}|p'(0)|,\quad b(x)\geq b_0> 0.\\
\end{aligned}\right.
\end{equation}
For multiple boundary turning point problems, i.e., $p^{(k)}(0)=0$ for
$k=1,2,\cdots,m$, it is
proved\cite{vulanovic1993continuous} that there
exist positive constants $C,\eta$ independent of $\varepsilon$ such that
the
following estimates hold true:
\begin{equation}
\label{eq:mbtpp}
|u^{(k)}(x)|\leq C\bigg(
1+\varepsilon^{-k/2}\exp\big(-\frac{\eta
x}{\sqrt{\varepsilon}}\big)
\bigg),\quad x\in(0,1),\quad k=0,1,2,\cdots.
\end{equation}
One can deduce the following result immediately:
\begin{equation}
\label{eq:btpp}\begin{aligned}
|u^{(k)}(x)|&\leq C\min\{(1+\varepsilon^{-k/2}),(1+x^{-k})\}\\
&=C(1+(\max\{x,\sqrt{\varepsilon}\})^{-k}).\\
\end{aligned}
\end{equation}
Now assume that the boundary turning point is single, i.e., $p'(0)\ne0$.
Boundary behaviors of such problems differ from those of \eqref{eq1a}. We
introduce
the
following approximated problem:
\begin{equation}
\label{eq1c'}
\left\{
\begin{aligned}
& \tilde{L}u\equiv-\varepsilon u''+p'(0) xu'+b(0)u=f(0), \quad
0<x<1,\\
& u(0) = u_L,\quad u(1) = u_R,\\
& p'(0)\neq0,\quad b(0)>0.\\
\end{aligned}\right.
\end{equation}
We divide the discussion of problem \eqref{eq1c'} into two cases according
to the sign of
$p'(0)$.
Inspired by \cite{berger1984priori}, we could represent the solution as a
linear combination of Weber's parabolic cylinder functions, which we use to
analyze the bounds of the derivatives.
\subsubsection{Preparations for estimates}
We introduce a lemma to estimate the solution $u$ more precisely.
\begin{lemma}
\rm\label{lemma:pDu}
There exists one and only one solution $u$ to \eqref{eq1c}. Besides,
there is a
constant $C$ independent of
$\varepsilon$, such that
\begin{equation}
\label{eq:pDu}
|p(x)u'(x)|\leq C, \quad\forall x\in[0,1/2].
\end{equation}
\end{lemma}
\begin{proof}
It suffices to show that $\varepsilon u''(x)$ is bounded by $C$ on
$[0,1/2]$, since $u$ can be bounded by $f$ using the maximum
principle.
First assume that $p'(0)=-\alpha<0$, and let $z(x)=u''(x)$.
Differentiate \eqref{eq1c} once, and we have:
$$-\varepsilon z'+p(x)z=s(x),$$
where $s(x)=s_1(x)+s_2(x)$,
$$s_1(x)=f'(x)-b'(x)u,$$
$$s_2(x)=-(p'(x)+b(x))u'.$$
Let $$P(x)=-\int_{0}^x p(t)dt.$$
Since $$p'(x)\leq -\frac{1}{2}\alpha<0,$$
it holds that:
\begin{displaymath}
\begin{aligned}
p(x)\leq-\frac{1}{2}\alpha x\leq0,\\
P(x)\geq \frac{\alpha}{4}x^2\geq0.\\
\end{aligned}
\end{displaymath}
Thus
\begin{displaymath}
\begin{aligned}
-\frac{P(x)}{\varepsilon}\leq0,\\
P(t)-P(x)=\int_{t}^{x}p(\tau)d\tau\leq0,\\
\end{aligned}
\end{displaymath}
for $0<t<x.$
By variation of constants we have
\begin{equation}
\label{eq:z(x)}
z(x)=z(0)\exp\Big(-\frac{P(x)}{\varepsilon}\Big)-
\varepsilon^{-1}\int_{0}^{x}\exp\Big(\frac{P(t)-P(x)}{\varepsilon}\Big)s(t)dt.
\end{equation}
Taking $x=0$ in \eqref{eq1c},
$$z(0)=\varepsilon^{-1}(b(0)u(0)-f(0))=C\varepsilon^{-1}.$$
The integral in the second term of \eqref{eq:z(x)} is split into terms
with $s_1$ and
$s_2$:
$$I_1=\int_{0}^{x}\exp\Big(\frac{P(t)-P(x)}{\varepsilon}\Big)s_1(t)dt\leq\int_{0}^{x}1\cdot
Cdt=C;$$
$$\begin{aligned}
I_2
&=\int_{0}^{x}\exp\Big(\frac{P(t)-P(x)}{\varepsilon}\Big)s_2(t)dt\\
&=-\int_{0}^{x}\exp\Big(\frac{P(t)-P(x)}{\varepsilon}\Big)\Big(p'(t)+b(t)\Big)u'(t)dt\\
&=-\exp\Big(\frac{P(t)-P(x)}{\varepsilon}\Big)\Big(p'(t)+b(t)\Big)u(t)\bigg|_{t=0}^{t=x}\\
&\quad+\int_{0}^{x}\exp\Big(\frac{P(t)-P(x)}{\varepsilon}\Big)\Big(p''(t)+b'(t)\Big)u(t)dt\\
&\quad+\int_{0}^{x}\frac{d}{dt}\Big[\exp\Big(\frac{P(t)-P(x)}{\varepsilon}\Big)\Big]\Big(p'(t)+b(t)\Big)u(t)dt\\
&\leq
C+C+C\int_{0}^{x}\frac{d}{dt}\Big[\exp\Big(\frac{P(t)-P(x)}{\varepsilon}\Big)\Big]dt\\
&=C,\\
\end{aligned}$$
where we use the second mean value theorem for integrals before the
inequality. Thus we have shown that $\varepsilon u''(x)$ is
bounded on $[0,1]$ if $p'(0)<0$.
In the case $p'(0)=\alpha>0$, the same argument could be repeated
with the
following modification:
$$
z(x)=\exp\Big(\frac{P(\frac{1}{2})-P(x)}{\varepsilon}\Big)z\Big(\frac{1}{2}\Big)-
\varepsilon^{-1}\int_{x}^{1/2}
\exp\Big(\frac{P(\tau)-P(x)}{\varepsilon}\Big)s(\tau)d\tau,$$
where $|z(\frac{1}{2})|$
is bounded by $C$.
The result here applies only for
$x\in[0,\frac{1}{2}]$ because \eqref{eq1c} has a boundary layer at
$x=1$.
\end{proof}
We have shown that $p(x)u'(x)$ is bounded near the singular point $x=0$.
The original problem \eqref{eq1c} is solved by decomposing $u=u_1+u_2+u_0$:
\begin{equation}
\begin{aligned}
&u_0=\frac{f(0)}{b(0)},\\
&\left\{
\begin{aligned}
&\tilde{L}u_1=0,\quad 0<x<1,\\
&u_1(0)=u_L-u_0,\quad u_1(1)=u_R-u_0,\\
\end{aligned}
\right.\\
&\left\{
\begin{aligned}
&\tilde{L}u_2=g(x),\quad 0<x<1,\\
&u_2(0)=u_2(1)=0,\\
\end{aligned}
\right.
\end{aligned}
\label{eq:decomposition_of_u}
\end{equation}
where $\tilde{L}$ is defined in \eqref{eq1c'}, and
$$g(x)\equiv\big(f(x)-f(0)\big)-\big(p(x)-p'(0)
x\big)u'-\big(b(x)-b(0)\big)u.$$
From Lemma \ref{lemma:pDu} we could write $$g(x)=h(x)x.$$
We solve $u_1$ by direct representation with parabolic cylinder functions,
while $u_2$ is related to Green's function for $\tilde{L}$.
\subsubsection{Basic property of parabolic cylinder
function}\label{sec:2.3.2}
Parabolic cylinder functions $U(a,x),V(a,x)$, using Weber's
notations, are linear independent solutions to the equation:
\begin{equation}
-y''+\Big(a+\frac{x^2}{4}\Big)y=0,
\notag
\label{eq:pcf}
\end{equation}
where $a$ is a coefficient.
The following properties will be used later:
\begin{align}
&\pi V(a,x)=\Gamma\Big(\frac{1}{2}+a\Big)\Big(\sin \pi a\cdot
U(a,x)+U(a,-x)\Big);\tag{\ref{eq:pcf}{.1}}\label{eq:imag1}\\
&\Gamma\Big(\frac{1}{2}+a\Big)U(a,x)=\pi \sec^2 \pi
a\Big(V(a,-x)-\sin \pi a\cdot V(a,x)\Big);\tag{\ref{eq:pcf}{.2}}\\
&\sqrt{2\pi}U(a,ix)=\Gamma\Big(\frac{1}{2}-a\Big)\Big(e^{-i\pi(-\frac{a}{2}-\frac{1}{4})}U(-a,x)+e^{i\pi(-\frac{a}{2}-
\frac{1}{4})}U(-a,-x)\Big);\tag{\ref{eq:pcf}{.3}}\label{eq:imag2}\\
&U'(a,x)+\frac{1}{2}xU(a,x)+\Big(a+\frac{1}{2}\Big)U(a+1,x)=0;\tag{\ref{eq:pcf}{.4}}\\
&U'(a,x)-\frac{1}{2}xU(a,x)+U(a-1,x)=0;\tag{\ref{eq:pcf}{.5}}\\
&V'(a,x)+\frac{1}{2}xV(a,x)-V(a+1,x)=0;\tag{\ref{eq:pcf}{.6}}\\
&V'(a,x)-\frac{1}{2}xV(a,x)-\Big(a-\frac{1}{2}\Big)V(a-1,x)=0;\tag{\ref{eq:pcf}{.7}}\\
&U(a,x)= \exp\Big(-\frac{1}{4}x^2\Big)x^{-a-\frac{1}{2}}\delta_1,
\quad x\geq C_0;\tag{\ref{eq:pcf}{.8}}\\
&V(a,x)=
\sqrt{\frac{2}{\pi}}\exp\Big(\frac{1}{4}x^2\Big)x^{a-\frac{1}{2}}\delta_2,
\quad x\geq C_0.\tag{\ref{eq:pcf}{.9}}\label{prop:pcf}
\end{align}
$C_0=O(1)$ is a constant related to $a$, and coefficients
$\delta_{1},\delta_2$ satisfy $$|\delta_i-1|\leq \frac{1}{3}.$$
\subsubsection{Green's function of the operator $\tilde{L}$}
Denote by $\mu_0$ the solution to
$$\left\{\begin{aligned}
&\tilde{L}u=0,\\
&u(0)=0,\quad u(1)=1,\\
\end{aligned}\right.$$
and by $\mu_1$ the solution to
$$\left\{\begin{aligned}
&\tilde{L}u=0,\\
&u(0)=1,\quad u(1)=0.\\
\end{aligned}\right.$$
The Wronskian of $\mu_0$ and $\mu_1$ is
$$W(x)=W(\mu_0,\mu_1)=\mu_0(x)\mu_1'(x)-\mu_1(x)\mu_0'(x).$$
The Green's function of $\tilde{L}$ is piecewise defined on $[0,1]$:
\begin{displaymath}
G(x,\tau)=\left\{
\begin{aligned}
&-\varepsilon^{-1}\mu_0(x)\mu_1(\tau)\exp\Big(\frac{p'(0)}{2\varepsilon}(1-\tau^2)\Big)/W(1),&0\leq
x\leq\tau\leq1,\\
&-\varepsilon^{-1}\mu_1(x)\mu_0(\tau)\exp\Big(\frac{p'(0)}{2\varepsilon}(1-\tau^2)\Big)/W(1),&0\leq
\tau\leq x\leq1,\\
\end{aligned}
\right.
\end{displaymath}
which satisfies
$$\tilde{L}G(x,\tau)=\delta(x-\tau),$$
$$G(0)=G(1)=0.$$
$u_2$ defined in \eqref{eq:decomposition_of_u} could be represented as
$$u_2(x)=\int_{0}^{1} G(x,\tau)\tau h(\tau) d\tau.$$
Thus estimates for $u_2$ turn into ones for $G(\cdot,\tau)$ and its
derivatives.
\subsubsection{The case $p'(0)=\alpha>0$}
We first estimate derivatives of the solution $u$ in the next
lemma.
\begin{lemma}
\rm\label{lemma:positive}
Assume $u$ is the solution to \eqref{eq1c} with $p'(0)>0$, and
$\rho=C_0\sqrt{\varepsilon}$, where $C_0$ is defined in section
\ref{sec:2.3.2}. For
$k=1,2,\cdots$,
\begin{equation}
\label{eq:positive}
|u^{(k)}(x)|\leq\left\{
\begin{aligned}
&C(1+\rho^{-k}), &0\leq x\leq\rho,\\
&C(1+x^{-k}),&\rho\leq x\leq 1/2.\\
\end{aligned}
\right.
\end{equation}
Rewriting these estimates in a more compact form, we have:
\begin{equation}
\tag{\ref{eq:positive}{'}}
|u^{(k)}(x)|\leq C(1+(\max\{x,\rho\})^{-k}),\quad 0\leq x\leq 1/2.
\end{equation}
\end{lemma}
\begin{proof}
Since $u_0$ in \eqref{eq:decomposition_of_u} has no contribution to the
derivative, for the sake of simplicity, we replace $u$ by $u_1+u_2$ and
still denote it by $u$.
We introduce the following change of variable, for both $u_1$ and $u_2$
satisfy an equation of $\tilde{L}$:
$$\tilde{x}=\frac{x}{\sqrt{\varepsilon/\alpha}},$$
$$u(x)=\tilde{u}(\tilde{x})\exp\Big(\frac{\tilde{x}^2}{4}\Big).$$
Denoting $\beta=b(0)/\alpha>0$, we obtain the equation for $\tilde{u}$:
$$-\tilde{u}''+\Big(\frac{\tilde{x}^2}{4}+\beta-\frac{1}{2}\Big)\tilde{u}=0.$$
$\tilde{u}$ admits linear independent solutions
$U(\beta-\frac{1}{2},\tilde{x}),V(\beta-\frac{1}{2},\tilde{x})$. Hence
$$u(x)=c_1\exp\Big(\frac{\tilde{x}^2}{4}\Big)U\Big(\beta-\frac{1}{2},\tilde{x}\Big)+
c_2\exp\Big(\frac{\tilde{x}^2}{4}\Big)V\Big(\beta-\frac{1}{2},\tilde{x}\Big).$$
Coefficients $c_1,c_2$ are determined by boundary conditions:
\begin{displaymath}
\left(\begin{array}{cc}
U\Big(\beta-\frac{1}{2},0\Big) &
V\Big(\beta-\frac{1}{2},0\Big)\\
\exp(\frac{\alpha}{4\varepsilon})U\Big(\beta-\frac{1}{2},\frac{1}{\sqrt{\varepsilon/\alpha}}\Big)
&
\exp(\frac{\alpha}{4\varepsilon})V\Big(\beta-\frac{1}{2},\frac{1}{\sqrt{\varepsilon/\alpha}}\Big)
\\
\end{array}\right)
\left(\begin{array}{c}
c_1\\c_2\\
\end{array}
\right)
=\left(\begin{array}{c}
u_L\\u_R\\
\end{array}
\right).
\end{displaymath}
Call the matrix on the left-hand side $A$. Denoting by $K_i$
constants of $O(1)$, we can rewrite $A$ in
the asymptotic form:
\begin{displaymath}
A\approx\left(\begin{array}{cc}
K_1 & K_2\\
K_3\varepsilon^{\beta/2} &
K_4\exp(\frac{\alpha}{2\varepsilon})\varepsilon^{(1-\beta)/2} \\
\end{array}\right),
\end{displaymath}
\begin{displaymath}
A^{-1}\approx\left(\begin{array}{cc}
K_4 &
-K_2\exp(-\frac{\alpha}{2\varepsilon})\varepsilon^{(\beta-1)/2}\\
-K_3\exp(-\frac{\alpha}{2\varepsilon})\varepsilon^{\beta-1/2} &
K_1\exp(-\frac{\alpha}{2\varepsilon})\varepsilon^{(\beta-1)/2}
\\
\end{array}\right).
\end{displaymath}
Therefore the coefficients are represented as follows:
\begin{displaymath}
\begin{aligned}
\left(\begin{array}{c}
c_1\\c_2\\
\end{array}
\right)&=A^{-1}\left(\begin{array}{c}
u_L\\u_R\\
\end{array}
\right).
\end{aligned}
\end{displaymath}
We omit all the constants of $O(1)$ for simplicity. Considering
$u_L,u_R=O(1)$, the derivatives are in the following
form:
\begin{displaymath}
\begin{aligned}
u^{(k)}(x)&=\varepsilon^{-k/2}\bigg[
c_1\exp\Big(\frac{\tilde{x}^2}{4}\Big)\Pi^k_{i=1}(-\beta-i+1)U\Big(\beta+k-\frac{1}{2},\tilde{x}\Big)
+c_2\exp\Big(\frac{\tilde{x}^2}{4}\Big)V\Big(\beta+k-\frac{1}{2},\tilde{x}\Big)
\bigg]\\
&=\varepsilon^{-k/2}\bigg[c_1'\exp\Big(\frac{\tilde{x}^2}{4}\Big)U\Big(\beta+k-\frac{1}{2},\tilde{x}\Big)
+c_2\exp(\frac{\tilde{x}^2}{4})V\Big(\beta+k-\frac{1}{2},\tilde{x}\Big)
\bigg].
\end{aligned}
\end{displaymath}
For convenience, write $u_1(x)=u_L\mu_1(x)+u_R\mu_0(x)$, and we
estimate
$\mu_0$ and $\mu_1$ first in order to
estimate $u_1$ and its derivatives on $[0,1/2]$:
\begin{displaymath}
\begin{aligned}
&|\mu_0^{(k)}(x)|\leq\left\{
\begin{aligned}
&
C\exp(-\frac{\alpha}{2\varepsilon})\varepsilon^{\frac{\beta-k-1}{2}},
& |x|\leq\rho,\\
& C\Big(
\exp(-\frac{\alpha}{2\varepsilon})\varepsilon^{\beta-\frac{1}{2}}x^{-\beta-k}
+\varepsilon^{-k}\exp\big(-\frac{\alpha}{2\varepsilon}(1-x^2)\big)x^{\beta+k-1}
\Big), &\rho\leq|x|\leq1,\\
\end{aligned}
\right.\\
&|\mu_1^{(k)}(x)|\leq\left\{
\begin{aligned}
& C\varepsilon^{-\frac{k}{2}}, & |x|\leq\rho,\\
& C\Big(\varepsilon^{\frac{\beta}{2}}x^{-\beta-k}+
\exp\big(-\frac{\alpha}{2\varepsilon}(1-x^2)\big)\varepsilon^{\frac{\beta}{2}-k}
x^{\beta+k-1} \Big), &\rho\leq|x|\leq1,\\
\end{aligned}
\right.\\
\end{aligned}
\end{displaymath}
which hold for $k=0,1,2,\cdots$. Thus $$|W(1)|\geq
C\varepsilon^{\beta/2-1}.$$
If $x\leq \rho$, from $$u_1^{(k)}=u_L\mu_1^{(k)}+u_R\mu_0^{(k)},$$ we
have:
$$\begin{aligned}
|u_1^{(k)}(x)|&\leq
C\Big(|u_R||\mu_0^{(k)}(x)|+|u_L||\mu_1^{(k)}(x)|\Big)\\
&\leq
C\Big(|u_R|\exp(-\frac{\alpha}{2\varepsilon})\varepsilon^{\frac{\beta-k-1}{2}}+
|u_L|\varepsilon^{-\frac{k}{2}}\Big),\quad k=1,2,\cdots.\\
\end{aligned}$$
If $\rho\leq x\leq1/2$,
$$\exp(-\frac{\alpha}{2\varepsilon}(1-x^2))\leq\exp(-\frac{\alpha'}{2\varepsilon}),$$
where $\alpha'=\frac{3}{4}\alpha,$ we have
\begin{displaymath}
\begin{aligned}
|u_1^{(k)}(x)|&\leq
C|u_L|\Big(\varepsilon^{\frac{\beta}{2}}x^{-\beta-k}+
\exp\big(-\frac{\alpha}{2\varepsilon}(1-x^2)\big)\varepsilon^{\frac{\beta}{2}-k}
x^{\beta+k-1} \Big)\\
&\quad +C|u_R|\Big(
\exp(-\frac{\alpha}{2\varepsilon})\varepsilon^{\beta-\frac{1}{2}}x^{-\beta-k}
+\varepsilon^{-k}\exp\big(-\frac{\alpha}{2\varepsilon}(1-x^2)\big)x^{\beta+k-1}
\Big)\\
&\leq C(1+x^{-k}),\quad k=1,2,\cdots.
\end{aligned}
\end{displaymath}
Constant $C$ may depend on $\beta,k,\alpha,C_0$.
Derivatives of $u_2$ are estimated by induction.
\begin{displaymath}
\begin{aligned}
|u_2'(x)|&\leq C\int_{0}^{1}|G_x(x,\tau)|\tau d\tau\\
&=\left\{
\begin{aligned}
&\int_{0}^{x}+\int_{x}^{\rho}+\int_{\rho}^{1},&0\leq
x\leq\rho,\\
&\int_{0}^{\rho}+\int_{\rho}^{x}+\int_{x}^{1},&\rho\leq
x\leq\frac{1}{2}.\\
\end{aligned}
\right.
\end{aligned}
\end{displaymath}
Denote these six integrals as $I_{1,2,3}$ and $I_{1,2,3}'$. The first
three are easy to handle:\\
$I_1: 0\leq\tau\leq x\leq\rho$:\\
$$\begin{aligned}
&|G_x|\leq
C\varepsilon^{-1}\exp\Big(-\frac{\alpha}{2\varepsilon}\tau^2\Big),\\
&I_1=\int_{0}^x |G_x|\tau d\tau\leq C.\\
\end{aligned}$$
$I_2: 0\leq x\leq \tau\leq\rho$:\\
$$\begin{aligned}
&|G_x|\leq
C\varepsilon^{-1}\exp\Big(-\frac{\alpha}{2\varepsilon}\tau^2\Big),\\
&I_2=\int_{x}^{\rho} |G_x|\tau d\tau\leq C.\\
\end{aligned}$$
$I_3: 0\leq x\leq \rho\leq \tau$:\\
$$\begin{aligned}
&|G_x|\leq
C\Big(\varepsilon^{\frac{\beta}{2}-1}\exp(-\frac{\alpha}{2\varepsilon}\tau^2)\tau^{-\beta}+
\varepsilon^{\frac{\beta}{2}-1}\exp(-\frac{\alpha}{2\varepsilon})\tau^{\beta-1}\Big),\\
&I_3=\int_{\rho}^{1} |G_x|\tau d\tau\leq C\int_{\rho}^{1}
\exp\Big(-\frac{\alpha}{2\varepsilon}\tau^2\Big)\tau^{1-\beta}\varepsilon^{\frac{\beta-1}{2}}
\varepsilon^{-\frac{1}{2}}d\tau+C\exp\Big(-\frac{\alpha}{2\varepsilon}\Big)\varepsilon^{\frac{\beta}{2}-1}\\
&\quad \leq C\varepsilon^{-\frac{1}{2}}+C\\
&\quad =C(1+\rho^{-1}).\\
\end{aligned}$$
Thus when $x\leq\rho$, we have $$|u_2'(x)|\leq C(1+\rho^{-1}).$$
For $x\geq\rho$, consider the last three integrals:\\
$I_1': 0\leq\tau\leq\rho\leq x\leq\frac{1}{2}$:
$$\begin{aligned}
|G_x|&\leq\varepsilon^{\frac{\beta-1}{2}}\exp\Big(-\frac{\alpha}{2\varepsilon}\tau^2\Big)x^{-\beta-1}
+\varepsilon^{\frac{\beta-1}{2}}x^{\beta}\exp\Big(-\frac{\alpha}{2\varepsilon}(1-x^2)\Big)\exp\Big(-\frac{\alpha}{2\varepsilon}\tau^2\Big)\varepsilon^{-1}\\
&\leq
C\varepsilon^{-1}\exp\Big(-\frac{\alpha}{2\varepsilon}\tau^2\Big),\\
I_1'&=\int_{0}^{\rho} |G_x|\tau d\tau\leq C.\\
\end{aligned}$$
$I_2': \rho\leq\tau\leq x\leq\frac{1}{2}$:
$$\begin{aligned}
|G_x|&\leq
\bigg(x^{-\beta-1}+\varepsilon^{-1}\exp\big(-\frac{\alpha}{2\varepsilon}(1-x^2)\big)x^{\beta}
\bigg)\bigg(
\exp\big(-\frac{\alpha}{2\varepsilon}\tau^2\big)\varepsilon^{\beta-\frac{1}{2}}\tau^{-\beta}+\tau^{\beta-1}
\bigg),\\
|G_x|\tau&\leq
\bigg(x^{-\beta-1}+\varepsilon^{-1}\exp\big(-\frac{\alpha}{2\varepsilon}(1-x^2)\big)x^{\beta}\bigg)
\bigg(\exp\big(-\frac{\alpha}{2\varepsilon}\tau^2\big)\varepsilon^{\beta-\frac{1}{2}}+\tau^\beta\bigg)\\
&\leq\Big(x^{-\beta-1}+\varepsilon^{-1}
\exp\big(-\frac{\alpha'}{2\varepsilon}\big)\Big)\Big(C\varepsilon^{\frac{\beta}{2}}+\tau^\beta\Big),\\
I_2'&=\int_{\rho}^x |G_x|\tau d\tau\leq C(1+x^{-1}).\\
\end{aligned}$$
$I_3': \rho\leq x\leq\tau\leq1,x\leq\frac{1}{2}$:
$$\begin{aligned}
&|G_x|\leq\bigg(\varepsilon^{\beta-\frac{1}{2}}\exp\big(-\frac{\alpha}{2\varepsilon}\big)x^{-\beta-1}
+\varepsilon^{-1}\exp\big(-\frac{\alpha}{2\varepsilon}(1-x^2)\big)x^{\beta}
\bigg)\\
&\quad\quad\quad\cdot\bigg(
\exp\big(\frac{\alpha}{2\varepsilon}(1-\tau^2)\big)\tau^{-\beta}+\tau^{\beta-1}
\bigg),\\
&I_3'=\int_{x}^1 |G_x|\tau d\tau\leq C(1+x^{-1}).\\
\end{aligned}$$
Therefore we have $$|u_2'(x)|\leq C(1+x^{-1}),$$
when $x\geq\rho$.
Considering that the behavior near $x=1$
has been well studied as an exponential boundary layer, we restrict
the a priori estimate for $u$ to $[0,1/2]$:
\begin{displaymath}
|u'(x)|\leq\left\{
\begin{aligned}
&C(1+\rho^{-1}), &0\leq x\leq\rho,\\
&C(1+x^{-1}),&\rho\leq x\leq 1/2.\\
\end{aligned}
\right.
\end{displaymath}
For higher derivatives $u^{(k)}(k\geq2)$, we
could
differentiate $(k-1)$
times the equation in \eqref{eq1c} and split
$v=u^{(k-1)}$ into three parts as $u$ is decomposed in
\eqref{eq:decomposition_of_u}. It is noticed that $v$ satisfies a
similar equation
to
\eqref{eq1c'}, with $\beta$ replaced by $\beta+k-1$ and boundary
conditions in
asymptotic forms. Actually, we can obtain by induction, for
$k=1,2,\cdots$:
$$\begin{aligned}
&u^{(k)}(0)=\varepsilon^{-\frac{k}{2}},\\
&u^{(k)}(1)=\varepsilon^{-k}.\\
\end{aligned}$$
Simple calculations yield the following result:
\begin{displaymath}
|u^{(k)}(x)|\leq\left\{
\begin{aligned}
&C(1+\rho^{-k}), &0\leq x\leq\rho,\\
&C(1+x^{-k}),&\rho\leq x\leq 1/2.\\
\end{aligned}
\right.
\end{displaymath}
\end{proof}
\begin{remark}
\rm\label{remark_positive}
If we take $x\approx1$ into consideration, the estimates could be
modified as:
\begin{equation}
|u^{(k)}(x)|\leq\left\{
\begin{aligned}
&C(1+\rho^{-k}), &0\leq x\leq\rho,\\
&C\Big(1+x^{-k}+(1-x)^{-k}\Big),&\rho\leq x\leq 1;\\
\end{aligned}
\right.\quad k=1,2,\cdots.
\end{equation}
Note that for $\frac{1}{2}\leq x\leq1,$
$$\exp(-\frac{\alpha}{2\varepsilon}(1-x^2))\leq
C(\frac{\varepsilon}{1-x})^k.$$
\end{remark}
The following proposition holds as a direct conclusion, and we will use
these propositions to show the convergence of the numerical
method.
\begin{proposition}
\rm\label{prop:positive}
For the solution $u$ of \eqref{eq1c}, when $p'(0)>0$, there exists a
constant
$C$, satisfying
\begin{equation}
\label{eq:prop:positive}
|xu'(x)|\leq C, \quad x\leq1/2,
\end{equation}
\begin{equation}
\label{eq:prop:positive2}
\tag{\ref{eq:prop:positive}{'}}
|(1-x)u'(x)|\leq C, \quad x\geq1/2.
\end{equation}
\end{proposition}
\subsubsection{The case $p'(0)=-\alpha<0$}
In this case, estimates for derivatives of $u$ are mildly different.
\begin{lemma}
\rm\label{lemma:negative}
Assume that $u$ is the solution to \eqref{eq1c} when $p'(0)<0$. Let
$\beta=b(0)/\alpha>0$, and $\rho$ is
defined as above. Then we have
the following estimates:
\begin{equation}
\label{eq:negative}
\begin{aligned}
|u^{(k)}(x)|&\leq\left\{
\begin{aligned}
&C\Big(1+\rho^{\beta-k}+|u_L|\rho^{-k}\Big),&0\leq
x\leq\rho,\\
&C\Big(1+x^{\beta-k}+|u_L|x^{-k}\Big),&\rho\leq x\leq1;\\
\end{aligned}
\right.\quad k=1,2,\cdots,\\
\end{aligned}
\end{equation}
or otherwise in a compact form:
\begin{equation}
\label{eq:negative2}
\tag{\ref{eq:negative}{'}}
|u^{(k)}(x)|\leq
C(1+(\max\{x,\rho\})^{\beta-k}+|u_L|(\max\{x,\rho\})^{-k}),\quad
0\leq
x\leq
1,\quad k=1,2,\cdots.
\end{equation}
\end{lemma}
\begin{proof}
If we let
$\tilde{x}=\frac{x}{\sqrt{-\varepsilon/\alpha}}$, the new variable
becomes pure imaginary.
From the property that evaluation of $U(a,iz)$ and $V(a,iz)$ could be
represented by
$U(-a,z),V(-a,z)$ (c.f. \eqref{eq:imag1},\eqref{eq:imag2}), we assume
the
solution to be:
$$u(x)=c_1\exp\Big(\frac{\tilde{x}^2}{4}\Big)U\Big(\frac{1}{2}+\beta,|\tilde{x}|\Big)+
c_2\exp\Big(\frac{\tilde{x}^2}{4}\Big)V\Big(\frac{1}{2}+\beta,|\tilde{x}|\Big).$$
For simplicity we denote
$\hat{x}=|\tilde{x}|=\frac{x}{\sqrt{\varepsilon/\alpha}}$, which gives:
$$u(x)=c_1\exp\Big(-\frac{\hat{x}^2}{4}\Big)U\Big(\frac{1}{2}+\beta,\hat{x}\Big)+
c_2\exp\Big(-\frac{\hat{x}^2}{4}\Big)V\Big(\frac{1}{2}+\beta,\hat{x}\Big).$$
Again we solve coefficients $c_1,c_2$ from boundary conditions
$u_L,u_R$. Let
\begin{displaymath}
\begin{aligned}
A&=\left(\begin{array}{cc}
U\Big(\frac{1}{2}+\beta,0\Big) &
V\Big(\frac{1}{2}+\beta,0\Big)\\
\exp(-\frac{\alpha}{4\varepsilon})U\Big(\frac{1}{2}+\beta,\frac{1}{\sqrt{\varepsilon/\alpha}}\Big)
&
\exp(-\frac{\alpha}{4\varepsilon})V\Big(\frac{1}{2}+\beta,\frac{1}{\sqrt{\varepsilon/\alpha}}\Big)
\\
\end{array}\right)\\
&\approx\left(\begin{array}{cc}
K_1 & K_2\\
K_3\exp(-\frac{\alpha}{2\varepsilon})\varepsilon^{(1+\beta)/2}
& K_4\varepsilon^{-\beta/2} \\
\end{array}\right).\\
\end{aligned}
\end{displaymath}
The inverse of $A$ reads:
\begin{displaymath}
\begin{aligned}
A^{-1}&\approx\left(\begin{array}{cc}
K_4 & -K_2\varepsilon^{\beta/2}\\
-K_3\exp(-\frac{\alpha}{2\varepsilon})\varepsilon^{1/2+\beta}
& K_1\varepsilon^{\beta/2} \\
\end{array}\right)\\
&\approx C\left(\begin{array}{cc}
1&\varepsilon^{\beta/2}\\
\exp(-\frac{\alpha}{2\varepsilon})\varepsilon^{1/2+\beta}&\varepsilon^{\beta/2}\\
\end{array}\right).
\end{aligned}
\end{displaymath}
Then $c_1,c_2$ could be written as
\begin{displaymath}
\begin{aligned}
\left(\begin{array}{c}
c_1\\c_2\\
\end{array}
\right)&=A^{-1}\left(\begin{array}{c}
u_L\\u_R\\
\end{array}
\right).\\
\end{aligned}
\end{displaymath}
As the previous case, for $k=1,2,\cdots$,
$$u^{(k)}(x)=\varepsilon^{-k/2}\Big[
c_1'\exp\Big(-\frac{\hat{x}^2}{4}\Big)U\Big(\beta+\frac{1}{2}-k,\hat{x}\Big)+
c_2'\exp\Big(-\frac{\hat{x}^2}{4}\Big)V\Big(\beta+\frac{1}{2}-k,\hat{x}\Big)
\Big].$$
For $\mu_0,\mu_1$, and $k=0,1,2,\cdots$,
\begin{displaymath}
\begin{aligned}
&\mu_0^{(k)}(x)=\exp\Big(-\frac{\hat{x}^2}{4}\Big)\varepsilon^{\frac{\beta-k}{2}}
\bigg(U\Big(\beta+\frac{1}{2}-k,\hat{x}\Big)+V\Big(\beta+\frac{1}{2}-k,\hat{x}\Big)\bigg),\\
&\mu_1^{(k)}(x)=\exp\Big(-\frac{\hat{x}^2}{4}\Big)
\varepsilon^{-\frac{k}{2}}\bigg(U\Big(\beta+\frac{1}{2}-k,\hat{x}\Big)
+\varepsilon^{\frac{1}{2}+\beta}\exp(-\frac{\alpha}{2\varepsilon})V\Big(\beta+\frac{1}{2}-k,
\hat{x}\Big)\bigg).\\
\end{aligned}
\end{displaymath}
We have the following estimates:
\begin{displaymath}
\begin{aligned}
&|\mu_0^{(k)}(x)|\leq\left\{
\begin{aligned}
&C\varepsilon^{\frac{\beta-k}{2}},&x\leq\rho,\\
&C\Big(\exp\big(-\frac{\alpha}{2\varepsilon}x^2\big)
\varepsilon^{\beta+\frac{1}{2}-k}x^{-\beta+k-1}+x^{\beta-k}\Big),&x\geq\rho;\\
\end{aligned}
\right.\\
&|\mu_1^{(k)}(x)|\leq\left\{
\begin{aligned}
&C\varepsilon^{-\frac{k}{2}},&x\leq\rho,\\
&C\Big(\exp\big(-\frac{\alpha}{2\varepsilon}x^2\big)
\varepsilon^{\frac{\beta+1}{2}-k}x^{-\beta+k-1}
+\exp(-\frac{\alpha}{2\varepsilon})
\varepsilon^{\frac{\beta+1}{2}}x^{\beta-k}\Big),&x\geq\rho;\\
\end{aligned}
\right.
\end{aligned}
\quad k=0,1,\cdots.
\end{displaymath}
Since these two estimates are different from the case $p'(0)>0$, we
might keep $u_L$ and $u_R$
as
independent variables.\\
\begin{displaymath}
\begin{aligned}
|u_1^{(k)}(x)|&\leq
|u_R||\mu_0^{(k)}(x)|+|u_L||\mu_1^{(k)}(x)|\\
&\leq\left\{
\begin{aligned}
&C\Big(|u_R|\varepsilon^{\frac{\beta-k}{2}}+|u_L|\varepsilon^{-\frac{k}{2}}\Big),&x\leq\rho,\\
&C\Big(|u_R|x^{\beta-k}+|u_L|x^{-k}\Big),&x\geq\rho;\\
\end{aligned}
\right. \quad k=1,2,\cdots.\\
\end{aligned}
\end{displaymath}
$u_2$ could be estimated similarly by computing integrals of Green's
function. We omit these details and present the following result.
\begin{displaymath}
\begin{aligned}
|u_2^{(k)}(x)|&\leq\left\{
\begin{aligned}
&C\Big(1+\rho^{\beta-k}\Big),&x\leq\rho,\\
&C\Big(1+x^{\beta-k}\Big),&x\geq\rho;\\
\end{aligned}
\right. \quad k=1,2,\cdots.\\
\end{aligned}
\end{displaymath}
The conclusion in Lemma \ref{lemma:negative} consists of estimates for
$u_1$ and $u_2$.
\end{proof}
\begin{remark}
\rm\label{remark_negative}
The solution of the case $p'(0)<0$ is smooth at the endpoint $x=1$,
hence estimates are
made on the whole interval $[0,1]$. The result in Lemma
\ref{lemma:negative} is quite
similar to
\cite{berger1984priori}, except that nonzero $u_L$ might lower
the regularity of the solution.
\end{remark}
\begin{proposition}
\rm\label{prop:negative}
If $u$ is the solution to \eqref{eq1c} with $p'(0)<0$, there exists a
constant $C$ independent of $\varepsilon$, such that
\begin{equation}
\label{eq:prop:negative}
|xu'(x)|\leq C.
\end{equation}
\end{proposition}
\begin{remark}
\rm\label{prop:compare}
Estimates \eqref{eq:mbtpp} are stronger than Lemma \ref{lemma:positive}
and \ref{lemma:negative}.
The analysis in \cite{vulanovic1993continuous} applies in the cases
$k\ge2$, where $k$ stands for the multiplicity of the turning point, while the
same argument no longer holds for $k=1$.
Another difference is that their estimates are made on the whole
interval $[0,1]$. In contrast, estimates hold for $[0,1/2]$ in Lemma
\ref{lemma:positive} and for the whole interval in Lemma
\ref{lemma:negative}. Estimates \eqref{eq:positive} and
\eqref{eq:negative}
give upper bounds for $x\leq\rho$ and $x\geq\rho$ separately when the
turning
point is single, and it is unknown
whether these estimates could be combined into one expression in an
essential way.
\end{remark}
\section{Numerical method}\label{sec:3}
In this section we first introduce some definitions and weak formulations
in section \ref{sec:3.1}. We derive the weak solution using a
Petrov-Galerkin
finite
element method (PGFEM), summarized in Algorithm \ref{alg_PGFEM}.
If we know the analytic expressions of the solutions to the dual problems,
we directly use them as the test functions in PGFEM; otherwise, the dual
problems are solved
numerically by TFPM on a uniform mesh, as described in section
\ref{sec:3.2}. Furthermore, we prove
first-order convergence of PGFEM in the $L^\infty$-norm and the energy
norm in section
\ref{sec:3.3} when test functions are evaluated exactly.
\subsection{Definitions and formulations}\label{sec:3.1}
The weak form of problem \eqref{eq1} is: Find $u\in H^1(x_L,x_R)$ such that
\begin{equation}
\label{weak_eq1}
\begin{aligned}
&A_\epsilon(u,v)\equiv \epsilon(u',v')+(pu',v)+(bu,v)=(f,v),\quad
\forall
v\in H^1_0(x_L,x_R),\\
&u(x_L)=u_L,\quad u(x_R)=u_R.
\end{aligned}
\end{equation}
Let us take a partition $\{x_i, i=0,1,\cdots,N\}$ on $[x_L,x_R]$,
including any possible interior turning point:
$$x_L=x_0<x_1<\cdots<x_N=x_R,$$
$$I_i=[x_{i-1},x_i],\quad i=1,2,\cdots,N,$$
$$h_i=\left\{
\begin{aligned}
&x_{i}-x_{i-1},&i=1,\cdots,N,\\
&0,&i=0,N+1,\\
\end{aligned}
\right.$$
and the mesh size $h$ is defined as
$$h=\max_{1\leq i\leq N}h_i.$$
In this section, we use $L^\infty$, $L^2$ and an energy norm
$||\cdot||_\varepsilon$ for a function $u$:
\begin{align}
&||u||_{L^\infty}=\max_{x_L\leq x\leq x_R}|u(x)|,\\
&||u||_{L^2} = \sqrt{\int_{x_L}^{x_R}|u(x)|^2dx},\\
&||u||_\varepsilon =
\sqrt{||u||_{L^2}^2+\varepsilon||u'||_{L^2}^2},\label{eq:ucon}
\end{align}
and the corresponding discrete infinity norm $||\cdot||_{L^\infty_h}$ and
discrete energy norm $||\cdot||_{\varepsilon,h}$ for a grid function $u_h$:
\begin{align}
&||u_h||_{L^\infty_h}=\max_{0\leq i\leq N}|u_h(x_i)|,\label{eq:uinf}\\
&||u_h||_{\varepsilon,h} = \sqrt{||u_h||_{L^2_h}^2 +\varepsilon
||u_h'||_{L^2_h}^2}.\label{eq:ueng}
\end{align}
Here $L^2_h$ is the discrete $L^2$ space with the norm defined on the grid,
and $u_h'$ is computed by a difference scheme:
\begin{align}
||u_h||_{L^2_h}&=\sqrt{\sum_{i=0}^{N}u_h^2(x_i)\frac{h_i+h_{i+1}}{2}},\label{eq:ul2}\\
||u_h'||_{L^2_h}&=\sqrt{\sum_{i=1}^{N}\bigg(\frac{u_h(x_i)-u_h(x_{i-1})}{h_i}\bigg)^2h_i}.\label{eq:upl2}
\end{align}
Before discretization of finite element method, we first approximate
\eqref{eq1} by the following problem:
\begin{equation}
\label{eq1'}
\left\{
\begin{aligned}
& \bar{L}u_h\equiv-\varepsilon
u_h''+\bar{p}(x)u_h'+\bar{b}(x)u_h=\bar{f}(x),\quad
x_L<x<x_R,\\
& u_h(x_L) = u_L, \quad u_h(x_R) = u_R,\\
\end{aligned}\right.
\end{equation}
where $\bar{p},\bar{b},\bar{f}$ are piecewise approximations to the
corresponding functions. Test function space $V_h$ is defined by a group of
basis
functions $\{\psi_i\}_{i=1}^{N-1}$ with $\psi_i$ solving the dual problem
of
\eqref{eq1'} on
$I_i\cup I_{i+1}$:
\begin{equation}
\left\{
\begin{aligned}
& \bar{L}^*\psi_i\equiv-\varepsilon
\psi_i''-\bar{p}(x)\psi_i'+(\bar{b}(x)-\bar{p}'(x))\psi_i=0, \quad
x_{i-1}<x<x_{i+1},\\
& \psi_i(x_i) = 1, \quad\psi_i(x_j) = 0\,(j\neq i). \\
\end{aligned}\right.
\end{equation}
Functions $\{\psi_i\}_{i=1}^{N-1}$ are referred to as $L^*$-splines in some
articles.
Then we use PGFEM to discretize the weak form of
\eqref{eq1'}: Find $u_h\in
U_h$ such that
\begin{equation}\label{pgfem}
\begin{aligned}
&A_\epsilon(u_h,v_h)\equiv
\epsilon(u'_h,v'_h)+(\bar{p}u'_h,v_h)+(\bar{b}u_h,v_h)=(\bar{f},v_h),
\quad \forall v_h\in V_h,\\
&u_h(x_L)=u_L,\quad u_h(x_R)=u_R.
\end{aligned}
\end{equation}
where
\begin{align}
U_h&=\bigg\{v_h\in C[x_L,x_R]\,\bigg|\,v_h|_{I_i} \mbox{ is linear
function}, i=1,\cdots,N\bigg\},\\
V_h&=\bigg\{v_h\,\bigg|\,v_h=\sum_{i=1}^{N-1}c_i\psi_i,\,c_i\in\mathbb{R},
\,i=1,\cdots,N-1\bigg\}.\label{def:V_h}
\end{align}
\begin{remark}
\rm\label{remark_exact_numerical}
If we use parabolic cylinder functions as test functions, it is
usual to compute a cut-off of the series expansion of these special
functions in order to generate the stiffness matrix and the
right-hand-side
term. We compute parabolic cylinder
functions in MATLAB using codes from Fortran~90
by~\cite{temme2000numerical,olver2010nist}.
In some cases, numerical cost is
expensive when we need these special functions to be precise enough.
Moreover, we could not analytically represent
the solution to the dual problem with a nonlinear first-order
coefficient.
In practice, it works as well if we substitute exact evaluations of
special functions with numerical solutions described in the following
subsection.
\end{remark}
\subsection{Numerical method of dual problems}\label{sec:3.2}
We apply TFPM on the uniform mesh to each dual problem.
Precisely, for a specific dual problem:
\begin{equation}
\label{dual}
-\varepsilon \psi''+\hat{a}(x-X_0)\psi'+\hat{b}\psi=0,\quad X_1<x<X_2,
\end{equation}
the solution is determined with the following boundary conditions, for
instance:
\begin{equation}
\label{dual:bd}
\psi(X_1)=1,\quad\psi(X_2)=0.
\end{equation}
%
We make a uniform partition on the subinterval:
\begin{displaymath}
Y_j=X_1+j\frac{X_2-X_1}{N_1},\quad j=0,1,\cdots,N_1.
\end{displaymath}
TFPM solution on $[Y_{i-1},Y_{i+1}]$ is the linear
combination
of solutions to the equation:
\begin{equation}
-\varepsilon \psi''+\hat{a}(Y_{i}-X_0)\psi'+\hat{b}\psi=0,\quad
Y_{i-1}<x<Y_{i+1}.
\label{dual:subinterval}
\end{equation}
Equation \eqref{dual:subinterval} admits two exponential solutions
$\psi^{(1)},\psi^{(2)}$.
Denoting $\psi_j=\psi(Y_j)$, we suppose:
\begin{equation}
\label{eq:cond_dual}
\alpha_{i,i-1}\psi_{i-1}+\alpha_{i,i}\psi_i+\alpha_{i,i+1}\psi_{i+1}=0,
\end{equation}
where we presume $\alpha_{i,i}=1$ because
\eqref{dual:subinterval} is homogeneous, and the rest of the coefficients
are
determined by
requiring $\psi^{(1)}$ and $\psi^{(2)}$ to satisfy \eqref{eq:cond_dual}.
By gathering conditions \eqref{eq:cond_dual} at $i=1,\cdots,N_1-1$
together with boundary
conditions \eqref{dual:bd}, we obtain a tri-diagonal linear system
which gives evaluations of approximated dual solutions
$\{\psi(Y_i)\}_{i=0}^{N_1}$. We remark that TFPM on the uniform mesh
described above yields smaller errors than simple
finite difference methods.
To compute derivatives at $X_1$ and $X_2$, we represent the solution by
exponential basis functions on $[Y_0,Y_1]$ and $[Y_{N_1-1},Y_{N_1}]$
respectively. For instance, TFPM solution on $[Y_0,Y_1]$ is assumed to
satisfy \eqref{dual:subinterval} which is defined on $Y_0<x<Y_1$. By
boundary conditions at $Y_0$ and $Y_1$, the solution is
identified on $[Y_0,Y_1]$, and $\psi'(X_1)$ is available by direct
calculations.
\subsection{Main results}\label{sec:3.3}
For PGFEM using exact test functions, we have the following convergence
theorem:
\begin{theorem}
\rm\label{thm:Linf}
\textbf{(First-order $L^{\infty}$ Uniform Convergence)} Assume $p,b,f$
as
above, and singularities of type (a), (b), and (c) might occur in the
solution. Then PGFEM described in section \ref{sec:3.1} converges
uniformly
in $L^\infty$ norm, i.e., there exists a constant $C$ independent
of
$h,\varepsilon$, such that
\begin{equation}
\label{eq:thm:Linf}
||e||_{L^\infty}\leq Ch,
\end{equation}
with $e=u-u_h$, where $u$ is the strong solution to problem
\eqref{weak_eq1}, $u_h$ is the strong solution to problem \eqref{pgfem}.
\end{theorem}
\begin{proof}
We classify all the subintervals $I_k$'s into ones close
to singular points and ones far away. More
exactly, we list all singular points in ascending order as
$\{s_i\}_{i=1}^{m'}$ which
contain any possible interior turning points $z_i$ (with
$\lambda_i\in(0,1]$)
and endpoints (with a boundary layer). Let $0<\delta<\min_i
|s_{i}-s_{i-1}|/3$ and define:
\begin{displaymath}
\begin{aligned}
&J_i=[s_i-\delta,s_i+\delta],\quad (i=1,\cdots,m')\\
&J_r=[x_L,x_R]-\bigcup J_i.\\
\end{aligned}
\end{displaymath}
According to whether the middle point of a subinterval $I_k$ locates in
some $J_i$ or not, we
approximate $p$ on $I_k$ by either linear functions or constants:
\begin{displaymath}
\bar{p}(x)\big|_{I_k}=\left\{
\begin{aligned}
&p(x_{k-\frac{1}{2}}), &\text{if } x_{k-\frac{1}{2}}\in J_r,\\
&p'(x_k^*)(x-x_k^*)+p(x_k^*), &\text{if } x_{k-\frac{1}{2}}\in
J_i,\\
\end{aligned}
\right.
\end{displaymath}
where
\begin{displaymath}
x_k^*=\left\{
\begin{aligned}
&s_i, &\text{if } I_k \text{ is adjacent to some }
s_i,\\
&x_{k-\frac{1}{2}},& \text{otherwise}.
\end{aligned}
\right.
\end{displaymath}
Take $\bar{b}(x)\big|_{I_k}$ as piecewise constant
$b(x_{k-\frac{1}{2}})$ on every subinterval $I_k$ and
$\bar{f}(x)\big|_{I_k}$
likewise.
Thus test functions induced by linear approximations are
represented by parabolic cylinder functions and ones derived by
constants
are exponential functions.
We note that maximum principle holds for $L$ and $\bar{L}$, i.e., there
exists a constant $C$ independent of $\varepsilon,h$:
$$||v||_{\infty}\leq C\Big(||Lv||_{\infty}+|v(x_L)|+|v(x_R)|\Big),$$
$$||v||_{\infty}\leq
C\Big(||\bar{L}v||_{\infty}+|v(x_L)|+|v(x_R)|\Big).$$
With the same arguments by Gartland and
Farrell~\cite{farrell1988uniform},
we have:
$$||e||_\infty\leq C\Big\{||(p-\bar{p})u'||_\infty+||b-\bar{b}||
_\infty||u||_\infty+||f-\bar{f}||_\infty\Big\},$$
where $e=u-u_h$. We separate the discussion by whether $x_{k-1/2}$
lies in some $J_i$, where $x\in I_k$:\\[1mm]
\textbf{Case (a)} $x_{k-1/2}\in J_i$.\\[-2mm]
\begin{itemize}
\item[] Suppose
$x_{k-1/2}\in J_i$. If the singular point
$s_i\in(x_L,x_R)$, we
have the estimate of the derivative $$|u'(x)|\leq
C\Big(|x-s_i|+\sqrt{\varepsilon}\Big)^{\lambda_i-1},$$
where the coefficient $\lambda_i=-b(s_i)/p'(s_i)\in(0,1]$;
$|u'(x)|$
is bounded by a constant $C$ for other
evaluations of $\lambda$. By definition of
$\bar{p}$, the term $|(p-\bar{p})u'|$ is under control due to the
following inequalities and \eqref{eq:prop:cusp}:\\[1mm]
\begin{displaymath}
\begin{aligned}
&|p-\bar{p}|\leq Ch^2\leq Ch|x-s_i|, &\mbox{if }|x-s_i|\geq
h,\\
&|p-\bar{p}|\leq C|x-s_i|^2\leq Ch|x-s_i|,
&\mbox{if }|x-s_i|\leq h.\\
\end{aligned}
\end{displaymath}\\[1mm]
In both cases $|x-s_i|$ multiplied by $|u'(x)|$ is bounded.
The second inequality holds, for we approximate $p$ by the
first-order Taylor polynomial at $x=s_i$ in the intervals
adjacent to $s_i$.
In the cases where $s_i$ is an endpoint of the whole interval,
generally
the derivative $u'(x)$ might be significant near the singular
point,
such as $\varepsilon^{-1}$ or $\varepsilon^{-1/2}$. We have shown
in
\eqref{eq:prop:exp}, \eqref{eq:prop:positive}, and
\eqref{eq:prop:negative}
that
$|(x-s_i)u'(x)|$ is bounded uniformly by a constant $C$.
\end{itemize}
\textbf{Case (b)} $x_{k-1/2}\in J_r$.\\[-2mm]
\begin{itemize}
\item[]
$J_r$ is $\delta$ away from boundary layers and interior layers.
Without singularity, we could write the bound of $|u'|$ as $C_1$,
which
may depend on a minus power of $\delta$.
\end{itemize}
In the end, using the facts that $|f-\bar{f}|,|b-\bar b|\leq Ch$
and
$|u|\leq C|f|$, the first-order convergence in $L^\infty$ norm is
proved.
\end{proof}
Applying approximations in Theorem \ref{thm:Linf}, we summarize the
algorithm as follows:
\begin{algorithm}
\label{alg_PGFEM}
\rm \textbf{(PGFEM for turning point problems)}
\begin{enumerate}
\item Identify singular points $s_i$'s and their types of
singularities;
\item Take a partition and add in singular points which are absent
in
the mesh;
\item Approximate $p,b,f$ by piecewise constants or
piecewise linear functions based on distance from the
midpoint of an interval to singular points (see the proof of
Theorem \ref{thm:Linf});
\item Solve dual problems analytically or numerically to
evaluate test functions;
\item Generate stiffness matrix and right-hand-side term using
test functions;
\item Solve a linear system to obtain the numerical solution
$u_h$
(on
grid points).
\end{enumerate}
\end{algorithm}
\begin{remark}
\rm\label{remark_of_alg}
This algorithm is a fitted operator method because we need no special
mesh
on the whole interval, and the solution is derived by selecting special
test functions. One needs to identify the location of singular
points in order to use information of the singularities and to solve
dual problems with enough precision to construct the linear system.
\end{remark}
\begin{theorem}
\rm\label{thm:L2}
\textbf{(First-order $L^2$ Uniform Convergence)} Providing the same
conditions as the previous theorem, PGFEM
is
uniformly convergent in $L^2$-norm and $\varepsilon$-norm, i.e., there
exists
a
constant $C$ independent of $h$ and $\varepsilon$, such that
\begin{equation}
\label{eq:thm:L2}
||e||_{L^2}\leq||e||_{\varepsilon}\leq Ch.
\end{equation}
\end{theorem}
\begin{proof}
If we consider $L^2$-norm, we start by
subtracting the approximated equation \eqref{eq1'} from the original one
\eqref{eq1}:
$$-\varepsilon e''+\bar{p}e'+\bar{b}e=F(x)\equiv
-(p-\bar{p})u'-(b-\bar{b})u+(f-\bar{f}).$$
We have proved that $|e|\leq C|F|\leq \tilde{C}h$. On the assumption
that $u,u_h\in C^1$, the
following energy
estimate holds:
\begin{equation}
\label{energy_estimate}
\int_{x_L}^{x_R} \varepsilon
(e')^2+\Big(\bar{b}-\frac{1}{2}\bar{p}'\Big)e^2
dx\leq||F||_{L^2}||e||_{L^2}+
\frac{1}{2}\sum_{i=1}^{N-1}e^2(x_i)[\bar{p}](x_i).
\end{equation}
For $h$ sufficiently small, using assumption in \eqref{eq:condition},
it holds that
\begin{displaymath}
\bar{b}-\frac{1}{2}\bar{p}'\geq
\frac{b_0+\gamma_0}{4}.
\end{displaymath}
Denoting
$$\gamma_1 = \min(1,\frac{b_0+\gamma_0}{4}),$$
we have the estimate in the energy norm from
\eqref{eq:thm:Linf} and \eqref{energy_estimate}:
\begin{equation}
\label{est2}
||e||_\varepsilon^2\leq Ch^2,
\end{equation}
where the constant $C$ may depend on $p,b,f$, and $\gamma^{-1}$.
Here we have used that the jump of $\bar{p}$ is at most $Ch$:
$$|[\bar{p}](x_k)|\leq|\bar{p}(x_k^-)-p(x_k)|+|p(x_k)-\bar{p}(x_k^+)|\leq
Ch.$$
\end{proof}
\begin{proposition}
\label{prop:stab}
\rm \textbf{(Numerical stability)} The scheme satisfies the discrete
maximum principle, i.e., the matrix induced by PGFEM is tridiagonal and
diagonally dominant, which can be verified by taking
integrals~\cite{farrell1988uniform}.
\end{proposition}
\section{Numerical implementation}\label{sec:4}
In this section, we use three examples to validate the
efficiency and convergence of our algorithm.
Different cases of singularities (a), (b), and (c) are included in these
examples.
Test functions could be computed with exact parabolic cylinder
functions or approximated by numerical solutions.
PGFEM solutions with fine
grids ($N=4096$) using exact test functions are chosen to be reference
solutions in the
first two examples; the third one admits an exact solution.
We calculate errors by $||\cdot||_{L^\infty_h}$,
$||\cdot||_{L^2_h}$ and $||\cdot||_{\varepsilon,h}$ defined in
\eqref{eq:uinf}--\eqref{eq:upl2}.
\begin{example}
\label{ex:cos}
\rm Consider a turning point problem with a cusp-like interior layer
and an exponential-type boundary layer:
\begin{displaymath}
\left\{
\begin{aligned}
&-\varepsilon u''+\cos(2\pi x)u'+u=\frac{1}{1+x^2},\quad
0<x<1,\\
&u(0)=1,\quad u(1)=2.\\
\end{aligned}
\right.
\end{displaymath}
There is an interior layer at $x=1/4$ and a boundary
layer at $x=1$, corresponding to cases (b) and (a). We set
$\{1/4,3/4,1\}$ to be singular points which need special care.
The condition that $b-p'$ is bounded from below is not satisfied in
this case, while numerical experiments suggest that Algorithm
\ref{alg_PGFEM}
works, for
exact test functions, when the repulsive turning point $x=3/4$ is
specially treated; for approximate dual solutions, we need to
neglect $x=3/4$ for the sake of stability.
In practice we take
$\delta$ in Theorem \ref{thm:Linf} as follows in all three examples:
$$\delta = \min \{0.1,\quad \min_i|s_i-s_{i-1}|/3\}.$$
The reference solution, together with numerical solutions using PGFEM
on the uniform mesh
and an up-winding scheme on Shishkin mesh \cite{natesan2003parameter}
with
both $256$ grids are shown in
Figure \ref{fig_sol_cos}. Exact dual solutions are selected as test
functions. Compared to non-equidistant mesh of
Shishkin
type,
PGFEM needs
no special grids, and values on the uniform mesh points are highly
accurate. The solution using Shishkin mesh has a lower
resolution outside
the interior layer, which could be improved by mesh refinement. PGFEM
errors
in three different discrete norms versus grid
number $N$ are
drawn with a log-log plot in Figure \ref{fig_err_cos}, where one may
find a
nearly
second-order convergence.
\begin{figure}[ht]
\subfloat[]{\label{fig:1A}
\centering
\includegraphics[width=7cm]{Ex1_256_1e-6.eps}}
\subfloat[]{\label{fig:1B}
\centering
\includegraphics[width=7cm]{Ex1_256_1e-6_Zoomin.eps}}
\caption{\small Numerical and reference solutions for Example \ref{ex:cos}
($\varepsilon=1\times10^{-6}$), where test functions in PGFEM
are
calculated
with
exact expressions. \textbf{(a)} PGFEM and the method
in \cite{natesan2003parameter} are employed with grid number
$N=256$, with the latter using an up-winding scheme on Shishkin
mesh.
The
reference solution is computed with PGFEM on grids
$N=4096$; \textbf{(b)} Horizontal Magnification near $x=1/4$.}
\label{fig_sol_cos}
\end{figure}
\begin{figure}[htpb]
\hspace{-14mm}\includegraphics[width=16cm]{Ex1_loglog.eps}
\caption{\small Log-log plot for PGFEM errors in Example \ref{ex:cos} versus
grid
number $N$, in $L^\infty_h,L^2_h$ and discrete energy norm.
The solid black line and black dashed
line have slopes -1 and -2, respectively.}
\label{fig_err_cos}
\end{figure}
\end{example}
\begin{example}
\label{ex:single}
\rm Consider a boundary turning point problem:
\begin{displaymath}
\left\{
\begin{aligned}
&-\varepsilon u''+(1-x^2)u'+3u=e^x,\quad -1<x<1,\\
&u(-1)=1,\quad u(1)=2.\\
\end{aligned}
\right.
\end{displaymath}
At both endpoints the solution appears singular with $p'(-1)>0$ at
$x=-1$ and $p'(1)<0$ at $x=1$, corresponding to case (c) with a
positive slope and a negative one. These two boundary layers are weaker
than those
in Example
\ref{ex:cos}, as the PGFEM solution and the reference solution are
drawn
in Figure
\ref{fig_sol_single}.
Setting $\{-1,1\}$ as singular points and exact dual solutions as test
functions, $L^\infty_h$ errors and discrete
energy errors are shown in Tables \ref{table_Linferr_single} and
\ref{table_Eerr_single} accordingly, where a
first-order
uniform convergence could be verified.
\begin{figure}[h]
\subfloat[]{\label{fig:2A}
\centering
\includegraphics[width=7cm]{Ex2_256_1e-6.eps}}
\subfloat[]{\label{fig:2B}
\centering
\includegraphics[width=7cm]{Ex2_256_1e-6_Zoomin.eps}}
\caption{\small Numerical and reference solutions for Example
\ref{ex:single} ($\varepsilon=1\times10^{-6}$). Test functions in PGFEM are
calculated
with
exact expressions. \textbf{(a)} PGFEM is implemented
with grids $N=256$, and the reference solution is calculated
using the
same algorithm with $N=4096$;
\textbf{(b)} Horizontal Magnification near $x=0$.}
\label{fig_sol_single}
\end{figure}
\begin{table}[h]
\caption{\small $L^\infty_h$ errors of PGFEM solutions for
Example \ref{ex:single}. Test functions are calculated with exact
expressions.}
\centering
\begin{tabular}{c|cc|cc|cc|cc}
\hline
$\varepsilon$ & \multicolumn{2}{c|}{1} &
\multicolumn{2}{c|}{1E-02} & \multicolumn{2}{c|}{1E-04} &
\multicolumn{2}{c}{1E-06} \\ \hline
$N$ & $L_h^\infty$ & rate & $L_h^\infty$ &
rate & $L_h^\infty$ & rate & $L_h^\infty$ &
rate \\ \hline
32 & 1.12E-04 & & 2.78E-03
& & 1.85E-03 & & 1.85E-03
& \\
64 & 2.39E-05 & 2.43 & 1.46E-03 &
1.52 & 7.22E-04 & 2.10 & 7.16E-04 &
1.93 \\
128 & 5.97E-06 & 2.00 & 3.72E-04 &
1.97 & 1.90E-04 & 1.93 & 1.83E-04 &
1.97 \\
256 & 1.56E-06 & 1.98 & 7.86E-05 &
2.37 & 4.49E-05 & 2.26 & 8.66E-05 &
2.31 \\
512 & 3.85E-07 & 2.02 & 1.94E-05 &
2.01 & 1.41E-05 & 1.89 & 3.73E-05 &
2.45 \\
1024 & 9.07E-08 & 2.10 & 4.83E-06 &
2.04 & 3.33E-06 & 2.08 & 1.51E-05 &
2.22 \\ \hline
\end{tabular}
\label{table_Linferr_single}
\end{table}
\begin{table}[h]
\caption{\small $||\cdot||_{\varepsilon,h} $ errors of
PGFEM solutions for Example \ref{ex:single}. Test functions are
calculated with exact expressions.}
\label{table_Eerr_single}
\centering
\begin{tabular}{c|cc|cc|cc|cc}
\hline
$\varepsilon$ & \multicolumn{2}{c|}{1} &
\multicolumn{2}{c|}{1.E-02} & \multicolumn{2}{c|}{1.E-04} &
\multicolumn{2}{c}{1.E-06} \\ \hline
$N$ & energy & rate & energy &
rate & energy & rate & energy &
rate \\ \hline
32 & 3.93E-04 & & 1.95E-03
& & 6.38E-04 & & 6.27E-04
& \\
64 & 9.56E-05 & 2.24 & 9.93E-04 &
1.52 & 2.08E-04 & 1.84 & 2.09E-04 &
1.77 \\
128 & 2.39E-05 & 2.00 & 2.55E-04 &
1.97 & 5.31E-05 & 1.99 & 5.50E-05 &
2.01 \\
256 & 6.02E-06 & 2.03 & 5.47E-05 &
2.37 & 1.51E-05 & 2.17 & 1.42E-05 &
2.18 \\
512 & 1.49E-06 & 2.02 & 1.35E-05 &
2.01 & 4.46E-06 & 2.01 & 3.93E-06 &
2.15 \\
1024 & 3.53E-07 & 2.08 & 3.36E-06 &
2.05 & 1.24E-06 & 2.03 & 1.14E-06 &
2.13 \\ \hline
\end{tabular}
\end{table}
\end{example}
For Example \ref{ex:single}, if we compute test functions numerically,
using the
same reference solution, convergence is the same as above (see
Table \ref{table_Linferr_single_app}). Convergence also holds for multiple
turning point problems if we follow
the same procedure to compute test functions, although it is unclear
what analytic expressions of the solutions to dual problems are.
\begin{table}[h]
\caption{\small $L^\infty_h$ errors of PGFEM solutions for
Example \ref{ex:single}. Test functions are approximated by numerical
solutions.}
\label{table_Linferr_single_app}
\centering
\begin{tabular}{c|cc|cc|cc|cc}
\hline
$\varepsilon$ & \multicolumn{2}{c|}{1} &
\multicolumn{2}{c|}{1.E-02} & \multicolumn{2}{c|}{1.E-04} &
\multicolumn{2}{c}{1.E-06} \\ \hline
$N$ & $L_h^\infty$ & rate & $L_h^\infty$ &
rate & $L_h^\infty$ & rate & $L_h^\infty$ & rate
\\ \hline
32 & 1.12E-04 & & 2.78E-03
& & 1.86E-03 & & 1.86E-03
& \\
64 & 2.39E-05 & 2.43 & 1.46E-03 &
1.52 & 7.23E-04 & 2.11 & 7.21E-04 & 1.94
\\
128 & 5.96E-06 & 2.00 & 3.72E-04 &
1.97 & 1.90E-04 & 1.93 & 1.84E-04 & 1.97
\\
256 & 1.56E-06 & 1.98 & 7.86E-05 &
2.37 & 4.50E-05 & 2.26 & 8.66E-05 & 2.32
\\
512 & 3.85E-07 & 2.02 & 1.94E-05 &
2.01 & 1.40E-05 & 1.89 & 3.68E-05 & 2.45
\\
1024 & 9.07E-08 & 2.10 & 4.83E-06 &
2.04 & 3.33E-06 & 2.08 & 1.51E-05 & 2.29
\\ \hline
\end{tabular}
\end{table}
\begin{example}
\rm\label{ex:multiple}
Consider the following multiple boundary turning point problem
in~\cite{vulanovic1993continuous}:
\begin{displaymath}
\left\{
\begin{aligned}
& -\varepsilon u''-x^3u'+u=f(x),\quad 0<x<1,\\
& u(0)=2,\quad u(1)=e^{-1/\sqrt{\varepsilon}}+e,\\
\end{aligned}
\right.
\end{displaymath}
where $f(x)$ is determined by the
exact solution: $$u(x)=e^{-x/\sqrt{\varepsilon}}+e^x.$$
There is a $\sqrt{\varepsilon}$-wide boundary layer at the turning
point $x=0$, as drawn in Figure \ref{fig_sol_multiple}. Results of
PGFEM
are
compared with
one in
\cite{vulanovic1993continuous}, where we compute with two methods on
the same uniform mesh, and PGFEM obtains solutions with higher
precision. In this
case, analytic expressions of dual solutions are unknown. Thus we
utilize numerical solutions to dual problems as test functions.
The $L^\infty_h$ convergence rate of PGFEM is almost two, as shown in
Table \ref{table_Linferr_multiple}.
\begin{figure}[h]
\subfloat[]{\label{fig:3A}
\centering
\includegraphics[width=7cm]{Ex3_256_1e-6.eps}}
\subfloat[]{\label{fig:3B}
\centering
\includegraphics[width=7cm]{Ex3_256_1e-6_Zoomin.eps}}
\caption{\small Numerical and exact solutions for Example \ref{ex:multiple}
($\varepsilon=1\times10^{-6}$). Test functions in PGFEM are
approximated
by numerical solutions on grids $N=4096$. \textbf{(a)} PGFEM
and the method
in \cite{vulanovic1993continuous}
are implemented with uniform grids $N=256$, and the reference
solution is computed with PGFEM and
$N=4096$;
\textbf{(b)} Horizontal Magnification near $x=0$.}
\label{fig_sol_multiple}
\end{figure}
\begin{table}[h]
\caption{\small $L^\infty_h$ errors of PGFEM solutions for
Example \ref{ex:multiple}. Test functions are approximated by numerical
solutions.}
\label{table_Linferr_multiple}
\centering
\begin{tabular}{c|cc|cc|cc|cc}
\hline
$\varepsilon$ & \multicolumn{2}{c|}{1} &
\multicolumn{2}{c|}{1.E-02} & \multicolumn{2}{c|}{1.E-04} &
\multicolumn{2}{c}{1.E-06} \\ \hline
$N$ & $L_h^\infty$ & rate & $L_h^\infty$ &
rate & $L_h^\infty$ & rate & $L_h^\infty$ &
rate \\ \hline
32 & 1.84E-05 & & 1.65E-04
& & 3.71E-04 & & 1.05E-03
& \\
64 & 4.61E-06 & 2.00 & 4.82E-05 &
1.77 & 5.89E-05 & 2.66 & 3.01E-04 &
1.81 \\
128 & 1.15E-06 & 2.00 & 1.26E-05 &
1.93 & 8.85E-06 & 2.82 & 7.64E-05 &
1.98 \\
256 & 2.88E-07 & 2.00 & 3.20E-06 &
1.98 & 2.22E-06 & 1.99 & 1.77E-05 &
2.11 \\
512 & 7.21E-08 & 2.00 & 8.02E-07 &
2.00 & 5.50E-07 & 2.01 & 4.57E-06 &
2.31 \\
1024 & 1.79E-08 & 2.01 & 2.01E-07 &
2.00 & 1.33E-07 & 2.04 & 1.17E-06 &
1.97 \\ \hline
\end{tabular}
\end{table}
\end{example}
\section{Conclusion}
In this paper, we develop a Petrov-Galerkin finite element method (PGFEM)
to solve a class of turning point problems
in one dimension. A priori estimates have been established for the single
boundary
turning point case. Numerical analysis shows that our scheme has
first-order uniform convergence in several different norms. In numerical
examples, errors in different discrete norms validate the feasibility and
efficiency of the scheme. We emphasize that such an algorithm not only
could be implemented with evaluations of exact solutions to the dual
problems but also is considerable if test functions are approximated
numerically.
|
1,314,259,995,023 | arxiv | \section{Introduction}
Grammar induction is a fundamental and long-standing \cite{lari1990estimation,clark2001unsupervised,klein2002generative}
problem in computational linguistics, which aims to find hierarchical syntactic structures from plain sentences.
Unlike supervised methods \cite{charniak2000maximum,collins2003head,petrov2007improved,zhang2011syntactic,cross2016span,kitaev2018constituency} that require human annotated treebanks, \textit{e.g.}, Penn Treebank \cite{marcus1993building}, grammar inducers do not rely on any human annotations for training.
Grammar induction is attractive since annotating syntactic trees by human language experts is expensive and time consuming, while the current treebanks are limited to several major languages and domains.
Recently, deep learning models have achieved remarkable success across
NLP tasks, and neural models have been designed \cite{shen2018ordered,shen2018neural,kim2019compound,kim2019unsupervised,jin2018unsupervised} for grammar induction, which greatly advanced model performance on induction with raw text.
Recent efforts have started to consider other useful information from multiple modalities, such as images \cite{shi2019visually,jin2020grounded} and videos \cite{zhang2021video}.
Specifically, \citet{zhang2021video} show that multi-modal information (e.g. motion, sound and objects) from videos can significantly improve the induction accuracy on verb and noun phrases.
Such work uses curated multi-modal data publicly available on the web, which all assume that the meaning of a sentence needs to be identical (e.g., being a caption) to the corresponding video or image.
This assumption limits usable data to several small-scale benchmarks \cite{lin2014microsoft,xu2016msr,hendricks17iccv} with expensive human annotations on image/video captions.
The noisy correspondence between form and meaning is one of the main research questions in language acquisition \cite{akhtar1999,gentner2001individuation,DOMINEY2004121}, where different proposals attempt to address this indeterminacy faced by children. There has been computational work incorporating such indeterminacy into their models \cite{yu2013grounded,NEURIPS2021_f5e62af8}. For modeling empirical grammar learning with multi-modal inputs, two important questions still remain open:
1) \emph{how can a grammar inducer benefit from large-scale multi-media data (e.g., YouTube videos) with noisy text-to-video correspondence}? and
2) \emph{how can a grammar inducer show robust performances across multiple domains and datasets}?
By using data with only weak cross-modal correspondence, such as YouTube videos and their automatically generated subtitles, we allow the computational models to face a similar indeterminacy problem, and examine how indeterminacy interacts with data size to influence learning behavior and performance of the induction models.
In this paper, we conduct the first investigation on both questions.
Specifically, we collect 2.4 million video clips and the corresponding subtitles from instructional YouTube videos (HowTo100M \citealt{miech2019howto100m}) to train multi-modal grammar inducers, instead of using the training data from a benchmark where text and video are in alignment.
We then propose a novel model, named Pre-Trained Compound Probabilistic Context-Free Grammars (PTC-PCFG), that extends previous work \cite{shi2019visually,zhang2021video} by incorporating a video-span matching loss term into the Compound PCFG \cite{kim2019compound} model.
To better capture the video-span correlation, it leverages CLIP \cite{miech2020end}, a state-of-the-art model pretrained on video subtitle retrieval, as the encoders for both video and text.
Compared with previous work \cite{zhang2021video} that independently extracts features from each modality before merging them using a simple Transformer \cite{vaswani2017attention} encoder, the encoders of our model have been pretrained to merge such multi-modal information, and no human efforts are needed to select useful modalities from the full set.
Experiments on three benchmarks show that our model, which is trained on noisy YouTube video clips and no data from these benchmarks, produces substantial gains over the previous state-of-the-art system \cite{zhang2021video} trained on in-domain video clips with human annotated captions.
Furthermore, our model demonstrates robust performances across all three datasets.
We suggest the limitations of our model and future directions for improvements through analysis and discussions.
Code will be released upon paper acceptance.
In summary, the main contributions are:
\begin{itemize}[leftmargin=*]
\item We are the first to study training a grammar inducer with massive general-domain noisy video clips instead of benchmark data, introducing the indeterminacy problem to the induction model.
\item We propose PTC-PCFG, a novel model for unsupervised grammar induction.
It is simpler in design than previous models and can better capture the video-text matching information.
\item Trained only on noisy YouTube videos without finetuning on benchmark data, PTC-PCFG reports stronger performances than previous models trained on benchmark data across three benchmarks.
\end{itemize}
\section{Background and Motivation}
\subsection{Compound PCFGs}
\label{sec:cpcfg}
A PCFG model in Chomsky Normal Form can be defined as a tuple of 6 terms $(\mathcal{S}, \mathcal{N}, \mathcal{P}, \Sigma, \mathcal{R}, \Pi)$, where they correspond to the start symbol, the sets of non-terminals, pre-terminals, terminals, production rules and their probabilities.
Given pre-defined numbers of non-terminals and pre-terminals, a PCFG induction model tries to estimate
the probabilities for all production rules.
The compound PCFG (C-PCFG) model \cite{kim2019compound} adopts a mixture of PCFGs.
Instead of a corpus-level prior used in previous work \cite{kurihara2006variational,johnson2007bayesian,wang2013collapsed,jin2018unsupervised}, C-PCFG imposes a sentence-specific prior on the distribution of possible PCFGs.
Specifically in the generative story, the probability $\pi_r$ for production rule $r$ is estimated by model $g$ that assigns a latent variable $\mathbf{z}$ for each sentence $\sigma$, and $\mathbf{z}$ is drawn from a prior distribution:
\begin{equation}
\pi_r = g(r, \mathbf{z}; \theta), ~~~~~ \mathbf{z} \sim p(\mathbf{z}).
\end{equation}
where $\theta$ represents the model parameters.
The probabilities for all three types of CFG rules are defined as follows:
\begin{equation}
\begin{aligned}
\pi_{S\rightarrow A}&=\frac{\exp(\mathbf{u}_A^\top f_s([\mathbf{w}_S;\mathbf{z}]))}{\sum_{A'\in \mathcal{N}} \exp(\mathbf{u}_{A'}^\top f_s([\mathbf{w}_S;\mathbf{z}]))}, \\
\pi_{A\rightarrow BC}&=\frac{\exp(\mathbf{u}_{BC}^\top [\mathbf{w}_A;\mathbf{z}])}{\sum_{B',C'\in \mathcal{N}\cup \mathcal{P}} \exp(\mathbf{u}_{B'C'}^\top [\mathbf{w}_A;\mathbf{z}]))},\\
\pi_{T\rightarrow w}&=\frac{\exp(\mathbf{u}_w^\top f_t([\mathbf{w}_T;\mathbf{z}]))}{\sum_{w'\in \Sigma} \exp(\mathbf{u}_{w'}^\top f_t([\mathbf{w}_T;\mathbf{z}]))},
\end{aligned}
\end{equation}
where $A\in\mathcal{N}$, $B$ and $C\in\mathcal{N}\cup\mathcal{P}$, $T\in\mathcal{P}$, $w\in \Sigma$.
Both $\mathbf{w}$ and $\mathbf{u}$ are dense vectors representing words and all types of non-terminals, and $f_s$ and $f_t$ are neural encoding functions.
Optimizing the C-PCFG model involves maximizing the marginal likelihood $p(\sigma)$ of each training sentence $\sigma$ for all possible $\mathbf{z}$:
\begin{equation}
\log p_\theta(\sigma) = \log \int_{\mathbf{z}} \sum_{t\in \mathcal{T_G}(\sigma)} p_\theta(t|\mathbf{z}) p(\mathbf{z}) d\mathbf{z}
\end{equation}
where $\mathcal{T_G}(\sigma)$ indicates all possible parsing trees for sentence $\sigma$.
Since computing the integral over $\mathbf{z}$ is intractable, this objective is optimized by maximizing its evidence lower bound ELBO($\sigma$; $\phi$, $\theta$):
\begin{equation}
\begin{split}
\text{ELBO}(\sigma; \phi, \theta) = \mathbb{E}_{q_\phi(\mathbf{z}|\sigma)}[\log p_\theta(\sigma|\mathbf{z})] \\
-\text{KL}[q_\phi(\mathbf{z}|\sigma)||p(\mathbf{z})],
\end{split}
\end{equation}
where $q_\phi(\mathbf{z}|\sigma)$ is the variational posterior calculated by another neural network with parameters $\phi$.
Given a sampled $\mathbf{z}$, the log-likelihood term $\log p_\theta(\sigma|\mathbf{z})$ is calculated via the inside algorithm.
The KL term can be computed analytically when both the prior $p(\mathbf{z})$ and the variational posterior $q_\phi(\mathbf{z}|\sigma)$ are Gaussian \cite{kingma2013auto}.
\subsection{Multi-Modal Compound PCFGs}
Multi-Modal Compound PCFGs (MMC-PCFG)~\cite{zhang2021video} extends C-PCFG with a model to match a video $v$ with a span $c$ in a parse tree $t$ of a sentence $\sigma$.
It extracts $M$ visual and audio features from a video $v$ and encodes them via a multi-modal transformer~\cite{gabeur2020multi}, denoted as \revision{$\bm{\Psi}=\{\bm{\psi}^i\}_{i=1}^M$}.
The word representation $\mathbf{h}_i$ of the $i$th word is computed by BiLSTM.
Given a particular span $c=w_i,\dotsc, w_j$, its representation $\mathbf{c}$ is the weighted sum of all label-specific span representations:
\begin{align}
\mathbf{c}
&= \sum_{k=1}^{|\mathcal{N}|} p(k|c, \sigma) f_k \left(\frac{1}{j-i+1} \sum_{l=i}^{j} \mathbf{h}_l\right),
\label{eq:mm_span}
\end{align}
where $\{p(k|c, \sigma)|1\leq k \leq |\mathcal{N}|\}$ are the phrasal label probabilities of span $c$.
The representation of a span $\mathbf{c}$ is then correspondingly projected to $M$ separate embeddings via gated embedding~\cite{miech2018learning}, denoted as \revision{$\bm{\Xi}=\{\bm{\xi}^i\}_{i=1}^M$}.
Finally the video-text matching loss is defined as a sum over all video-span matching losses weighted by the marginal probability of a span from the parser:
\begin{equation}
s_{mm}(v, \sigma) = \sum_{c\in\sigma} p(c|\sigma) h_{mm}(\bm{\Xi},\bm{\Psi}),
\end{equation}
where $h_{mm}(\bm{\Xi},\bm{\Psi})$ is a hinge loss measuring the distances from video $v$ to the matched and unmatched (\textit{i.e.} span from another sentence) span $c$ and $c^\prime$ and the distances from span $c$ to the matched and unmatched (\textit{i.e.} another video) video $v$ and $v^\prime$:
\begin{align}
\omega_i(&\mathbf{c})=\frac{\exp(\mathbf{u}_i^\top\mathbf{c})}{\sum_{j=1}^M\exp(\mathbf{u}_j^\top\mathbf{c})},\\
o(\bm{\Xi},&\bm{\Psi})=\sum_{i=1}^M\omega_i(\mathbf{c})\mathrm{cos}(\bm{\xi}^i,\bm{\psi}^i), \\
h_{mm}(\bm{\Xi},&\bm{\Psi}) = \mathbb{E}_{{c}^\prime}[o(\bm{\Xi}^\prime, \bm{\Psi}) - o(\bm{\Xi}, \bm{\Psi})) + \epsilon ]_+ \nonumber \\
&+ \mathbb{E}_{v^\prime}[o(\bm{\Xi}, \bm{\Psi}^\prime) - o(\bm{\Xi}, \bm{\Psi}) + \epsilon]_+,
\end{align}
where \revision{$\bm{\Xi}^\prime$ is a set of unmatched span expert embeddings of $\bm{\Psi}$, $\bm{\Psi}^\prime$ is a set of unmatched video representations of $\bm{\Xi}$,} $\epsilon$ is a positive margin, $[\cdot]_+ = max(0, \cdot)$, $\{\mathbf{u}_i\}_{i=1}^M$ are learned weights, and the expectations are approximated with one sample drawn from the training data.
During training, both ELBO and the video-text matching loss are jointly optimized.
\subsection{Limitation and Motivation}
Existing work on multi-modal grammar induction aims at leveraging strict correspondence between image/video and text for information about syntactic categories and structures of the words and spans in the text. However, such datasets are
expensive to annotate. Besides, the ambiguous correspondence between language and real-world context, observed in language acquisition, is not really reflected in such training setups.
As a result, we believe that the previous work fails to answer the following important questions: 1) how well a grammar inducer would perform when it is trained only on noisy multi-media data; 2) how the scale of training data would affect the performance and cross-domain robustness?
\begin{figure*}[t!]
\centering
\includegraphics[width=0.95\textwidth]{figures/framework.png}
\caption{The pipeline of our approach.}
\label{fig:framework}
\end{figure*}
\section{Training a Grammar Inducer with Massive YouTube Videos}
We make the first investigation into the above questions by leveraging massive video clips from instructional YouTube videos to train our grammar inducer.
Different from the benchmark data used by previous work, the YouTube video clips do not contain paired sentences.
This section will first introduce the method for generating noisy training instances (video clip and sentence pairs) from YouTube videos (\S \ref{sec:data_process}), before describing a novel grammar induction model (\S \ref{sec:model}) with pre-trained text and video encoders.
\subsection{Harvesting Training Instances from YouTube Videos}
\label{sec:data_process}
Given a YouTube video, we would like to generate a set of video clip and subtitle pairs $\Omega=\{(v, \sigma)\}$, where each subtitle $\sigma$ is a complete sentence and is aligned in time with its paired video clip $v$.
To this end, the YouTube API is chosen to obtain all subtitles of the video.
However, we observe that most obtained subtitles are not complete sentences, and in some cases a complete sentence can last for several continuous video fragments.
Meanwhile, they do not contain any punctuation, which is a key factor for sentence segmentation.
As shown in the top part of Figure \ref{fig:framework}, we design an algorithm that takes the following steps to find each complete sentence and its corresponding video clip.
\textbf{Sentence segmentation.}
In the first step, we try to find complete sentences from the subtitles.
We first concatenate all subtitles from the same video into a very long sequence of tokens.
Next, a punctuation restoration model\footnote{\revision{We manually punctuate subtitles from $10$ videos randomly selected from HowTo100M, which contains $461$ sentences after annotation. The punctuation restoration model has an overall F1 score of $74.1\%$ with the manual labels.}}~\cite{tilk2016} is adopted to insert punctuation into the sequence.
Lastly, sentences are segmented based on certain punctuation (\textit{e.g.}, ``\textit{.}'', ``\textit{?}'', ``\textit{!}'').
\textbf{Video clip extraction.}
In the second step, we trim the corresponding video clips.
Each raw subtitle contains its start and end times.
We assume each word within the raw subtitle occupies equal time and record the start and end times for each word.
After that, given a complete sentence $\sigma=w_1,w_2,...,w_N$, we use the start time of its first word $w_1$ and the end time of its last word $w_N$ as the start and end times of $\sigma$. Lastly, we segment a complete sentence $\sigma$'s corresponding video clip $v$ based on its start and end times.
\subsection{Model: Pre-Trained Compound PCFGs}
\label{sec:model}
After harvesting large-scale sentence and video pairs, the next step is to build a strong grammar induction model that can benefit from them.
In this section, we introduce our Pre-Trained Compound PCFGs (PTC-PCFG) model for unsupervised grammar induction.
As shown in the lower part of Figure~\ref{fig:framework}, the PTC-PCFG model is composed of a video encoder, a span encoder and a parsing model.
Both the video encoder and the span encoder are initialized from the MIL-NCE model~\cite{miech2020end}, a pre-trained video-text matching model that takes a simple design and has shown superior zero-shot results on many video understanding tasks, such as video retrieval, video question answering, \textit{etc}.
We first introduce the pre-trained video and span encoders, before covering the training and inference details of PTC-PCFG.
\noindent\textbf{Video encoding.}
The first step is to encode a video $v$ to its representation $\mathbf{v}$.
To do this, we first segment $v$ into small video clips, where each video clip $v_i$ consists of $T$ frames. Following~\citet{zhang2021video}, we sample $L$ video clips with equal interval for efficiency.
We use the video encoder from the MIL-NCE model~\cite{miech2020end} as our video encoder and only fine-tune its last fully connected layer $f^{v}$ for efficiency.
In more detail, for each sampled video clip, we pre-compute the input of $f^{v}$ as its representation, denoted as $\{\mathbf{h}^v_i\}_{i=1}^L$. Then we feed them into $f^v$ and average the output as its representation $\mathbf{v}$, denoted as,
\begin{align}
\mathbf{v} &= \texttt{AvgPool}(\{f^{v}(\mathbf{h}^v_i)\}_{i=1}^L),
\end{align}
where $\texttt{AvgPool}$ indicates average pooling.
\noindent\textbf{Span encoding.}
The next step is to compute a span representation $\mathbf{c}$ for each particular span $c=w_i,\dots,w_j$ $(1\leq i< j \leq N)$ in sentence $\sigma=w_1,w_2,\dots,w_N$.
The pre-trained text encoder of MIL-NCE consists of a word embedding layer and two stacked fully connected layers, $f^c_0$ and $f^c_1$.
Motivated by~\citet{zhao2020visually,zhang2021video}, we expect to learn $|\mathcal{N}|$ different span representations, each is specified for one non-terminal node.
However, directly applying the pre-trained text encoder is not feasible, since it has only one output layer $f^c_1$.
Therefore, we duplicate $f^c_1$ for $|\mathcal{N}|$ times, denoted as $\{f^c_{k}\}_{k=1}^{|\mathcal{N}|}$, and compose $|\mathcal{N}|$ label-specific output layers.
In more detail, we first encode each word $w_i$ with the word embedding layer, denoted as $\mathbf{h}^c_i$.
Then we feed the word embeddings to $f^c_0$, ReLU, maximum pooling and each label-specific output layer sequentially.
We also compute the probabilities of its phrasal labels $\{p(k|c, \sigma) |1\le k \le |\mathcal{N}|\}$, as illustrated in Section~\ref{sec:cpcfg}.
Lastly, the span representation $\mathbf{c}$ is the sum of all label-specific span representations weighted by the probabilities we predicted, denoted as:
\begin{equation}
\begin{split}
\mathbf{\tau} &= \texttt{MaxPool}(\texttt{ReLU}(f^c_0(\mathbf{h}^c_i))) \\
\mathbf{c} &= \sum_{k=1}^{|\mathcal{N}|} p(k|c, \sigma) f^c_k(\mathbf{\tau}),
\label{eq:pt_span}
\end{split}
\end{equation}
where \texttt{MaxPool} is a maximum pooling operation and \texttt{ReLU} is a ReLU activation function.
\noindent\textbf{Training.}
As shown in lower left of Figure~\ref{fig:framework}, we optimize both the video-text matching loss and evidence lower bound during training.
We first compute the similarity between a video clip $v$ and a particular span $c$ via dot product and then compute a triplet hinge loss as following,
\begin{align}
h(v,c)&=\mathbb{E}_{c^\prime}[\mathbf{c^\prime}\cdot\mathbf{v}-\mathbf{c}\cdot\mathbf{v}+\epsilon]_{+} \nonumber\\
&+\mathbb{E}_{v^\prime}[\mathbf{c}\cdot\mathbf{v^\prime}-\mathbf{c}\cdot\mathbf{v}+\epsilon]_{+},
\end{align}
where $\epsilon$ is a positive margin, $[\cdot]_+ = \max(0, \cdot)$, $v^\prime$ is a clip from a different video and $c^\prime$ is a span from a different sentence.
The video-text matching loss is correspondingly defined as,
\begin{align}
s(v,\sigma) = \sum_{c\in \sigma} p(c|\sigma)h(v,c),
\end{align}
where $p(c|\sigma)$ is the probability of a particular span $c$ being a syntactic phrase.
Finally, the overall loss function is composed by the $\mathrm{ELBO}$ and the video-text matching loss:
\begin{equation}
\mathcal{L}(\phi,\theta)=\sum_{(v,\sigma)\in \Omega}-\mathrm{ELBO}(\sigma;\phi,\theta)+\alpha s(v,\sigma),
\end{equation}
where $\alpha$ is a constant balancing these two terms.
\noindent\textbf{Inference.}
During inference, given a sentence $\sigma$, we predict the most likely tree $t^*$ without accessing videos, as shown in the lower right of Figure~\ref{fig:framework}. Since computing the integral over $\mathbf{z}$ is intractable, we estimate $t^*$ with the following approximation,
\begin{equation}
\begin{aligned}
t^*&=\argmax_t\int_{\mathbf{z}} p_\theta (t|\mathbf{z}) p_\theta(\mathbf{z}|\sigma) d \mathbf{z}\\
&\approx \argmax_t p_\theta (t|\sigma,\bm{\mu}_{\phi}(\sigma)),
\end{aligned}
\end{equation}
where $\bm{\mu}_{\phi}(\sigma)$ is the mean vector of the variational posterior $q_\phi(\mathbf{z}|\sigma)$, and $t^*$ is obtained by the CYK algorithm~\cite{cocke1969programming,younger1967recognition,kasami1966efficient}.
\section{Experiments}
\subsection{Datasets}
Following previous work, we evaluate all systems on three benchmarks (i.e., DiDeMo, YouCook2 and MSRVTT). Instead of training on benchmark data, our models are trained on the data harvested from HowTo100M dataset. Below shows more details about these datasets:
\noindent\textbf{DiDeMo}~\cite{hendricks17iccv} contains $10$k unedited personal Flickr videos. Each video is associated with roughly $3$-$5$ video-sentence pairs. There are $\numprint{32994}$, $\numprint{4180}$ and $\numprint{4021}$ video-sentence pairs in the training, validation and testing sets.
\noindent\textbf{YouCook2}~\cite{ZhXuCoAAAI18} contains $2000$ long untrimmed YouTube videos from $89$ cooking recipes. The procedure steps for each video are annotated with temporal boundaries and described by imperative English sentences.
There are $\numprint{8913}$, $\numprint{969}$ and $\numprint{3310}$ video-sentence pairs in the training, validation and testing sets.
\noindent\textbf{MSRVTT}~\cite{xu2016msr} contains $10$k generic YouTube videos accompanied by $200$k captions annotated by paid human workers.
There are $\numprint{130260}$, $\numprint{9940}$ and $\numprint{59794}$ video-sentence pairs in the training, validation and testing sets.
\noindent\textbf{HowTo100M}~\cite{miech2019howto100m} is a large-scale dataset of $136$ million video clips sourced from $1.22$M narrated instructional web videos depicting humans performing more than $23$k different visual tasks.
Note that $404$ videos in HowTo100M also exist in YouCook2; we exclude these videos during training.
\subsection{Evaluation}
We discard punctuation, lowercase all words, replace numbers with a special token and ignore trivial single-word and sentence-level spans during testing following~\citet{kim2019compound}.
Besides, we follow previous work \cite{shi2019visually,zhang2021video} by using a state-of-the-art constituency parser (Benepar~\citealt{kitaev-etal-2019-multilingual}) to obtain the reference trees for evaluation\footnote{\revision{For each dataset, we randomly select $50$ sentences and manually label their constituency parse trees. Benepar has S-F1 scores of $98.1\%$ (DiDeMo), $97.2\%$ (YouCook2) and $98.1\%$ (MSRVTT) with manual labels.}}. \revision{Following~\citet{shi2020role,zhang2021video}, all models are run $5$ times for $1$ epoch with different random seeds. For each model, we report the averaged sentence-level F1 (S-F1) and corpus-level F1 (C-F1) of its runs on each testing set.}
\subsection{Implementation Details}
\label{sec:implementation_details}
We use spaCy\footnote{\url{https://spacy.io/}} for tokenization and keep sentences with fewer than $40$ words for training due to the limited computational resources.
Each video is decoded at $16$ fps and $L=8$ video clips are sampled in total, where each clip contains $T=16$ frames.
We train baseline models, C-PCFG and MMC-PCFG with the same hyper-parameters suggested by \citet{kim2019compound} and \citet{zhang2021video}.
The parsing model of PTC-PCFG has the same hyper-parameter setting as C-PCFG and MMC-PCFG (please refer to their papers for details).
The constant $\alpha$ is set to $1$.
We select the top $\numprint{20000}$ most common words in HowTo100M as vocabulary for all datasets.
All baseline methods and ours are optimized by Adam~\cite{kingma2014adam} with a learning rate of $0.001$, $\beta_1=0.75$ and $\beta_2=0.999$.
All parameters (except the video-text matching model in PTC-PCFG) are initialized with Xavier uniform initializer~\cite{glorot2010understanding}.
All our models in experiments are trained for $1$ epoch with batch size of $32$, without finetuning on the target dataset.
\subsection{Main Results}
Figure~\ref{fig:didemo_scale}-\ref{fig:msrvtt_scale} compare our proposed PTC-PCFG approach with recently proposed state-of-the-art models: C-PCFG~\cite{kim2019compound} and MMC-PCFG\footnote{Since audios are removed by HowTo100M authors, we implement MMC-PCFG with video features only, including object features(ResNeXt, SENet), action features (I3D, R2P1D, S3DG), scene features, OCR features and face features.}~\cite{zhang2021video}.
To pinpoint more fine-grained contributions, we also train these models on HowTo100M data.
\noindent\textbf{The effectiveness of HowTo100M.}
\revision{
We find that C-PCFG achieves better performance when it is trained with more instances from HowTo100M than with the original in-domain training sets, where the largest improvements are $+18.1\%$, $+21.7\%$ and $+1.4\%$ S-F1 scores on DiDeMo, YouCook2 and MSRVTT, respectively.
}
These results indicate that grammar inducers are generally robust against the instances with noisy text-video correspondence.
As a result, learning from noisy YouTube videos can benefit the model's overall performance and its generalization ability across multiple domains.
\noindent\textbf{The effectiveness of PTC-PCFG.}
Comparing
C-PCFG, MMC-PCFG and PTC-PCFG trained on different amounts of HowTo100M data, we find that PTC-PCFG achieves the best performance on all three datasets.
It can further improve S-F1 to \revision{$+6.3\%$} on DiDeMo, \revision{$+16.7\%$} on YouCook2 and \revision{$+2.8\%$} on MSRVTT.
This demonstrates the effectiveness of the PTC-PCFG model.
In particular, utilizing the video and span encoders pre-trained on a relevant task (\textit{e.g.}, video retrieval) can benefit unsupervised grammar induction.
\noindent\textbf{Performance comparison over data scale.}
On DiDeMo and MSRVTT, we observe that PTC-PCFG achieves the best performance with \revision{$592$}k HowTo100M training samples, and further increasing the number of training instances does not improve the parsing performance on these two datasets.
In contrast, the performance gain of PTC-PCFG on YouCook2 further increases with increasing training data.
The reason can be that the domain of HowTo100M is closer to YouCook2 (both are instructional videos) than the other two datasets.
Future work includes adding data from other sources to make the whole training set more domain-generic.
\begin{figure}[t!]
\centering
\includegraphics[width=0.45\textwidth]{figures/didemo_scale.png}
\caption{Performance Comparison on DiDeMo. The dotted lines and their enclosed area represent the mean and variance of each model trained on HowTo100M at different scales. We mark the highest average S-F1 achieved by each method with numbers. The remaining figures follow the same notations.}
\label{fig:didemo_scale}
\end{figure}
\begin{figure}[t!]
\centering
\includegraphics[width=0.45\textwidth]{figures/youcook2_scale.png}
\caption{Performance Comparison on YouCook2.}
\label{fig:youcook2_scale}
\end{figure}
\begin{figure}[t!]
\centering
\includegraphics[width=0.45\textwidth]{figures/msrvtt_scale.png}
\caption{Performance Comparison on MSRVTT.}
\label{fig:msrvtt_scale}
\end{figure}
\begin{table*}[t!]
\small
\centering
\caption{Performance comparison across different training set. We use HT to represent HowTo100M dataset for short, where the number in the brackets indicates the number of samples used for training. The values highlighted by \textbf{bold} and \textit{italic} fonts indicate the top-2 methods, respectively. All numbers are shown in percentage($\%$). The remaining tables follow the same notations.}
\begin{tabular}{cccccccc}
\toprule
\multirow{2}{*}{Method} & \multirow{2}{*}{Trainset} & \multicolumn{2}{c}{DiDeMo} & \multicolumn{2}{c}{YouCook2} & \multicolumn{2}{c}{MSRVTT}\\
\cmidrule(lr){3-4}
\cmidrule(lr){5-6}
\cmidrule(lr){7-8}
& & C-F1 & S-F1 & C-F1 & S-F1 & C-F1 & S-F1 \\
\midrule
MMC-PCFG & DiDeMo & ${55.0}_{\pm3.7 }$ & ${58.9}_{\pm3.4 }$ & $49.1_{\pm4.4}$ & $53.0_{\pm4.9}$ & $49.6_{\pm1.4}$ & $53.8_{\pm0.9}$ \\
MMC-PCFG & YouCook2 & $40.1_{\pm4.4}$ & $44.2_{\pm4.4}$ & ${44.7}_{\pm5.2}$ & ${48.9}_{\pm5.7}$ & $34.0_{\pm6.4}$ & $37.5_{\pm6.8}$ \\
MMC-PCFG & MSRVTT & $\mathit{59.4}_{\pm2.9}$ & $\mathit{62.7}_{\pm3.3}$ & $49.6_{\pm3.9}$ & $54.2_{\pm4.1}$ & $\mathit{56.0}_{\pm1.4}$ & $60.0_{\pm1.2}$ \\
MMC-PCFG & HT(592k) & $58.5_{\pm7.3}$ & $62.4_{\pm7.9}$ & $\mathit{53.9}_{\pm6.6}$ & $\mathit{58.0}_{\pm7.1}$ & $55.1_{\pm7.0}$ & $\mathit{60.2}_{\pm8.0}$ \\
\textbf{PTC-PCFG} & HT(592k) & $\mathbf{61.3}_{\pm3.9}$ & $\mathbf{65.2}_{\pm5.3}$ & $\mathbf{58.9}_{\pm2.5}$ & $\mathbf{63.2}_{\pm2.3}$ & $\mathbf{57.4}_{\pm4.6}$ & $\mathbf{62.8}_{\pm5.7}$ \\
\bottomrule
\end{tabular}
\label{tab:cross-dataset}
\end{table*}
\subsection{Cross-dataset Evaluation}
We evaluate the robustness of models across different datasets, as shown in Table~\ref{tab:cross-dataset}.
Comparing MMC-PCFG trained on in-domain datasets (Rows $1$-$3$), we can observe that MMC-PCFG trained on MSRVTT achieves the best overall performance, while MMC-PCFG trained on YouCook2 is the worst.
We believe this is due to the different number of training instances\footnote{The number of training instances in YouCook2, DiDeMo and MSRVTT are 8.9K, 32.9K and 130.2K, respectively.} and the domain gap between different datasets.
Comparing Rows $1$-$4$, we can observe that the MMC-PCFG model trained on HT(592k) (Row $4$) is the best or the second place regarding C-F1 and S-F1 compared with its variants trained on in-domain datasets (Rows $1$-$3$).
This demonstrates that our processed video-text training instances are abundant, rich in content and can serve general purposes.
Comparing Rows $4$ and $5$, PTC-PCFG outperforms MMC-PCFG in both C-F1 and S-F1 in all three datasets and has smaller variance.
This demonstrates that our model can leverage pre-trained video-text matching knowledge and learn consistent grammar induction.
\begin{table*}[t!]
\small
\centering
\caption{Performance comparison across different video and span encoders.}
\begin{tabular}{ccccccccc}
\toprule
\multicolumn{2}{c}{Video-Text Model} & \multirow{2}{*}{Trainset} & \multicolumn{2}{c}{DiDeMo} & \multicolumn{2}{c}{YouCook2} & \multicolumn{2}{c}{MSRVTT}\\
\cmidrule(lr){1-2}
\cmidrule(lr){4-5}
\cmidrule(lr){6-7}
\cmidrule(lr){8-9}
Video Encoder& Span Encoder & & C-F1 & S-F1 & C-F1 & S-F1 & C-F1 & S-F1 \\
\midrule
MIL-NCE & LSTM & HT(296k) & $52.4_{\pm5.5}$ & $54.4_{\pm5.4}$ & $51.5_{\pm5.4}$ & $56.5_{\pm5.2}$ & $49.7_{\pm5.5}$ & $53.4_{\pm5.8}$ \\
MM & LSTM & HT(296k) & $53.6_{\pm3.2}$ & $55.8_{\pm3.1}$ & $53.1_{\pm5.7}$ & $57.9_{\pm5.6}$ & $48.9_{\pm3.5}$ & $52.5_{\pm3.6}$ \\
MIL-NCE & TinyBERT & HT(296k) & $\mathit{54.8}_{\pm5.4}$ & $\mathit{56.4}_{\pm6.0}$ & $\mathit{55.7}_{\pm4.0}$ & $\mathit{60.2}_{\pm3.5}$ & $\mathit{52.3}_{\pm4.3}$ & $\mathit{56.0}_{\pm5.0}$ \\
MIL-NCE & MIL-NCE & HT(296k) & $\mathbf{59.5}_{\pm4.3}$ & $\mathbf{63.7}_{\pm4.7}$ & $\mathbf{57.1}_{\pm1.7}$ & $\mathbf{62.1}_{\pm1.3}$ & $\mathbf{55.7}_{\pm5.0}$ & $\mathbf{61.1}_{\pm5.9}$ \\
CLIP & CLIP & HT(296k) & $52.9_{\pm2.3}$ & $54.9_{\pm2.6}$ & $53.3_{\pm2.2}$ & $58.9_{\pm2.1}$ & $49.1_{\pm2.6}$ & $53.0_{\pm2.9}$ \\
\bottomrule
\end{tabular}
\label{tab:pretraining}
\end{table*}
\subsection{Effectiveness of Pre-Training}
In this section, we explore how different pre-trained video and text encoders can affect the parsing performance, and the results are shown in Table~\ref{tab:pretraining}.
In particular, we study different video encoders\footnote{~\revision{We list the video processing details in Appendix~\ref{appendix:preprocessing_details}.}}, including the S3D-based encoder from MIL-NCE~\cite{miech2020end} (\textit{MIL-NCE}), the multi-modal video encoder from MMC-PCFG~\cite{zhang2021video} (\textit{MM}) and the CLIP model for image-text pre-training~\cite{radford2021learning} (\textit{CLIP}).
We also investigate various text encoders, including an LSTM encoder with random initialization~\cite{zhang2021video,zhao2020visually}, a pre-trained TinyBERT~\cite{jiao2020tinybert} model, the text encoder from MIL-NCE~\cite{miech2020end}, and the text encoder from CLIP~\cite{radford2021learning}.
Comparing Rows $1$ and $2$, we can observe that MM is better than the video encoder of MIL-NCE regarding C-F1 and S-F1 on all three datasets, as MM provides more comprehensive video features.
By comparing Rows $1$ and $3$, we can also observe that TinyBERT, which is distilled from BERT \cite{devlin2019bert}, outperforms the randomly initialized LSTM encoder.
However, both MM and TinyBERT are independently trained only on vision or language tasks, where the vision-language correspondences are not considered during pre-training.
Therefore, we further investigate the encoders jointly pre-trained on large scale multi-media datasets, including the video-text matching model MIL-NCE (Row $4$) and the image-text matching model CLIP (Row $5$).
We can observe that leveraging both video and text encoders from MIL-NCE can improve the parsing performance by a large margin on all three datasets.
On the other hand, CLIP does not perform well, since it is designed for static images and other multi-modal information (e.g., motion) is ignored.
\subsection{Qualitative Analysis}
\begin{figure}
\centering
\includegraphics[width=0.48\textwidth]{figures/example.png}
\caption{Parse trees predicted by different models for sentence \textit{a lady describing the groceries she had kept in her refrigerator}. The \textcolor{red}{red} line shows the difference between the predicted trees and the reference tree.}
\label{fig:examples}
\end{figure}
In Figure~\ref{fig:examples}, we visualize a parse tree predicted by the best run of C-PCFG trained on MSRVTT, MMC-PCFG trained on MSRVTT, MMC-PCFG trained on HT(296k) and PTC-PCFG trained on HT(296k), as well as its reference tree. We can observe that C-PCFG trained on MSRVTT fails at the noun phrase ``\textit{a lady}'', while MMC-PCFG trained on MSRVTT succeeds. MMC-PCFG can be further improved by training on HT(296k); however, it fails at the noun phrase ``\textit{the groceries she had kept in her refrigerator}''. Our PTC-PCFG can leverage the pre-trained matching knowledge and make the correct prediction.
\section{Related Work}
\noindent\textbf{Grammar Induction}
has a long and rich history in computational linguistics.
Earlier work~\cite{shen2018neural,shen2018ordered,drozdov2019unsupervised,kim2019compound,Jin2019-us,yang-etal-2021-neural,yang-etal-2021-pcfgs} on grammar induction with pure unsupervised learning showed promising results. Instead of learning purely from text, recent work improved the parsing performance with paired images~\cite{shi2019visually,zhao2020visually} or videos~\cite{zhang2021video}. However, they are all limited to small benchmarks and specified for a few domains. In contrast, our work leverages massive noisy video-subtitle pairs from YouTube without any manual annotations.
\noindent\textbf{Video Retrieval}
has been a hot topic in the computer vision field for many years. Earlier approaches focused on model design~\cite{gabeur2020multi,zhang2019exploiting}, while more recent approaches~\cite{radford2021learning,miech2020end} focused on the pre-training on a large scale dataset and demonstrated superior zero-shot results on
many downstream tasks.
These models are simple in design and provide representative features with less human effort in annotations. In this work, we demonstrate that unsupervised grammar induction can also benefit from the pre-trained video-text model.
\section{Conclusion}
In this paper, we have investigated how massive instructional YouTube video and subtitle pairs can improve grammar induction. We have also proposed a new model that leverages the latest advances in multi-modal pre-training to learn better video-span correlation. Experiments on three benchmarks demonstrate superior and robust performances of our model over previous systems.
We leave exploring other pre-trained video-text matching models and more publicly available data (e.g., YouTube videos from other domains and TV shows) in future work.
\section{Limitations}
Although our model faces an indeterminacy problem similar to the one children face, and results show that induction works even with noisy correspondence, there are a few factors which prevent this result from being directly applied to language acquisition. Our models only use instructional video and do not have the capability to interact with the world, both of which are unrealistic for human language learners. The complexity of the PCFG induction algorithm we use is cubic in the number of syntactic categories, which potentially limits the usefulness of larger amounts of data, where finer subcategories may be learned. Algorithms such as in \citet{yang-etal-2021-pcfgs} could be used in conjunction with multimodal inputs to examine this issue.
Following previous work, our experiments are only conducted on English video-text datasets. However, our framework is general for grammar induction in many languages.
Since our training instances are originally collected from Internet and are uploaded by users, the dataset itself might have misinformation.
Meanwhile, training a model on a large-scale dataset could have high cost in energy and carbon emission. We list our computational cost of our experiments in Appendix~\ref{appendix:computational_cost}.
|
1,314,259,995,024 | arxiv | \section{Introduction}
Analyzing correlations among cricket teams of different eras has been a topic of interest for sports experts and journalists for decades. In this paper we study such influence (or interaction) by constructing the cross-correlation matrix $C$ \cite{plerou,fin,fin1,fin2,fin3,quantrans} formed by runs scored by teams over different time intervals, formally called a time series. We consider the time series of batting scores posted per innings by a team in all official ICC International Test matches played. Then we construct an ensemble of cross-correlation matrices corresponding to Test data for that cricket team. We repeat the process for One Day International (ODI) and Indian Premier League (IPL) T20 cricket matches. We assume
the correlations to be random and compare the fluctuating properties of $C$ with that of random matrices. Within the bounds imposed by the RMT model, fluctuations of
$C$ show brilliant agreement with the ``universal'' results of GUE \cite{mehta,ghosh,ghoshbook}, while the level density corresponds to the MP distribution \cite{marchenko}. This implies that
interactions in $C$ are random, or in simple words not governed by any causality principle. However, outside the bounds, eigenvalues of $C$ show departure from RMT predictions, implying
influence of external non-random factors common to all matches played during this period. To understand this effect, we remove $k$ extreme bands from $C$ and perform the Kolmogorov-Smirnov (KS) Test. We observe a better agreement with RMT predictions.
We organize the paper as follows: After a brief description of the data analyzed in sub-section [\ref{sec:data}], we define cross-correlation matrix in sub-section[\ref{sec:acm}]. Section[\ref{sec:rmt}] introduces our RMT model along with a brief proof of MP distribution. We analyze our results and its corresponding RMT model in Section [\ref{sec:analysis}]. This is followed by concluding remarks.
\subsection{Data analysed}
\label{sec:data}
We construct three ensembles, corresponding to runs scored in Tests, ODIs and Indian Premier League (IPL).
\begin{itemize}
\item The ODI ensemble comprises cross-correlation matrices constructed from runs scored by India, England, Australia, West Indies, South Africa, New Zealand, Pakistan and Sri Lanka for all official ICC One Day International matches played between 1971 and 2014. For each country we have a sequence of runs scored in both home and away matches. An ensemble of fifty-one $90\times 90$ matrices is constructed from the time series data.
\item The Test ensemble comprises cross-correlation matrices constructed from runs scored by India, England, Australia, West Indies, South Africa, New Zealand, Pakistan and Sri Lanka. For each country we have a sequence of runs scored per innings (each match has a maximum of two innings) in both home and away matches. The Test scores have been taken for all matches played between England, Australia and South Africa between 1877 and 1909 and all official ICC Test matches thereafter, till 2014. An ensemble of seventy $90 \times 90$ matrices is constructed from the time series data.
\item The IPL ensemble comprises cross-correlation matrices constructed from runs scored by Chennai Super Kings, Rajasthan Royals, Royal Challengers Bangalore, Delhi Daredevils, Kings XI Punjab, Kolkata Knight Riders and Mumbai Indians for all official BCCI IPL T20 matches played between 2008 and 2014. For each team we have a sequence of batting scores posted per match. An ensemble of twenty-eight $20 \times 20$ matrices is constructed from the time series data.
\end{itemize}
\subsection{Cross-correlation matrix}
\label{sec:acm}
Cross-correlation matrix $C$ is constructed from a given time series $X=\left\lbrace X(1),X(2),\ldots\right\rbrace$ by defining subsequences
$X_{i}=\left\lbrace X(i),X(i+1),\ldots,X(N) \right\rbrace$ and
$X_{j}=\left\lbrace X(j),X(j+1),\ldots,X(N-\Delta t) \right\rbrace$,
separated by a ``lag'' $\Delta t = i-j$, $j<i$ and $i,j \in \mathbb{N}$.
We then normalize the subsequences by defining
\begin{equation}
Y_i=\frac{X_i - \mu_{X_i}}{\sigma _{X_i}}.
\end{equation}
Finally, cross-correlation matrix $C$\cite{plerou} is defined as
\begin{equation}
C_{i,j}=\left< Y_i Y_j \right>,
\end{equation}
where $\mu_{X_i}$ and $\sigma_{X_{i}}$ are the sample mean score and standard deviation of the subsequence $X_i$ respectively, and $\left<\ldots\right>$ denotes a time average over the period studied. This is the correlation coefficient between the subsequences $Y_i$ and $Y_j$ and helps us understand the correlation between runs scored by a given team at different time intervals. The matrix elements lie between $-1$ and $1$ and the matrices so constructed are Hermitian.
Now, we construct multiple matrices on a single time series, giving rise to an ensemble of matrices. Letting $C^{(1)}=C$ (as constructed above), we construct another matrix $C^{(2)}$ by removing first $N$ elements of the time series considered, and constructing the cross-correlation matrix with the method described above. We continue this process of construction till the length of the truncated time series becomes less than $N$.
\section{Random Matrix Model}
\label{sec:rmt}
The Unitary Ensemble of random matrices is invariant under the unitary transformation $H\rightarrow W^{\dagger}HW$, where the ensemble is defined in the space $T_{2G}$ of Hermitian matrices and $W$ is any unitary matrix. Also, the various linearly independent elements of $H$ must be statistically independent\cite{mehta}.
Joint probability distribution function of eigenvalues $\{ x_1,x_2,...,x_N \}$ is given by,
\begin{equation}
\label{jpdf}
P_{N\beta}(x_1,..,x_N)=C_{N \beta} \, \prod_{j=1}^{N}x_{j}^{N\beta a}\exp\left(-N\beta b \sum_{j=1}^{N}x_j\right)\prod_{j<k}\left | x_j-x_k \right |^\beta,
\end{equation}
where $\beta=1,2$ and $4$ correspond to orthogonal (OE), unitary (UE) and symplectic (SE) ensembles respectively and $C_{N\beta}$ is the normalization constant \cite{mehta}.
We define $n$-point correlation function by
\begin{equation}
R_{n}^{(\beta)}(x_1,..,x_n)=\frac{N!}{(N-n)!}\int dx_{n+1}\ldots\int dx_{N}P_{N\beta}(x_1,..,x_N).
\end{equation}
This gives a hierarchy of equations \cite{ghoshbook} given by
\begin{eqnarray}
\label{hier}
\beta R_{1}(x)\int\frac{R_{1}(y)}{(x-y)}dy+\frac{w'(x)}{w(x)}R_{1}(x)=0,
\end{eqnarray}
where
\begin{equation}
w(x)=x^{N\beta a}\exp[{-N\beta b x}].
\end{equation}
\begin{figure}[H]
\centering
\includegraphics[width=0.7\textwidth]{testld.pdf}
\caption{Level Density for averaged Test data with $k=5$. The solid line refers to the Marchenko-Pastur result (\ref{den(th)}) and the dashed line refers to the finite $N$ result, obtained by the polynomial method described in Section~\ref{sec:rmt}. Here, $a=2.75$, $b=3.535$, $X_{-}=0.339601$ and $X_{+}=1.78204$ in (\ref{den(th)}). The largest eigenvalue is circled towards the end of the spectrum.}\label{ld}
\end{figure}
We solve the integral equation using the resolvent
\begin{equation}
G(z)=\int\frac{R_{1}(y)}{z-y}dy,
\end{equation}
which satisfies
\begin{equation}
G(x+i0)=\int\frac{R_{1}(y)}{x-y}dy-i\pi R_{1}(x).
\end{equation}
Multiplying Eq.(\ref{hier}) by $x/(z-x)$ and integrating over $x$ we get after some elementary calculation
\begin{eqnarray}
\label{den(th)}
\rho(x)\equiv \frac{R_{1}(x)}{N} &=& \frac{b}{\pi x}\sqrt{(x-X_{-})(X_{+}-x)};\hspace{1cm} X_{-}<x<X_{+},\\
\nonumber
&=& 0, \hspace{2cm}\textrm{otherwise.}
\end{eqnarray}
where
\begin{equation}
X_{\pm}=\frac{a+1}{b}\pm \frac{\sqrt{2a+1}}{b}.
\end{equation}
For finite $N$, following Dyson-Mehta method \cite{mehta}, we use
\begin{equation}
\label{den(fin)}
\rho(x)=\frac{1}{N}\sum_{j=0}^{N-1}\phi^{2}_{j}(x),\hspace{1cm}\phi_{j}(x)=\sqrt{w(x)}P_{j}(x),
\end{equation}
where $P_{j}(x)$ are orthonormal polynomials which satisfy
\begin{equation}
\int_{X_{-}}^{X_{+}}P_{j}(x)P_{k}(x)w(x)dx=\delta_{j,k},\hspace{1cm}j,k\in \mathbb N.
\end{equation}
To understand the correlation in the system, we first need to unfold the eigenvalues to eliminate global effect over fluctuation. The sequence of scores for each country is unfolded independently. The corresponding unfolded eigenvalues $y_k$ are given by\cite{verbaarschot},
\begin{equation}\label{unf}
y_k=\int_{X_-}^{x_k}\rho(x)\mathrm{d}x,
\end{equation}
and the mean spacing of the unfolded eigenvalues $y_k$ is 1. We perform unfolding using both (i) the theoretical level density (\ref{den(th)}) and (ii) numerical integration of the data and obtain the best-fit over the integrated density.
\begin{figure}[H]
\centering
\begin{subfigure}[b]{0.47\textwidth}
\includegraphics[width=\textwidth]{testsd.pdf}
\caption{Theoretical unfolding}
\end{subfigure}\hfill
\begin{subfigure}[b]{0.47\textwidth}
\includegraphics[width=\textwidth]{testnumsd.pdf}
\caption{Numerical unfolding}
\end{subfigure}
\caption{Nearest neighbour spacing distribution for mixed and averaged Test data obtained via numerical and theoretical unfolding (using Marchenko-Pastur result (\ref{den(th)}) with $a=2.75$, $b=3.535$, $X_{-}=0.339601$ and $X_{+}=1.78204$). The solid line refers to spacing distribution of experimental data with $k=5$, the dotted line refers to GUE result and the dashed line refers to the Poisson case.}\label{sd}
\end{figure}
For $\left \{S_i | S_i=y_{i+1}-y_i \right \}$, $s_i=S_i/D$ where $y_i$ denote successive unfolded levels and $D$ is the average spacing, the level spacing distribution $p(s)ds$ is defined as the probability of finding an $s_i$ between $s$ and $s+ds$ \cite{mehta}.
For no correlations between the levels, we have the Poisson distribution
\begin{equation} \label{poisson}
p(s)=\exp[-s],
\end{equation}
while for GUE, we get the Wigner's surmise
\begin{equation} \label{guespacing}
p(s)=\frac{32 s^2}{\pi^2} \exp \left[-\frac{4}{\pi} s^2\right].
\end{equation}
We consider $8$ sequences of eigenvalues for Test data obtained by ensemble averaging over each country. We unfold these sequences individually and average over the $8$ sequences of spacings. The result shows remarkable agreement with GUE predictions (Fig. \ref{sd}). Upon mixing of the eigenvalues of the Test data we observe Poisson distribution (Fig. \ref{sd}).
\begin{figure}[H]
\centering
\includegraphics[width=0.7\textwidth]{testnv.pdf}
\caption{Number variance for the averaged and mixed Test data obtained via numerically unfolding over the spectra. The solid line refers to GUE result (\ref{numbervariance}) and the dashed line refers to Poisson case. The figure plots three cases: (i) Averaged Test data with $k=5$ extreme diagonals removed (ii) Mixed Test data with $k=5$ extreme diagonals removed and (iii) Mixed Test data for the entire spectrum when no diagonals are removed from the matrices.}\label{nv}
\end{figure}
Another statistic considered is the linear statistic or the number variance. For $n_k$ unfolded levels in consecutive sequences of length $n$, we define the moments \cite{verbaarschot},
\begin{equation}
M_p(n)=\frac{1}{N}\sum_{k=1}^{N}n_k^p,
\end{equation}
where $N$ is the number of sequences considered, each of length $n$ covering the entire spectrum. Then the number variance $\Sigma^2(n)$ is given by
\begin{equation}
\Sigma^2(n)=M_2(n)-n^2.
\end{equation}
For GUE, number variance is given by \cite{mehta},
\begin{equation} \label{numbervariance}
\Sigma^2(n)=\frac{1}{\pi^2}\left(\ln(2\pi n)+ \gamma +1 \right),
\end{equation}
where $\gamma$ is the well known Euler constant. Number variance is known to be very sensitive for larger values of $n$ on account of spectral rigidity. Fig \ref{nv} shows a very good agreement of the experimental number variance result of the Test data to that of the GUE result for cases when $k=0$ and $k=5$ extreme diagonals are removed from both ends of the matrices involved in calculation.
\begin{figure}[H]
\centering
\includegraphics[width=0.7\textwidth]{testnumdel.pdf}
\caption{The Dyson-Mehta least squares statistic for the averaged and mixed Test data with $k=5$ extreme diagonals removed from both ends of the matrices involved in calculation obtained via numerically unfolding the spectrum . The solid line refers to the GUE result (\ref{guedel}) and the dashed line refers to the result for the Poisson case (\ref{poisdel}).}\label{del}
\end{figure}
The other statistics considered is the Dyson-Mehta least square statistic or the spectral rigidity statistic\cite{mehta} which measures the long-range correlations and irregularity in the level series in the system by calculating the least square deviation of the unfolding function from a straight line $y=a E + b$ over different ranges $L$. The statistic $\Delta(L)$ for $L=L_2-L_1$ is given by the integral,
\begin{equation}\label{delta}
\Delta (L)=\frac{1}{L}\int_{L_1}^{L_2}(N(E)-aE-b)^2dE,
\end{equation}
where $N(E)$ is the unfolding function. The mean value of the statistic for the GUE case is given by \cite{mehta},
\begin{equation}\label{guedel}
\left \langle \Delta \right \rangle = \frac{1}{2 \pi^2} (\ln (2 \pi L) + \gamma - 5/4).
\end{equation}
For Poisson case, the least square statistics is given by
\begin{equation}\label{poisdel}
\left \langle \Delta \right \rangle = \frac{L}{15}.
\end{equation}
\section{Analysis}
\label{sec:analysis}
The problems that one encounters in the analysis of such data are:
1. The finite length of the time series available introduces measurement noise.
2. A longer time series will introduce more contributions from non-random events, which will affect the ``universality'' result
but will provide information about the correlations among different time series.
We study the RMT model defined by Eq.(\ref{jpdf}). We obtain MP distribution (\ref{den(th)}) for the level density as $N\rightarrow \infty$. We observe that
the level density of eigenvalues of $C$ in the bulk shows a remarkable agreement with the MP distribution for all Test, ODI and IPL data. However, some large eigenvalues exist outside the bounds $[X_-, X_+]$. To ensure that these eigenvalues are not due to finite $N$ effect, we obtain level-density for finite $N$.
For this, we develop the corresponding orthonormal polynomials using Gram-Schmidt method and using Eq.(\ref{den(fin)}) for $N=10$ obtain the level density and
compare that with ensembles of cricketing data. (Fig. \ref{ld}). We observe that the large eigenvalues still remain outside the bounds.\\
The next question is whether these large eigenvalues are non-random, in which case our RMT model will not only show disagreement with the level density but also ``spoil'' the RMT predictions. To verify this, we perform RMT analysis over the entire spectrum and compare its results with those of the truncated sparse matrix, which removes the large eigenvalues. The KS test shows that our level density and spacing distribution analysis is considerably hampered by the presence of these large eigenvalues, thereby confirming the existence of non-random long-range correlations.
To track the level of non-randomness, we remove $k$ ($k \ll N$) extreme bands out of $2N-1$ bands of the $N\times N$ matrices $C$ and perform the KS test. We perform numerical unfolding over the eigenvalues where the integrated density of states is fitted with a polynomial. For ODI, where $N=90$, we obtain a p-value of $0.640311$ for the full spectrum and a p-value of $0.9025$ for the spectrum of the matrix with $k=15$. For the Test data (again $N=90$), we obtain a p-value of $0.49$ for unfolding the full spectrum and a p-value of $0.855394$ when unfolding the spectrum of the matrix with $k=5$. Thus
by creating a sparse matrix, which removes the large eigenvalues, our results converge to RMT predictions by $\approx30 \%$. This proves the existence of non randomness in the system introduced by elements $C_{ij}$, with $|i-j|\approx N$.
We observe that as we increase the value of $k$, the largest eigenvalue in the spectrum gradually reduces and converges towards the bound imposed by the RMT model, as shown in Fig.~\ref{maxeigs}.
We then do theoretical unfolding on the new data and observe similar agreement on KS test.
For the number variance calculation, we first unfold the spectrum and calculate number variance both within bounds and over the entire spectrum. The former gives a good agreement with GUE while
the latter, as expected, shows deviation, pointing towards the presence of large eigenvalues which are due to correlation coefficients between runs scored over a long time gap.
\begin{figure}[H]
\centering
\includegraphics[width=0.7\textwidth]{maxeigs.pdf}
\caption{Largest eigenvalue in the averaged spectrum vs. $k$ for the Test, ODI and IPL data}\label{maxeigs}
\end{figure}
Finally, theoretical unfolding is performed over the spectra using Eqs.(\ref{unf}) and (\ref{den(th)}). The MP distribution parameters for the Test data ($k=5$) are given in Fig. \ref{sd}. For the ODI data ($k=15$), we have $a=2.475$, $b=3.15$, $X_{-}=0.328806$ and $X_{+}=1.87754$ as the optimal parameters for Eq. \ref{den(th)}.
Lastly, we mix levels obtained from the time series of all teams and observe a Poisson distribution (Fig. \ref{sd}).
\section{Conclusion}
\label{sec:conclusion}
From the statistical analysis of Test, ODI and IPL data, we conclude that the eigenvalues of cross-correlation matrices display GUE universality. The Test and ODI data are the only sets of data we found to be large enough to give results of the nature produced in this paper. Thus, even though the T20 results of the BCCI IPL matches are also considered, the small-$N$ effect is visible in our GUE results.
We observe the Wigner surmise when we study the ensembles of different countries (in Tests and ODIs)/teams (IPL) separately. However, upon mixing the data of all countries, we get Poisson statistics, both for
spacing and number variance. Here we may recall that while studying nuclear data statistics \cite{pandey}, eigenvalues with same spin show GOE but mixed data gives Poisson.
To ensure that the large eigenvalue which lies outside the bounds are not due to the size of the matrices, we obtain the level density using the polynomial method for finite $N$. We observe that the
large eigenvalues were still lying well outside the bounds. Also while numerical unfolding over the whole spectra (and not under the MP bound), we observe that the number variance show departure from GUE. However, by removing the long-range interaction terms from $C$, we observe a better agreement with RMT predictions, both for level density as well as spacing distribution and number variance.
We believe that eigenvalues close to the upper bound still maintain randomness and any deviation is due to temporal effects, for example, scores getting affected due to a sudden burst of performance of an individual player over a tournament or bilateral series.
However, the larger eigenvalues are probably caused due to more stable, non random influence like the effect on cricketing performance due to the advent of new technology. However this needs a thorough investigation. We wish to come back to this in a later publication.
\section*{Acknowledgement}
We acknowledge ESPN Cricinfo for providing us with the cricket data.
\bibliographystyle{unsrt}
|
1,314,259,995,025 | arxiv | \section*{Abstract}
R2BEAT ("R 'to' Bethel Extended Allocation for Two-stage sampling") is an R package for the allocation of a sample.
Besides other software and packages dealing with the allocation problems, its peculiarity lies in facing properly allocation problems for complex sampling designs with multi-domain and multi-purpose aims.
This is common in many official and non-official statistical surveys, therefore R2BEAT could become an essential tool for planning a sample survey.
The package implements the \cite{tschprow1923optimal} - \cite{neyman1934optimal} method for the optimal allocation of units in stratified sampling, extending it to the multivariate (accordingly to \citeauthor{bethel1989sample}'s proposal (\citeyear{bethel1989sample})), multi-domain and to the complex sampling designs case \citep{falorsi1998principi}.
The functions implemented in R2BEAT allow the use of different workflows, depending on the available information on one or more interest variables.
The package covers all the phases, from the optimization of the sample to the selection of the Primary and Secondary Stage Units.
Furthermore, it provides several outputs for evaluating the allocation results.\\
\noindent
\textbf{Keywords} sample survey, multistage, multipurpose, optimal allocation, sample selection.
\section{Introduction}
\label{sec:intro}
National Statistical Institutes (NSIs) and other official statistics institutions usually stratify the target population into homogeneous groups, defined by variables.
Survey data usually benefits from stratification, and sampling error decreases.
However, from a logistic point of view, the stratified sample could be geographically widespread, entailing such a cost increase in the data collection process.
For solving this issue and to avoid sample dispersing, the two-stage stratified sampling design is often used for planning surveys, mainly the social ones carried out in households.
This sampling design enables control of the number of Primary Stage Units (PSUs) selected in the survey.
For instance, the municipalities or the enumeration areas in which the selected households (Secondary Stage Units, SSUs) belong.
Controlling the number of municipalities remarkably reduces data collection costs, mainly for face-to-face interviews, and avoids logistic problems given by a geographically scattered sample.
Nevertheless, allocating a two-stage sample among strata can be tricky: usually, households surveys are defined as multipurpose, since they estimate many target variables; moreover, produced estimates are provided for many estimation domains, such as national level, geographical areas, municipality types, etc.
In this context, the allocation of the whole sample size becomes a multivariate and multi-domain problem.
It is important to point out that the total size is defined according to three types of constraints: estimates precision, budget, logistic ones, or more likely by a combination of the three.
Once the whole sample size, intended as the number of SSUs to select and interview, has been indicatively defined, it has to be allocated among the strata in which the PSUs population is partitioned.
Different methods can be used for allocating the sampling units among the strata according to the available information.
The easiest methods are uniform and proportional allocation.
If, however, the values and the variances of some survey target variables are known in each design stratum, from auxiliary sources such as registers or previous survey occasions, then an optimal allocation can be computed.
The idea behind the optimal allocation is that strata with larger sizes and larger variability recorded on the target variables need a larger sample size to provide better estimates. Several publications and packages focus on this aspect.
{R2BEAT} extends the methodology implemented in Istat's open-source software called \textbf{Mauss-R} \citep{maussr}, which stands for ``Multivariate Allocation of Units in Sampling Surveys'' and is widely used for designing one-stage sample surveys, and also the methodology implemented in {SamplingStrata} \citep{barcaroli2014samplingstrata}.
Furthermore, it faces the optimal allocation definition in the two-stage sampling design case.
Its name stands for {R} "to" Bethel Extended Allocation for Two-stage. The package represents a very specific tool for designing, allocating and selecting the most complex and challenging sample in the context of survey designs.
Furthermore, {R2BEAT} fills a gap, within the range of statistical software concerning sample size allocation. In fact, several {R} packages are available for allocating a stratified sample, such as {surveyplanning} \citep{surveyplanning}, {PracTools} \citep{practools}, {optimStrat} \citep{optimstrat}, and the already mentioned {Mauss-R} and {SamplingStrata}, but none of these can compute the optimal allocation among strata in such a complex sampling design context, considering both multivariate and multi-domain case.
In the following paragraph the methodological aspects, underlying the package and its functions, will be presented in detail: the optimal allocation of the sample and its selection will be illustrated.
The third paragraph shows how to prepare, organize and check the input data needed by the package for allocating the whole sample size among strata and for finally selecting the units.
A case study on a synthetic dataset will be used as an example to test the package functions. Finally, the results will be discussed in the concluding remarks.
\section{Methodological aspects}
\label{sec:method}
Sample surveys carried out by National Statistical Institutes and by other institutions have multi-domains and multi-purpose objectives, so they have to provide accurate estimates for different parameters and different domains (i.e. geographical areas such as national, regional, and more).
However, usually, the survey has budgetary constraints, then, they must be carefully planned to provide high-quality estimates for parameters of interest.
A seminal work in this perspective is due to \citet{kish1965survey}.
While a broad theoretical framework for optimizing surveys by maximizing data quality within budgetary constraints is provided by \citet{biemer2003introduction} and \citet{biemer2010total}.
When designing a multipurpose survey several choices need to be made.
They usually are not trivial, because identifying the best solution for every purpose (i.e. every interest variable for each domain of interest) is challenging.
Usually ``just'' a practical optimum, not the best solution, can be pursued.
The search for the best solution---maximizing data quality within budgetary constraints---may raise conflicts in several
areas \citep{kish1988multipurpose}.
Among these areas, sample size and the relation of biases to sampling errors are considered the most important because their influence ripples throughout the overall survey.
This view justifies the care and attention always given in the literature to the optimal sample design \citep{cochran1977sampling,cicchitelli1992campionamento,conti2012campionamento,tille2020sampling}.
\citet{gonzalez2010optimal} present an interesting overview of the approaches for defining optimal sampling strategies.
The optimization problem of a sample design is usually dealt with the estimation of a mean (or equivalently of a total) in stratified sampling designs with a fixed sample size.
The problem of the optimization of stratified sample design can be classified depending on whether stratification is given or also the stratification has to be optimized, before or at the same time of the allocation.
The {R2BEAT} package solves the optimization problem when the stratification is given and the optimization must be sought in the allocation of sampling units.
Therefore, in the following, we focus just on this situation.
For more details on the optimization problems when also the stratification has to be optimized see, e.g., \citet{ballin2013joint} and references therein.
\subsection{Optimal allocation}
\label{sec:samplealloc}
Let us consider a population $U$ of size $N$ ($k=1, \dots, N$) partitioned in $H$ subgroups, $U_h$ $(h = 1, \dots, H)$, called strata.
Hence, each stratum contains $N_h$ elements, where $N_h$ is assumed to be known and such as $\sum_{h=1}^{H} N_h = N$.
The strata can be defined in different ways on the basis of one or more qualitative variables known for all the units in the population.
Then, we assume, at least for the moment, to be interested in investigating the mean of just one $y$ variable in the population $U$,
\begin{eqnarray}
\label{eq:meanPOP}
\mu_{y} = \frac{\sum_{k \in \ U} y_k}{N}
\end{eqnarray}
where $y_k$ is the value of the $y$ variable observed on the $k$-th unit in the population $U$.
The $y$ variable could be a quantitative variable or dichotomous, that is $y \in \left\lbrace 0, 1 \right\rbrace$.
Please note that, even when $y$ is a dichotomous variable, expression~\eqref{eq:meanPOP} holds and $\mu_{y}$ is equal to the proportion of units in the population for which $y=1$.
Furthermore, assume we want to estimate $\mu_{y}$ through a probabilistic sample $s$ of size $n$ with the estimator
\begin{eqnarray}
\label{eq:HT}
\hat{\bar{Y}} = \frac{\hat{Y}_{HT}}{N} = \frac{\sum_{k \in s} y_k \ d_k}{N}
\end{eqnarray}
where $\hat{Y}_{HT}$ is the Horvitz-Thompson estimator for the total \citep{horvitz1952generalization} in which $d_k$ is the design weight usually equal to the inverse of the first order inclusion probability.
The sample size of a survey, $n$, is usually exogenous information, dictated by budget and, sometimes, by logistic constraints associated to the unit $k$ in the sample.
Then, in practice, the problem comes down to the allocation of the $n$ units in the $H$ strata, such as $\sum_{h=1}^H n_h=n$.
Therefore, let us define
\begin{eqnarray}
\label{eq:meanPOPstr}
\mu_{hy} = \frac{\sum_{k \in U} y_k \ \boldsymbol{1}_h}{N_h}
\end{eqnarray}
the mean of the $y$ in each stratum where $\boldsymbol{1}_h$ is the membership indicator for the unit $k$ in the stratum $h$.
In the same way, expressions~\eqref{eq:HT} can be easily adapted for estimating $\mu_{hy}$, that is
\begin{eqnarray}
\label{eq:HTh}
\hat{\bar{Y}}_h = \frac{\hat{Y}_{HT,h}}{N_h} = \frac{\sum_{k \in s_h} y_k \ d_k}{N_h},
\end{eqnarray}
where $s_h$ is the sample in the stratum $h$.
The sampling variance estimator of $\hat{\bar{Y}}_h$ is given by
\begin{eqnarray}
\label{eq:varHTh}
\widehat{\text{var}} \left(\hat{\bar{Y}}_h \right) =
\frac{1}{n_h - 1} \left(\frac{1}{n_h} - \frac{1}{N_h} \right)
\sum_{k \in h} (y_{hk} - \bar{y}_h)^2,
\end{eqnarray}
where $\bar{y}_h$ is the sample mean of the variable $y$ in the stratum $h$.
In this perspective, the mean of $y$ in \eqref{eq:meanPOP} can be written also as
\begin{eqnarray*}
\mu_{y}= \sum_{h=1}^H \frac{N_h}{N} \mu_{hy}
\end{eqnarray*}
and, consequently, $\hat{\bar{Y}}$ in \eqref{eq:HT} as
\begin{eqnarray*}
\hat{\bar{Y}} = \sum_{h=1}^H \frac{N_h}{N} \hat{\bar{Y}}_h.
\end{eqnarray*}
Therefore, the sampling variance estimator for $\hat{\bar{Y}}$ is
\begin{eqnarray*}
\widehat{\text{var}} \left( \hat{\bar{Y}} \right) = \sum_{h=1}^H \left(\frac{N_h}{N} \right)^2 \widehat{\text{var}} \left(\hat{\bar{Y}}_h \right).
\end{eqnarray*}
When there is no information on $y$, the sample size to be allocated to each stratum, $n_h$, can be assigned by performing uniform or proportional allocation.
Uniform allocation assigns an equal number of sampling units to each stratum, that is
\begin{eqnarray*}
n_h^{UNIF}=\frac{n}{H}.
\end{eqnarray*}
More often, we want the sample size assigned to strata in the sample to be proportional to the sizes of the strata in the population, that is
\begin{eqnarray*}
n_h^{PROP}=n \ \frac{N_h}{N}
\end{eqnarray*}
where $N_h/N$ is the weight of the stratum in the population with $\sum_{h=1}^H N_h/N=1$.
If the size is the same for all strata ($N_1=\dots=N_h=\dots=N_H=N/H$), $n_h^{PROP}$ comes down to $n_h^{UNIF}$.
When there is information in the population strata on $y$ and in particular on its variance, $S_{yh}^2$, a more favourable allocation can be performed.
Alternatively, it is possible to consider also a proxy variable highly correlated with $y$.
In this case, \cite{tschprow1923optimal} demonstrated that the optimal allocation can be obtained by giving
\begin{eqnarray*}
n_h^{OPT}=n \frac{\frac{N_h}{N} \ \sqrt{S_{yh}^2}}{\sum_{h=1}^H \frac{N_h}{N} \ \sqrt{S_{yh}^2}} \end{eqnarray*}
However, this result is better known as the \citeauthor{neyman1934optimal} allocation, after the namesake author who published the same result in \citeyear{neyman1934optimal}.
The rationale behind the optimal allocation is that strata with more weight and in which $y$ has much more variability need much more observations for reaching better estimates.
If the variance is the same in all the strata ($S_1^2=\dots=S_h^2=\dots=S_H^2$), $n_h^{OPT}$ comes down to $n_h^{PROP}$.
As evidence, the computation of the population variance is a crucial point in the optimal allocation.
A distinction between the types of variables and the sources from which they can be obtained is needed.
When $y$ is a dichotomous variable available from a population register, its population variance can be computed as
\begin{eqnarray}
\label{S2dicho}
S_{yh}^2 = p_h \times (1 - p_h)
\end{eqnarray}
where $p_h$ is the proportion of units with $y=1$ in the population strata.
In the case of a quantitative variable, $S_{yh}^2$ is equal to
\begin{eqnarray*}
\label{S2quant_register}
S_{yh}^2 = \frac{\sum_{k \in U_h} \left( y_k - \mu_{yh} \right)^2}{N_h}.
\end{eqnarray*}
When there is no population register, information on the variability can be obtained from a sample survey or a pilot survey previously carried out.
Let us assume to have collected the $y$ variable, or at least its proxy variable, on a sample $s^*$.
Then, (\ref{S2dicho}) can be computed just by replacing $p_h$ with
\begin{eqnarray}
\label{eq:estp}
\hat{p}_h = \frac{\sum_{k \in s_h^*} y_k \ w_k}{\sum_{k \in s_h^*} w_k},
\end{eqnarray}
that is the related estimate for each stratum obtained from the sample $s^*$.
In \eqref{eq:estp} $w_k$ is the sampling weight associated with the unit $k$ in the sample $s^*$.
Instead, when $y$ is a quantitative variable,
\begin{eqnarray*}
\label{S2quant_survey}
S_{yh}^2 = \hat{M}_h^2 - \hat{\bar{Y}}_h^{2}
\end{eqnarray*}
where
\begin{eqnarray*}
\hat{M}_h^2 = \frac{\sum_{k \in s_h^*} y_{k}^2 \ w_k}{N_h} \hspace{0.5cm} \text{and} \hspace{0.5cm}
\hat{\bar{Y}}_h = \frac{ \sum_{k \in s_h^*} y_{k} \ w_k}{N_h}
\end{eqnarray*}
are the quadratic mean and the arithmetic mean estimated on the sample $s^*$ in the $h$-th stratum, respectively.
Sometimes, collecting data on units belonging to different strata can have different costs for the difficulties in reaching them (e.g. strata are altitude zone) or the need of using different data collection modes.
Therefore, it is advisable to define the allocation by taking into account also the unit cost and the budget constraints.
The global cost of the survey can be defined as
\begin{eqnarray*}
C=c_0 + \sum_{h=1}^H n_h \ c_h,
\end{eqnarray*}
where $c_0$ is the fixed cost (not dependent on the sample size) and $c_h$ is the unit cost for collecting data on one unit belonging to stratum $h$.
Then, the optimal allocation under budget constraints is given by
\begin{eqnarray*}
n_h^{\bar{OPT}}=(C-c_0) \frac{\frac{\frac{N_h}{N} \ \sqrt{S_{yh}^2}}{\sqrt{c_h}}}{\sum_{h=1}^H \frac{N_h}{N} \ \sqrt{S_{yh}^2} \sqrt{c_h}}.
\end{eqnarray*}
If $c_1=\dots=c_h=\dots=c_L=1$ and $c_0=0$, the global cost amounts to the sample size ($C=n$).
The optimal allocation for just one $y$ variable is of little practical use unless the various variables under study are highly correlated.
This is because an allocation that is optimal for one characteristic is generally far from being optimal for others.
Therefore, several works have been devoted to solving the problem when more than one variable of interest has to be measured on each sampled unit.
All the contributions can be classified into two main approaches: the ``average variance'' and convex programming.
The methods under the ``average variance'' approach consist of defining a weight for each variable to consider, computing a weighted average of the stratum variances and finding the optimal allocation on the resulting ``average variance''.
They are computationally simple, intuitive and can be solved under fixed cost assumption.
However, the choice of the weights is completely arbitrary and the optimal properties are not clear \citep[see, e.g.,][for more details]{dalenius1953multi, yates1960sampling,folks1965optimum, hartley1965multiple,kish1976optima}.
Instead, the other approach includes methods that use convex programming to find the minimum cost allocation when the variances of all the sampling variables to consider satisfy fixed constraints.
The obtained allocation is actually optimal, but sometimes it can exceed the budgetary constraints \citep[see, e.g.,][for more details]{dalenius1957sampling,yates1960sampling,kokan1963optimum,hartley1965multiple,kokan1967optimum,chatterjee1968multivariate,chatterjee1972study,huddleston1970optimal,bethel1985optimum,chromy1987design,falorsi1998principi,stokes2004using,choudhry2012sample,kozak2007modern,kozak2008stratified}.
The most important method in the convex programming approach is the Bethel algorithm \citep{bethel1989sample} which extends the Neyman allocation to the multivariate case.
In particular, when we are interested in investigating the mean of more than one $y$ variable (quantitative or dichotomous), namely $y_1, \dots, y_i, \dots, y_J$, the optimal allocation problem reduces, in practice, to a minimum optimization problem of a convex function under a set of linear constraints
\begin{eqnarray}
\label{eq:optprob}
\left\lbrace
\begin{array}{l}
C = \text{min} \\
\widehat{CV} \left(\hat{\bar{Y}}_{i,h}\right) \leq
\delta \left(\hat{\bar{Y}}_{i,h} \right)
\hspace{0.5cm} \begin{array}{c}
i = 1, \dots, J \\
h = 1, \dots, L
\end{array}
\end{array}
\right.
\end{eqnarray}
where $C$ is the global cost of the survey and $\widehat{CV} \left(\hat{\bar{Y}}_{i,h}\right)$ is the estimate of the relative error.
The estimate of the relative error,
\begin{eqnarray}
\label{eq:CVHTh}
\widehat{CV} \left(\hat{\bar{Y}}_{i,h} \right) = \frac{\sqrt{\widehat{\text{var}} \left(\hat{\bar{Y}}_{i,h} \right)}}{\hat{\bar{Y}}_{i,h}},
\end{eqnarray}
is the ratio between the estimate of the sampling variance for the mean estimator of $y_i$ variable ($i=1, \dots, J$) in the stratum $h$ given by expression~\eqref{eq:varHTh} and the related estimate.
In this case, $\widehat{CV} \left(\hat{\bar{Y}}_{i,h}\right)$ is called
the expected error and it must be less than or equal to the precision constraint defined by the user or by regulation, $\delta \left(\hat{\bar{Y}}_{i,h} \right)$.
\cite{bethel1989sample} demonstrates that the solution to this optimization problem exists and can be obtained through an algorithm that applies the Lagrangian multipliers method.
The solution is continuous, and must therefore be rounded to provide integer stratum sample sizes.
The rounding clearly causes some deviations from the solution that, however, do not affect its optimality \citep{cochran1977sampling}.
The Bethel algorithm is very similar to the Chromy algorithm \citep{chromy1987design}.
However, it is preferable because, even if the Chromy algorithm is simpler, there is no proof that it converges if a solution exists.
The same framework works to deal also with the multi-domain problem.
Usually, estimates of a survey are disseminated for
the whole population and sub-domains, for instance for geographical areas, but not only.
Then, it is useful to define the optimal allocation also taking into account these outcomes of the survey.
Sub-domain estimation is actually a long-established theory \citep{SSW:03}.
Expressions~\eqref{eq:meanPOP} can be easily adapted just by introducing the sub-domain membership indicator variable, $\boldsymbol{1}_{k,d}$, which is equal to 1 for all units $k$ in domain $d$ and 0 otherwise, that is
\begin{eqnarray*}
\label{eq:meanPOP_dom}
\mu_y^d = \frac{\sum_{k \in U} y_k \ \boldsymbol{1}_{k,d}}{N_d}
\end{eqnarray*}
where $N_d$ is the population size in the domain $d$ ($d=1, \dots, D$).
It is important to point out that domains must be aggregations of strata; they must not cut across strata.
Then, it is sufficient to consider the domain estimates in the minimum optimization problem in \eqref{eq:optprob} and use the Bethel's algorithm for deriving the multivariate allocation in the multi-domain case.
However, in official statistics, especially for household surveys, two-stage sampling designs are usually adopted.
Two-stage sampling is based on a double sampling procedure: one on the primary stage units (PSUs) and another on the second stage units (SSUs).
For instance, in the household survey, the PSUs are the municipalities that are firstly selected.
Then, in each selected municipality, a sample of households - the $SSU$ - can be selected.
Two-stage sampling permits more complex sampling strategies and, moreover, it helps in the organization and cost reduction of data collection, because it reduces the interviewer's travels.
However, this economic saving is paid off with a loss of efficiency of the estimates.
In fact, each additional stage of selection usually entails an increase of the sampling variance of the mean estimator.
This increase can be assessed by the design effect ($deff$) that measures how much the sampling variance of $\hat{\bar{Y}}_{i}$, under the adopted sampling design ($des$), is inflated with respect to a simple random sample ($srs$), with the same sample size.
An estimate of the design effect, under the adopted sampling design, can be given by the expression:
\begin{eqnarray*}
\label{eq:deff}
deff \left( \hat{\bar{Y}}_{i} \right) & = & \frac{\widehat{\text{var}} \left( \hat{\bar{Y}}_{i}\right)_{des}}{\widehat{\text{var}} \left(\hat{\bar{Y}}_{i}\right)_{srs}}
\end{eqnarray*}
A rough approximation of the $deff$ can be obtained when the clusters have the same sample size and the same inclusion probability \citep{cicchitelli1992campionamento},
\begin{eqnarray}
\label{eq:deff1}
deff \left( \hat{\bar{Y}}_{i} \right) & = & 1+\rho_i \ (b-1)
\end{eqnarray}
where $b$ is the average cluster (i.e. PSU) size in terms of the final sampling units and $\rho_i$ is the intra-class correlation within the cluster (PSU) for the variable $y_i$ $(i=1, \dots, J)$.
The intra-class correlation provides a measure of data clustering in PSUs and SSUs.
In general, if $\rho_i$ is close to 1, the clustering is high and it is convenient to collect only a few units in the cluster.
On the contrary, if $\rho_i$ is close to 0, the collection of units from the same cluster does not affect the efficiency of the estimates.
Also for computing $\rho_i$, we can distinguish whether a population register in which the $y_i$ variables ($i=1,\dots,J$), or at least their proxies, are available or not.
In the former case, a good approximation is given by the expression
\begin{eqnarray}
\label{eq:rho_pop}
\rho_i=1 - \frac{D_{w_i}}{D_{y_i}}
\end{eqnarray}
where
\begin{eqnarray*}
D_{w_i} & = & \sum_{\ell=1}^L \sum_{k=1}^{N_\ell} \left( y_{i,k} - \mu_{y_{i,\ell}} \right)^2
\end{eqnarray*}
and
\begin{eqnarray*}
D_{y_i} & = &\sum_{k \in U} \left(y_{i,k} - \mu_{y_i}\right)^2
\end{eqnarray*}
are the deviance within clusters and the global deviance of the $y_i$ variable, respectively.
Remember that $D_{y_i}= D_{w_i} + D_{b_i}$, where
\begin{eqnarray*}
D_{b_i} = \sum_{\ell=1}^L N_\ell \left(\mu_{y_{i,\ell}} - \mu_{y_i} \right)^2,
\end{eqnarray*}
is the deviance between clusters.
Therefore, $0 \leq \rho_i \leq 1$.
Instead, $\rho_i$ can be estimated from a sample by inverting expression~\eqref{eq:deff1}:
\begin{eqnarray}
\label{eq:rho_est}
\hat{\rho}_{i} = \frac{deff_i - 1}{b-1}.
\end{eqnarray}
Here we consider, directly, a more general expression for the estimate of the $deff$ in terms of the intra-class correlation coefficient.
This expression refers to a typical situation in household surveys where PSUs are assigned to Self-Representing (SR) strata, that is they are included for sure in the sample, or to Not-Self-Representing (NSR) strata, where they are selected by chance.
In practice, this assignment is usually performed by comparing the measure of the size of PSUs to the threshold:
\begin{equation}
\label{eq:threshold}
\lambda = \frac{\bar{m} \ \Delta}{f}
\end{equation}
where $\bar{m}$ is the minimum number of SSUs to be interviewed in each selected PSU, $f=n/N$ is the sampling fraction and $\Delta$ is the average dimension of the SSU in terms of elementary survey units.
Then, $\Delta$ must be set equal to 1 if, for the survey, the selection units are the same as the elementary units (that is, household-household or individuals-individuals), whereas it must be set equal to the average dimension of the households if the elementary units are individuals, while the selection units are the households.
PSUs with a measure of size exceeding the threshold are identified as SR, while the remaining PSUs are identified as NSR.
Then, the extended expression of $deff$ \citep[see among the others][]{rojas2016estrategias} is
\begin{equation}
\label{eq:deff_ext}
deff \left(\hat{\bar{Y}}_{i}\right)
= \frac{N_{SR}^2}{n_{SR}} \left[ 1 + \rho_{i,SR} \ (b_{SR} - 1) \right] +
\frac{N_{NSR}^2}{n_{NSR}} \left[ 1 + \rho_{i,NSR} \ (b_{NSR} - 1) \right]
\end{equation}
where, for $SR$ and $NSR$ strata,
\begin{itemize}
\item $N_{SR}$ and $N_{NSR}$ are the population sizes;
\item $n_{SR}$ and $n_{NSR}$ are the sample sizes;
\item $\rho_{i,SR}$ and $\rho_{i,NSR}$ the intra-class correlation coefficients for the variable $i$ ($i=1, \dots, J)$;
\item $b_{SR}$ and $b_{NSR}$ are the average PSU size in terms of the final sampling units.
\end{itemize}
Of course, if there are no SR strata, expression \eqref{eq:deff_ext} reduces to expression \eqref{eq:deff1}.
The design effect is equal to 1 under the $srs$ design and increases for each additional stage of selection, due to the intra-class correlation coefficient which is, usually, positive.
The intra-class correlation coefficient for NSR can be derived with expression ~\eqref{eq:rho_pop} or \eqref{eq:rho_est} whether population register data are available or not.
It is not necessary to compute the intra-class correlation coefficient for SR strata, because just one PSU is selected and the intra-class correlation is 1 by definition.
Therefore, to define the optimal allocation under a two-stage sampling design, both the number of PSUs and the number of SSUs must be determined.
The solution has been proposed by \cite{falorsi1998principi} in a paper published in Italian and it is obtained with an iterative use of the Bethel algorithm.
In fact, at the first iteration, the Bethel algorithm is applied.
The optimal allocation for a stratified simple sampling design is obtained.
Then, this allocation is used to update the threshold in \eqref{eq:threshold} and the design effect in \eqref{eq:deff_ext}.
A new design effect is computed and used in turn to inflate the $S_{yh}^2$ (or, equivalently, its estimate).
It is used as input in the next iteration in which the Bethel algorithm is used again.
The obtained allocation is used again to update the threshold and the design effect, and a new allocation is found.
The process is iterated until the difference between two consecutive iterations is lower than a predefined threshold. \\ \\ \\
\begin{algorithm}[h]
\SetAlgoLined
\SetKwData{Left}{left}
\SetKwData{This}{this}\SetKwData{Up}{up}
\SetKwFunction{Union}{Union}
\SetKwFunction{FindCompress}{FindCompress}
\SetKwInOut{Input}{Input}
\SetKwInOut{Output}{Output}
\BlankLine
\Input{
\BlankLine
a. precision constraints in terms of CV\;
b. information on sampling strata (mean and stdev of target variables, N, ...)\;
c. information on previous design: deff, effst, rho \;
d. information on PSUs in sampling strata (measure of size)\;
e. minimum number of SSUs per PSU\;
}
\BlankLine
\Output{
\BlankLine
a. for each stratum: number of PSUs and SSUs to be selected\;
b. expected CVs for target estimates\;
c. item sensitivity of the solution\;
}
\BlankLine
\nlset{REM} First iteration\;\label{first}
1. input deff is used to inflate standard deviations of target variables in sampling strata\;
2. optimal allocation of SSUs in sampling strata is obtained by applying the Bethel algorithm as if it were a one-stage sampling design\;
3. the number of PSUs is determined on the basis of the minimum number of SSUs per PSU\;
4. the threshold for determination of self-representing PSUs is calculated\;
5. new deff is calculated and used to update the standard deviations of target variables in sampling strata\;
\nlset{REM} Next iterations\;\label{iterations}
\While{not convergence} {
1. optimal allocation of SSUs in sampling strata is obtained by applying the Bethel algorithm\;
2. the number of PSUs is determined on the basis of the minimum number of SSUs per PSU\;
3. the threshold for determination of self-representing PSUs is calculated\;
4. new deff is calculated and used to update standard deviations of target variables in sampling strata\;
5. the iteration stops if\\
\begin{itemize}
\item[a.] the difference between the sample sizes of two iterations is lower than 5 (default value) \textit{or}
\item[b.] the maximum of defts (square root of deffs) largest differences is lower than 0.06 (default value) \textit{or}
\item[c.] the number of iterations is higher than 20 (default value)\;
\end{itemize}
}
\caption{R2BEAT optimal allocation of PSUs and SSUs in sampling strata}
\label{algorithm}
\end{algorithm}
However, as pointed out by \cite{waters1987optimal}, different combinations yield the same variance and can satisfy the precision constraints, $\delta \left( \hat{\bar{Y}}_{i,h} \right)$.
The optimal solution strongly depends on the budgetary constraints that limit the $SSU$s and the data collection organization that influences the maximum number of $PSU$s that can be managed.
The discussion so far holds when the $HT$ estimator is used.
But, currently, the most applied estimator for the NSIs survey is the calibrated estimator \citep{deville1992calibration,sarndal2007calibration,devaud2019deville}.
The calibrated estimator, through the use of auxiliary variables, usually provides better estimates than $HT$.
Then, it can be useful to take into account, already in the allocation phase, the impact on the estimates of an estimator different from the $HT$ estimator.
This can be done by inflating the $S_{yh}^2$ with the estimator effect and following the procedure explained above.
An estimate of the estimator effect ($effst$) is given by
\begin{equation}
\label{effst}
effst (\hat{\bar{Y}}_{i}) = \frac{\text{var} \left(\hat{\bar{Y}}_{i} \right)}{\text{var} \left(\hat{\bar{Y}}_{i,_{HT}} \right)}.
\end{equation}
It measures how much the sampling variance of the applied estimator under the adopted design is inflated or deflated with respect to the sampling variance of the $HT$ estimator, on the same sample design.
\subsection{Sample selection}
\label{sec:samplesel}
Once the optimal allocation is defined, the selection of sampling units must be performed.
In the case of a stratified two-stage sampling design two sampling selections need to be done: one for PSUs and one for SSUs.
In each stratum, the PSUs are split into SR and NSR according to a size threshold \eqref{eq:threshold}.
PSUs with a measure of size exceeding the threshold are identified as SR, included for sure in the sample and each of them constitutes an independent sub-stratum.
Therefore, the probability that they are included in the sample (inclusion probability, $\pi_I$) is always equal to 1.
However, it can happen that no PSU has a measure of size higher than the threshold.
The remaining PSUs, NSR-PSUs, are ordered by their measure of size and divided into finer strata (\textit{sub-strata}) whose sizes are approximately equal to the threshold multiplied by the number of PSUs to be selected in each stratum.
In this way, sub-strata are composed of PSUs having size as homogeneous as possible.
The PSUs in each stratum can be selected in different ways.
However, the selection of a fixed number of PSUs per stratum is usually carried out with Sampford's method (unequal probabilities, without replacement, fixed sample size).
Then, the inclusion probability of the generic $\ell$-th NSR-PSU, is
\begin{equation*}
\pi_I=\frac{m \ M_{h\ell}}{N_h}
\end{equation*}
where $N_h$ is the measure of size in the sub-stratum $h$-th, $m$ is the number of NSR-PSUs to be selected in the sub-stratum and $M_{h\ell}$ is the measure of size in the $\ell$-th PSU in the sub-stratum $h$.
Finally, the SSUs must be drawn in the selected PSU.
Also in this case the SSU can be selected in different ways.
In most cases, they are selected through a systematic sampling design that shares several properties with the $srs$.
Then, the inclusion probability for the second stage is equal to
\begin{equation*}
\pi_{II}=\frac{n_{h\ell}}{M_{h\ell}}
\end{equation*}
where $n_{h\ell}$ is the number of SSUs to be selected in the $\ell$-th PSU in the $h$-th sub-stratum.
Then, the design weight for the unit $k$ in the $h$-th strata in the $\ell$-th PSU is equal to the inverse of the product of the first stage and the second stage inclusion probabilities,
\begin{equation*}
d_k = \frac{1}{\pi_{I}} \frac{1}{\pi_{II}}.
\end{equation*}
The design weights sum up to the population size, $\sum_{k \in s} d_k = N$, and are almost constant in each stratum, which means that the sample is self-weighting. \\ \\ \\
\begin{algorithm}[H]
\SetAlgoLined
\SetKwData{Left}{left}
\SetKwData{This}{this}\SetKwData{Up}{up}
\SetKwFunction{Union}{Union}
\SetKwFunction{FindCompress}{FindCompress}
\SetKwInOut{Input}{Input}
\SetKwInOut{Output}{Output}
\BlankLine
\Input{
\BlankLine
a. The output of the allocation step (function \texttt{beat.2st}) (universe of PSUs, measure of PSUs, number of PSUs and SSUs to be selected in each stratum, threshold)\;
}
\BlankLine
\Output{
\BlankLine
a. universe of PSUs with stratum, sub-stratum, PSU first order inclusion probability, PSU weight, flag sample, and number of SSUs to be selected in each PSU\;
b. sample of PSUs (flag sample=1) with stratum, sub-stratum, PSU first order inclusion probability, PSU weight, number of SSUs to be selected in each PSU\;
c. statistics related to the sample of PSUs at stratum level\;
}
\BlankLine
\nlset{REM} creation of \textit{sub-strata} and selection of PSUs\;
1. in each stratum, PSUs are sorted in descending order according to their measure of size\;
2. the measure of size of PSUs are compared with the threshold\;
3. PSUs with a measure of size exceeding the threshold are identified as SR, included for sure in the sample, and each of them constitutes an independent sub-stratum\;
4. the remaining PSUs, NSR-PSUs, are sorted in decreasing order by their measure of size and aggregated into finer strata (\textit{sub-strata})\;
5. \textit{sub-strata} are created adding PSUs (still in descending order of measure of size) for which the sum of the measure of size of the \textit{sub-strata} is approximately equal to the threshold multiplied by the number of PSUs to be selected in each stratum\;
6. in each \textit{sub-stratum} a fixed number of PSUs per stratum are usually selected with Sampford's method (unequal probabilities, without replacement, fixed sample size)\;
\caption{R2BEAT selection of PSUs}
\label{algorithm2}
\end{algorithm}
\section{Structure of the package}
The {R2BEAT} package provides functions for drawing complex sample designs using an optimal allocation also performing the selection of the PSUs and SSUs.
To install the latest release version of {R2BEAT} from CRAN, type \textbf{install.packages("R2BEAT")}
within {R}.
The current development version can be downloaded and installed from GitHub by executing
\textbf{devtools::install\_github("barcaroli/R2BEAT")}.
This section provides an introduction to the structure and functions associated with the package while the next section will present examples of its specific use.
The workflow to draw and select a complex sample using {R2BEAT} is: (1) prepare the input data, (2) check the input data,
(3) define the design and obtain the allocation, and (4) select the final sample units.
\subsection{Prepare the input data}
\label{sec:prepinput}
As it will be illustrated in detail in the next sub-sections the {R2BEAT} package provides functions to define one-stage stratified sample design (\textbf{beat.1st}) and two-stage stratified sample design (\textbf{beat.2st}). The preparation of the input dataset changes whether the former or the latter sample design will be adopted.
In the case of a multivariate optimal allocation for different domains in a stratified one-stage sample design, the function \textbf{beat.1st} can be used.
The inputs required by this function are two,
a data frame containing survey strata information (\textbf{stratif}) and a data frame of expected CV for each domain and each variable (\textbf{errors}). No functions to prepare these inputs are provided by the package, but it is possible to follow the example datasets \textbf{stratif} and \textbf{errors} to properly create the input datasets for the function \textbf{beat.1st}.
In the case of a two-stage design, two functions are provided by the package to help in the creation of the input data for the function \textbf{beat.2st}.
The functions are two because two different scenarios are possible, depending on the initial information available:
1. Only the sampling frame is available, no previous rounds of the survey have been carried out. In this scenario, a strict condition on the information content of the sampling frame must hold: values of the sample target surveys (or of their proxy correlated variables) are available for each unit in the frame. This can be accomplished by considering the previous census, or by using administrative registers. In this scenario, the function \textbf{prepareInputToAllocation1} can be used to create the input dataframes \textbf{stratif}, \textbf{rho}, \textbf{deft}, \textbf{effst}, \textbf{des\_file} and \textbf{psu\_file}.
2. Together with a sampling frame containing the units of the population of reference, also a previous round of the sampling survey to be planned is available. The \textbf{prepareInputToAllocation2} produces the same outputs of \textbf{prepareInputToAllocation1}, but it requires the design and/or calibrated objects of the previous sample survey, obtained using the {ReGenesees} package \citep{zardetto2015regenesees}.
The function \textbf{sensitivity\_min\_SSU} allows analyzing the different results in terms of first stage size (number of PSUs) and second stage size (number of SSUs), obtained when varying the values of the minimum number of SSUs to be selected in each PSU.
To check the coherence between the estimated population in the strata (\textbf{stratif}) and the population calculated by the PSUs dataset (\textbf{des\_file}), the function \textbf{check\_input} is provided to the users.
This function compares the strata sizes giving information about the differences and replacing the estimated stratum size with the stratum population calculated by the PSUs dataset.
\subsection{Defining the design and determining the allocation} \label{sec:alloc}
As already introduced, the package allows performing the optimal allocation for both one-stage and two-stage stratified sampling designs.
The first one is implemented within the function \textbf{beat.1st} and computes a multivariate optimal allocation for different domains in one-stage stratified sample design.
As described in section~\ref{sec:prepinput}, in a one-stage stratified sample design there are only two inputs to be provided to \textbf{beat.1st}: the dataframes \textbf{stratif} and \textbf{errors}. Besides these two mandatory inputs, it is also possible to indicate the minimum number of sampling units to be selected in each stratum, by default set equal to 2.
The function \textbf{beat.2st} performs the same multivariate optimal allocation for different domains considering stratified two-stage design.
Together with the input data \textbf{stratif} and \textbf{errors} other mandatory input are:
\begin{itemize}
\item \textbf{des\_file}: dataframe containing a row per each stratum, with information on total population, the values of the \textbf{delta} parameter (equal to the mean number of final SSUs contained in clusters to be selected, for instance, the mean number of individuals in a household), and the minimum number of SSUs to be selected in each PSU;
\item \textbf{psu\_file}: dataframe containing information on each PSUs (identifier, stratum, measure of size).
\item \textbf{rho}: dataframe contains a row per each stratum with the intra-class correlation coefficient both for self representing and non-self representing PSUs.
\end{itemize}
It is also possible to provide optional information about:
\begin{itemize}
\item \textbf{deft\_start}: dataframe containing a row per each stratum with the starting values for the square root of the design effect in the stratum of each variable of interest.
\item \textbf{effst}: dataframe containing a row per each stratum with the estimator effect for each variable of interest.
\end{itemize}
The functions \textbf{beat.1st} and \textbf{beat.2st} produce lists with respectively 4 and 9 items.
The \textbf{beat.1st} output contains:
\begin{enumerate}
\item \textbf{n}: a vector with the optimal sample size for each stratum;
\item \textbf{file\_strata}: a dataframe corresponding to the input dataframe \textbf{stratif} with the $n$ optimal sample size column added;
\item \textbf{alloc}: a dataframe with optimal (\textbf{ALLOC}), proportional (\textbf{PROP}), equal (\textbf{EQUAL}) sample size allocation;
\item \textbf{sensitivity}: a data frame with a summary of planned coefficients of variation (\textbf{Planned CV}), the expected ones under the given optimal allocation (\textbf{Actual CV}), and the sensitivity at 10\% for each domain and each variable. Sensitivity can be a useful tool to help in finding the best allocation, as it provides a hint of the expected sample size variation for a 10\% change in planned CVs.
\end{enumerate}
Together with the previous outputs, the function \textbf{beat.2st} produces also:
\begin{enumerate}
\item \textbf{iterations}: a dataframe that for each iteration of the Bethel algorithm provides a summary with the number of PSUs (\textbf{PSU\_Total}), distinguished between SR (\textbf{PSU\_SR}) and NSR (\textbf{PSU\_NSR}), plus the number of SSUs;
\item \textbf{planned}: a dataframe with the planned coefficients of variation for each variable in each domain.
\item \textbf{expected}: a dataframe with a summary of expected coefficients of variation under the given optimal allocation for each target variable in each domain;
\item \textbf{deft\_c}: a dataframe with the design effect for each variable in each domain in each iteration. Note that \textbf{DEFT1\_0 - DEFTn\_0} is always equal to 1 if \textbf{deft\_start} is NULL; otherwise it is equal to \textbf{deft\_start}. While \textbf{DEFT1 - DEFTn} are the final design effect related to the given allocation.
\item \textbf{param\_alloc}: a vector with a resume of all the parameters given for the allocation.
\end{enumerate}
\subsection{Sample units selection} \label{sec:selection}
Once the allocation for the primary and secondary sampling stage units has been defined, it is possible to use two functions for the selection of the final sampling units.
The function \textbf{select\_PSU} allows the users to select the PSUs allocated in each stratum, using the Sampford method, as implemented by the \textbf{UPsampford} function of R package \textbf{sampling} \citep{Rsampling}.
The input of this function is the output of the \textbf{beat.2st} function.
The output of the function is a list containing the following items:
\begin{enumerate}
\item \textbf{universe\_PSU}: a dataframe that reports the whole universe of PSUs, with the inner strata formed for the selection;
\item \textbf{sample\_PSU}: a dataframe containing the selected PSUs, with the indication, for each of them, of how many SSUs must be selected;
\item \textbf{PSU\_stats}: a table containing summary information on selected PSUs.
\end{enumerate}
In the last step, the selection of a sample of SSUs has to be carried out. The function \textbf{select\_SSU} allows selecting a sample of SSUs from the population frame, based on the SSUs allocated to each selected PSUs.
The input datasets are two:
\begin{enumerate}
\item \textbf{df}: the dataframe containing the final sampling units;
\item \textbf{PSU\_sampled}: the dataframe containing selected PSUs, corresponding to the second item of the output of the \textbf{select\_PSU} function.
\end{enumerate}
The function \textbf{select\_SSU} returns a dataframe containing the selection of the \textbf{df} dataframe, enriched with information about the first stage inclusion probability, the second stage inclusion probability, the final inclusion probability (the product of the first stage and the second stage inclusion probabilities) and the design weights.
\section{Illustrative examples} \label{sec:example}
To illustrate how to implement workflows making use of \textbf{R2BEAT} functions, we will consider two scenarios, depending on the initial setting:
\begin{enumerate}
\item only the sampling frame is available, no previous rounds of the survey have been carried out;
\item together with a sampling frame containing the units of the population of reference, also a previous round of the sampling survey to be planned is available;
\end{enumerate}
In both cases, we assume that the sampling frame contains information on the final sampling units, together with the indication of the PSUs to which each unit belongs.
In the first scenario, a stricter condition on the information content of the sampling frame must hold: values of the sample target surveys (or of their proxy correlated variables) must be available for each unit in the frame. This can be accomplished by considering a previous census, or by imputing values using predictive models.
In the following paragraphs, we will show only a subset of the code necessary to produce the final results, the relevant part of it\footnote{In order to reproduce the processing related to these examples, datasets and R scripts are downloadable from the link \href{https://github.com/barcaroli/R2BEAT\_workflows}{https://github.com/barcaroli/R2BEAT\_workflows}.}.
\subsection{Scenario 1 workflow}
In this scenario, it is assumed that a sampling frame is available. We consider a frame (\textbf{pop.RData}), containing 2,258,507 units:
\begin{verbatim}
region province municipality id_hh id_ind stratum stratum_label sex cl_age
1 north north_1 1 H1 1 12000 north_1_6 1 (24,34]
2 north north_1 1 H1 2 12000 north_1_6 2 (64,74]
3 north north_1 1 H1 3 12000 north_1_6 1 (74,112]
4 north north_1 1 H10 4 12000 north_1_6 2 (44,54]
5 north north_1 1 H100 5 12000 north_1_6 1 (34,44]
6 north north_1 1 H100 6 12000 north_1_6 1 (54,64]
...
active income_hh unemployed inactive
1 1 30487.75 0 0
2 1 30487.75 0 0
3 0 30487.75 0 1
4 1 21755.68 0 0
5 1 29870.56 0 0
6 1 29870.56 0 0
...
\end{verbatim}
covering a (synthetic) population of reference, with basic information (geographical and demographic variables:
\begin{itemize}
\item region: the NUTS2 identifier;
\item province: the NUTS3 identifier;
\item municipality: identifier of the municipality, that plays the role of the PSU identifier;
\item id\_hh: the household identifier;
\item id\_ind: the individual identifier;
\item stratum and stratum\_label: identifier of the initial strata (provinces);
\item sex and cl\_age: demographic information on individuals.
\end{itemize}
together with information that is related to the sampling survey we want to design:
\begin{itemize}
\item \textbf{active}, \textbf{inactive}, \textbf{unemployed}: binary variables indicating the occupation status of the individual;
\item \textbf{income\_hh}: household income.
\end{itemize}
We suppose that the values of these variables have been made available by a different source (for instance a census) or by predicting them with a model-based approach. In any case the uncertainty related to these values should be taken into account, by correctly evaluating the anticipated variance related to the models used for the predictions when producing the \textbf{strata} dataset \citep[][p. 59]{Baillargeon+Rivest:2012}.
Anyway, in the following, we will not consider this issue, as we want only to illustrate how it is possible to automatically derive all the inputs required by the next steps.
\subsubsection{Step 1: preparation of the inputs for the optimal sample design}
The function \textbf{prepareInputToAllocation1} allows preparing all the inputs required by the optimal allocation step under this first scenario. This function requires the attribution of values to the following parameters:
\begin{itemize}
\item \textbf{samp\_frame}
\item \textbf{id\_PSU}
\item \textbf{id\_SSU}
\item \textbf{strata\_var}
\item \textbf{target\_vars}
\item \textbf{deff\_var}
\item \textbf{domain\_var}
\item \textbf{delta} (average dimension of the SSU in terms of elementary survey units)
\item \textbf{minimum} (minimum number of SSUs to be interviewed in each selected PSU)
\end{itemize}
About the values of these parameters, the choices are almost always driven by the content and structure of the sampling frame, except for \textbf{minimum}. In order to orientate the choice of a suitable value for this parameter, the function \textbf{sensitivity\_min\_SSU} allows performing a sensitivity analysis, showing how the first and second stage sample sizes vary by varying its values:
\begin{verbatim}
> sens_min_SSU <- sensitivity_min_SSU (
+ samp_frame=pop,
+ id_PSU="municipality",
+ id_SSU="id_ind",
+ strata_var="stratum",
+ target_vars=c("income_hh","active","inactive","unemployed"),
+ deff_var="stratum",
+ domain_var="region",
+ minimum=50,
+ delta=1,
+ deff_sugg=1.5,
+ min=30,
+ max=80)
\end{verbatim}
This function calculates 10 different pairs of values for the numbers of PSUs and SSUs resulting from the allocation step, starting with the value '30' assigned to the parameter \textbf{minimum} and ending with the value '80'.
The results are reported in Figure \ref{PlotMinimum}.
\begin{figure} [h!]
\centering
\includegraphics[width=14cm,height=10cm]{Plot_min_SSU.png}
\caption{Sensitivity analysis for \textbf{minimum} parameter.}
\label{PlotMinimum}
\end{figure}
On the basis of the results of the sensitivity analysis, we can instantiate the required parameters, for example in this way:
\begin{verbatim}
minimum = 50 # minimum number of SSUs to be interviewed in each selected PSU
\end{verbatim}
and execute the \textbf{prepareInputToAllocation1} function:
\begin{verbatim}
> inp <- prepareInputToAllocation1(
+ samp_frame = pop,
+ id_PSU = "municipality",
+ id_SSU = "id_ind",
+ strata_var = "stratum",
+ target_vars = c("income_hh","active","inactive","unemployed"),
+ deff_var = "stratum",
+ domain_var = "region",
+ delta = 1,
+ minimum = 50)
Computations are being done on population data
Number of strata: 24
... of which with only one unit: 0
\end{verbatim}
The output of this function (\textbf{inp}) is a list composed by the following elements:
\begin{enumerate}
\item the \textbf{strata} dataframe
\item the \textbf{deff} dataframe
\item the \textbf{effst} dataframe
\item the \textbf{rho} dataframe
\item the \textbf{psu\_file} dataframe
\item the \textbf{des\_file} dataframe
\end{enumerate}
that will be the inputs for the optimal allocation step (with the exception of the \textbf{deff}, which is produced only for documentation).
Here we report the content of the \textbf{rho} dataframe:
\begin{verbatim}
STRATUM RHO_AR1 RHO_NAR1 RHO_AR2 RHO_NAR2 RHO_AR3 RHO_NAR3
1 1000 1 0.0032494875 1 0.00001260175649 1 0.0000003631192
2 2000 1 0.0028554017 1 0.00150936389450 1 0.0007420929883
3 3000 1 0.0069678726 1 0.00162968276279 1 0.0006469515878
4 4000 1 0.0114552934 1 0.00578473329221 1 0.0019797687826
5 5000 1 0.0002677333 1 0.00000001682475 1 0.0000029484212
6 6000 1 0.0057050500 1 0.00004270905958 1 0.0000397945795
RHO_AR4 RHO_NAR4
1 1 0.000039120880
2 1 0.000937018761
3 1 0.002837431259
4 1 0.008962657055
5 1 0.000003404961
6 1 0.000194411580
\end{verbatim}
that has been calculated using the equation ~\eqref{eq:rho_pop}.
\subsubsection{Step 2: optimization of PSUs and SSUs allocation}
It is now possible to execute the optimization step of the sample design.
First of all, we define the set of precision constraints on the target variables:
\begin{verbatim}
DOM CV1 CV2 CV3 CV4
1 DOM1 0.02 0.03 0.03 0.05
2 DOM2 0.03 0.06 0.06 0.08
\end{verbatim}
We interpret the values of the CVs in this way: the maximum expected coefficient of variation for the first target variable (\textbf{household income}) is 2\% at the national level and 3\% at the regional level; for \textbf{active} and \textbf{inactive} the expected maximum values of CV is 3\% at the national level and 6\% at the regional level; finally, for \textbf{unemployed} it is 5\% at the national level and 8\% at the regional level.
The optimization step is performed by executing the \textbf{beat.2st} function:
\begin{verbatim}
> inp1$des_file$MINIMUM <- 50
> alloc1 <- beat.2st(stratif = inp1$strata,
+ errors = cv,
+ des_file = inp1$des_file,
+ psu_file = inp1$psu_file,
+ rho = inp1$rho,
+ deft_start = NULL,
+ effst = inp1$effst,
+ minPSUstrat = 2,
+ minnumstrat = 50
+ )
iterations PSU_SR PSU NSR PSU Total SSU
1 0 0 0 0 7887
2 1 31 104 135 8328
3 2 39 104 143 8317
4 3 38 104 142 8320
\end{verbatim}
This design is characterized by 142 PSUs (of which 38 self-representative, SR, and 104 non self-representative, NSR) and 8,320 SSUs.
\subsubsection{Step 3: selection of PSUs and SSUs}
We can now proceed in selecting the PSUs:
\begin{verbatim}
> sample_1st <- select_PSU(alloc1, type="ALLOC", pps=TRUE, plot=TRUE)
> sample_1st$PSU_stats
STRATUM PSU PSU_SR PSU_NSR SSU SSU_SR SSU_NSR
1 1000 2 2 0 286 286 0
2 2000 9 3 6 452 152 300
3 3000 4 0 4 200 0 200
4 4000 2 0 2 100 0 100
5 5000 2 2 0 219 219 0
6 6000 2 0 2 100 0 100
7 7000 2 0 2 100 0 100
8 8000 2 0 2 100 0 100
9 9000 1 1 0 557 557 0
10 10000 6 6 0 587 587 0
11 11000 26 2 24 1300 100 1200
12 12000 8 0 8 400 0 400
13 13000 1 1 0 703 703 0
14 14000 4 4 0 577 577 0
15 15000 27 9 18 1361 461 900
16 16000 18 0 18 900 0 900
17 17000 1 1 0 154 154 0
18 18000 4 2 2 200 100 100
19 19000 7 1 6 350 50 300
20 20000 4 0 4 200 0 200
21 21000 1 1 0 125 125 0
22 22000 3 3 0 150 150 0
23 23000 4 0 4 200 0 200
24 24000 2 0 2 100 0 100
25 Total 142 38 104 9421 4221 5200
\end{verbatim}
A discrepancy can be noted between the number of SSUs determined by the allocation step and the one produced by the selection of PSUs. This is because the selection of PSUs checks that the number of SSUs allocated in each selected PSU complies with the minimum, in our case equal to 50: if not, this minimum is assigned. This is why the total number of SSUs increases from 8,320 to 9,421.
Selected PSUs are contained in the \textbf{sample\_PSU} element of the output list:
\begin{verbatim}
> head(sample_1st$sample_PSU)
PSU_ID STRATUM stratum SR nSR PSU_final_sample_unit Pik weight_1st weight_2st weight
1 330 1000 1000-1 1 0 207 1 1 706.0966 706.0966
2 309 1000 1000-2 1 0 72 1 1 706.1806 706.1806
3 51 10000 10000-0 1 0 171 1 1 196.8480 196.8480
4 11 10000 10000-1 1 0 96 1 1 196.9688 196.9688
5 40 10000 10000-2 1 0 79 1 1 197.9494 197.9494
6 13 10000 10000-3 1 0 72 1 1 198.3750 198.3750
\end{verbatim}
With this input, we can proceed to select the sample of final units:
\begin{verbatim}
> PSU_sampled <- sample_1st$sample_PSU
> samp <- select_SSU(df=pop,
+ PSU_code="municipality",
+ SSU_code="id_ind",
+ PSU_sampled,
+ verbose=TRUE)
PSU = 1 *** Selected SSU = 50
PSU = 4 *** Selected SSU = 72
PSU = 6 *** Selected SSU = 50
PSU = 8 *** Selected SSU = 557
...
PSU = 510 *** Selected SSU = 50
PSU = 512 *** Selected SSU = 50
--------------------------------
Total PSU = 142
Total SSU = 9421
--------------------------------
\end{verbatim}
The distribution of PSUs and SSUs in the different strata is reported in Figure~\ref{fig:allocation1}.
\begin{figure} [h!]
\centering
\includegraphics[width=15cm,height=12cm]{allocation1.png}
\caption{Allocation of PSUs and SSUs (scenario 1).}
\label{fig:allocation1}
\end{figure}
In Figure \ref{weights1} the distribution of weights is reported.
\begin{figure} [h!]
\begin{subfigure}{\textwidth}
\centering
\includegraphics[width=15cm,height=6cm]{weights1_1.png}
\label{weights11}
\end{subfigure}
\begin{subfigure}{\textwidth}
\centering
\includegraphics[width=15cm,height=6cm]{weights1_2.png}
\label{weights12}
\end{subfigure}
\caption{Distribution of weights (scenario 1).}
\label{weights1}
\end{figure}
It can be seen that the distribution of weights is variable at the national, regional and provincial level, and only inside each stratum, the variability is low, as desired, except for those strata in which for some PSUs the minimum number of SSUs (50) had to be attributed instead of the optimal allocation.
\subsubsection{Step 4: verify compliance with precision constraints}
The function \textbf{eval\_2stage} allows verifying the compliance of the two-stage sample design with the set of precision constraints, by selecting a given number of different samples (in our case, 500) from the sampling frame, producing the estimates for each sample, and calculating over them the coefficients of variation for each target estimate.
We apply twice the function, first for the national level:
\begin{verbatim}
> # Domain level = national
> domain_var <- "one"
> set.seed(1234)
> eval11 <- eval_2stage(df,
+ PSU_code,
+ SSU_code,
+ domain_var,
+ target_vars,
+ sample_1st$sample_PSU,
+ nsampl=500,
+ writeFiles=FALSE,
+ progress=FALSE)
> eval11$coeff_var
CV1 CV2 CV3 CV4 dom
1 0.0101 0.0091 0.0241 0.0344 DOM1
\end{verbatim}
then, at the regional level:
\begin{verbatim}
> # Domain level = regional
> domain_var <- "region"
> set.seed(1234)
> set.seed(1234)
> eval12 <- eval_2stage(df,
+ PSU_code,
+ SSU_code,
+ domain_var,
+ target_vars,
+ sample_1st$sample_PSU,
+ nsampl=500,
+ writeFiles=FALSE,
+ progress=FALSE)
> eval12$coeff_var
CV1 CV2 CV3 CV4 dom
1 0.0113 0.0066 0.0235 0.0754 DOM1
2 0.0224 0.0206 0.0495 0.0733 DOM2
3 0.0240 0.0282 0.0515 0.0413 DOM3
\end{verbatim}
We recall that the precision constraints had been set equal to 2\% for the first variable, 3\% for the second and third, and 5\% for the fourth, at national level; and respectively to 3\% and 6\% and 8\% at regional level. We can see that the computed CVs are all compliant.
\subsection{Scenario 2 workflow}
Together with the availability of a sampling frame, containing the same information presented in the previous scenario, we assume also the availability of at least one previous round of the survey.
For the sake of simplicity, we assume that the previous round sample is the same as the one selected in scenario 1. We assume also that the values of the four target variables are the observed ones after the data collection.
Having set the above conditions, the main difference with scenario 1 is that, instead of choosing the values of the inputs required by the optimal allocation step in a somewhat arbitrary way, we can derive them directly from the collected survey data.
\subsubsection{Step 1: processing and analysis of survey data}
In this step, we proceed to perform the usual phases of calibration and production of the estimates. In doing that, we make use of the R package \textbf{ReGenesees}.
First we describe the sample design:
\begin{verbatim}
> ## Sample design description
> sample$stratum_2 <- as.factor(sample$stratum_2)
> sample.des <- e.svydesign(sample,
+ ids= ~ municipality + id_hh,
+ strata = ~ stratum_2,
+ weights = ~ weight,
+ self.rep.str = ~ SR,
+ check.data = TRUE)
\end{verbatim}
obtaining the \textbf{sample.des} object. Then we proceed with the calibration step:
\begin{verbatim}
> ## Calibration with known totals
> totals <- pop.template(sample.des,
+ calmodel = ~ sex : cl_age,
+ partition = ~ region)
> totals <- fill.template(pop, totals, mem.frac = 10)
> sample.cal <- e.calibrate(sample.des,
+ totals,
+ calmodel = ~ sex : cl_age,
+ partition = ~ region,
+ calfun = "logit",
+ bounds = c(0.3, 2.6),
+ aggregate.stage = 2,
+ force = FALSE)
\end{verbatim}
obtaining the \textbf{sample.cal} object.
These two objects are what is needed to obtain, in an automated way, all the inputs required by the optimization step.
\subsubsection{Step 2: preparation of the inputs for the optimal sample design}
The preparation of all the inputs required by the optimization step is a straightforward operation by using the \textbf{prepareInputToAllocation2} function:
\begin{verbatim}
> inp <- prepareInputToAllocation2(
+ samp_frame = pop, # sampling frame
+ RGdes = sample.des, # ReGenesees design object
+ RGcal = sample.cal, # ReGenesees calibrated object
+ id_PSU = "municipality", # identification variable of PSUs
+ id_SSU = "id_hh", # identification variable of SSUs
+ strata_vars = "stratum", # strata variables
+ target_vars = c("income_hh","active","inactive","unemployed"), # target variables
+ deff_vars = "stratum", # deff variables
+ domain_vars = "region", # domain variables
+ delta = 1, # Average number of SSUs for each selection unit
+ minimum = 50 # Minimum number of SSUs to be selected in each PSU
+ )
\end{verbatim}
The configuration of the output is just the same already seen in scenario 1 for the function \textbf{prepareInputToAllocation1}.
Here we report the content of the \textbf{effst} dataframe:
\begin{verbatim}
stratum STRATUM EFFST1 EFFST2 EFFST3 EFFST4
1 1000 1000 1.061891 0.9511291 0.9071854 1.0137193
2 10000 10000 1.005724 0.9077114 0.8991158 0.9780552
3 11000 11000 1.005722 0.9309392 0.9240808 0.9998968
4 12000 12000 1.026967 0.9241132 0.9117161 0.9911560
5 13000 13000 1.006354 0.9244961 0.9085689 0.9977077
6 14000 14000 1.002360 0.9348739 0.9237139 1.0065308
...
\end{verbatim}
and of the \textbf{rho} dataframe:
\begin{verbatim}
STRATUM RHO_AR1 RHO_NAR1 RHO_AR2 RHO_NAR2 RHO_AR3
1 1000 1 -0.00005314789 1 0.000004056338 1
2 10000 1 0.00021289157 1 0.000154688468 1
3 11000 1 0.01349102041 1 -0.004226612245 1
4 12000 1 0.00409179592 1 0.034025755102 1
5 13000 1 0.00002020513 1 0.000016396011 1
6 14000 1 0.00009018499 1 -0.000022736475 1
RHO_NAR3 RHO_AR4 RHO_NAR4
1 0.000007542254 1 0.00003425352
2 0.000160791738 1 0.00002596213
3 -0.007398367347 1 0.00075012245
4 0.031294265306 1 0.02008032653
5 0.000019209402 1 0.00001038462
6 -0.000022157068 1 0.00007399651
...
\end{verbatim}
in order to compare them with the scenario 1 ones.
\subsubsection{Step 3: optimization of PSUs and SSUs allocation}
The optimal allocation of PSUs and SSUs is the same as the one already seen in the first scenario:
\begin{verbatim}
> set.seed(1234)
> inp2$des_file$MINIMUM <- 50
> alloc2 <- beat.2st(stratif = inp2$strata,
+ errors = cv,
+ des_file = inp2$des_file,
+ psu_file = inp2$psu_file,
+ rho = inp2$rho,
+ deft_start = NULL,
+ effst = inp2$effst,
+ minnumstrat = 2,
+ minPSUstrat = 2)
iterations PSU_SR PSU NSR PSU Total SSU
1 0 0 0 0 9557
2 1 71 92 163 8464
3 2 38 108 146 8398
4 3 38 108 146 8396
\end{verbatim}
\subsubsection{Step 4: selection of PSUs and SSUs}
The selection of first and second stage units proceeds in exactly the same way as in scenario 1, first selecting the PSUs, and then the SSUs.
\begin{verbatim}
> sample_1st <- select_PSU(alloc2, type="ALLOC", pps=TRUE)
> sample_1st$PSU_stats
STRATUM PSU PSU_SR PSU_NSR SSU SSU_SR SSU_NSR
1 1000 2 2 0 279 279 0
2 2000 10 6 4 517 317 200
3 3000 4 0 4 200 0 200
4 4000 2 0 2 100 0 100
5 5000 2 2 0 202 202 0
6 6000 2 0 2 100 0 100
7 7000 2 0 2 100 0 100
8 8000 2 0 2 100 0 100
9 9000 1 1 0 564 564 0
10 10000 6 6 0 537 537 0
11 11000 26 4 22 1300 200 1100
12 12000 12 0 12 600 0 600
13 13000 1 1 0 756 756 0
14 14000 4 4 0 583 583 0
15 15000 28 10 18 1414 514 900
16 16000 22 0 22 1100 0 1100
17 17000 1 1 0 114 114 0
18 18000 2 0 2 100 0 100
19 19000 6 0 6 300 0 300
20 20000 4 0 4 200 0 200
21 21000 1 1 0 113 113 0
22 22000 2 0 2 100 0 100
23 23000 2 0 2 100 0 100
24 24000 2 0 2 100 0 100
25 Total 146 38 108 9579 4179 5400
>
> samp <- select_SSU(df=pop,
+ PSU_code="municipality",
+ SSU_code="id_ind",
+ PSU_sampled=sample_1st$sample_PSU,
+ verbose=TRUE)
PSU = 4 *** Selected SSU = 66
PSU = 8 *** Selected SSU = 564
PSU = 10 *** Selected SSU = 50
PSU = 11 *** Selected SSU = 96
...
PSU = 510 *** Selected SSU = 50
PSU = 512 *** Selected SSU = 50
--------------------------------
Total PSU = 146
Total SSU = 9579
--------------------------------
\end{verbatim}
The distribution of PSUs and SSUs in the different strata is reported in Figure \ref{allocation2}. It can be seen that the relative distribution of both units in the strata is quite similar to the one already seen in scenario 1.
\begin{figure} [h!]
\centering
\includegraphics[width=15cm,height=12cm]{allocation2.png}
\caption{Allocation of PSUs and SSUs (scenario 2).}
\label{allocation2}
\end{figure}
\begin{figure} [h!]
\begin{subfigure}{\textwidth}
\centering
\includegraphics[width=15cm,height=6cm]{weights2_1.png}
\label{weights21}
\end{subfigure}
\begin{subfigure}{\textwidth}
\centering
\includegraphics[width=15cm,height=6cm]{weights2_2.png}
\label{weights22}
\end{subfigure}
\caption{Distribution of weights (scenario 2).}
\label{weights2}
\end{figure}
We can observe now the distribution of weights in the selected sample (see Figure \ref{weights2}). Also in this case, the variability of the weights is low within strata, where it is almost null, except in those strata in which for some PSUs the minimum number of SSUs (50) had to be attributed instead of the optimal allocation.
\subsubsection{Step 5: verify compliance with precision constraints}
As in the previous scenario, the final check consists in verifying the compliance of the optimized design to the precision constraints.
We, therefore, apply the function \textbf{eval\_2stage}, first for the national level:
\begin{verbatim}
> # Domain level = national
> domain_var <- "one"
> eval <- eval_2stage(df,
+ PSU_code,
+ SSU_code,
+ domain_var,
+ target_vars,
+ PSU_sampled=sample_1st$sample_PSU,
+ nsampl=500)
> eval$coeff_var
CV1 CV2 CV3 CV4 dom
1 0.012 0.0094 0.025 0.0364 DOM1
\end{verbatim}
then, at regional level:
\begin{verbatim}
> # Domain level = regional
> domain_var <- "region"
> eval <- eval_2stage(df,
+ PSU_code,
+ SSU_code,
+ domain_var,
+ target_vars,
+ PSU_sampled=sample_1st$sample_PSU,
+ nsampl=500)
> eval$coeff_var
CV1 CV2 CV3 CV4 dom
1 0.0105 0.0070 0.0246 0.0745 DOM1
2 0.0285 0.0206 0.0504 0.0748 DOM2
3 0.0291 0.0335 0.0597 0.0444 DOM3
\end{verbatim}
Also in this case, no precision constraint is violated.
\section{Comparison with other software}
To evaluate the performance of R2BEAT, in this section we compare it to two other R packages, namely:
\begin{enumerate}
\item the package \textbf{PracTools} \citep{practools} implements many of the procedures described in \cite{Valliant:2015}, including those regarding the design of multistage samples;
\item the package \textbf{samplesize4surveys} \citep{Rojas:2020} allows to calculate the sample size for complex surveys.
\end{enumerate}
First, we briefly illustrate, for both packages, the functions covering the two-stage sampling design, then we apply them to the same case seen in scenario 1, finally comparing the obtained results
\footnote{In order to reproduce the processing related to the evaluation of the different softwares, datasets and R scripts are downloadable from the link \href{https://github.com/barcaroli/Two-stage-sampling-software-comparison}{https://github.com/barcaroli/Two-stage-sampling-software-comparison}}.
\subsection{R package PracTools}
\cite{Valliant:2015} describe (pages 231-234) a method for the optimal allocation of two-stage sampling when numbers of sample PSUs and elements per PSU are adjustable (which is our case).
This method is implemented in the R function \textbf{clusOpt2} in the \textbf{PracTools} package. This function computes the number of PSUs and the number of final units for each PSU for a two-stage sample which uses \textit{srs} at each stage or probability proportional to size with replacement (\textit{ppswr}) at the first stage and \textit{srs} at the second.
This function requires the indication of a number of parameters, among which:
\begin{itemize}
\item C1: unit cost per PSU
\item C2: unit cost per SSU
\item delta: homogeneity measure
\item unit.rv: unit relvariance
\item k: ratio of B2+W2 to unit relvariance
\item CV0: target CV
\item tot.cost: total budget for variable costs
\item cal.sw: indicates if the optimization has to be run for a fixed total budget, or for the target CV0
\end{itemize}
The function \textbf{BW2stagePPS} computes the population values of B2, W2, and delta whose meaning is explained in \cite{Valliant:2015} (page 222).
The method is univariate: the optimization can be performed by indicating only one variable. The whole code required for the case described in scenario 1 is given here:
\begin{verbatim}
> load("pop.RData")
> library(PracTools)
> # Probabilities of inclusion (I stage)
> pp <- as.numeric(table(pop$municipality))/nrow(pop)
> # variable income_hh
> bw <- BW2stagePPS(pop$income_hh, pp, psuID=pop$municipality)
> bw
B2 W2 unit relvar B2+W2 k delta
0.04075893 0.79538674 0.83601766 0.83614567 1.00015312 0.04874621
> des <- clusOpt2(C1=130,
+ C2=1,
+ delta=bw[6],
+ unit.rv=bw[3],
+ k=bw[5],
+ CV0=0.02,
+ tot.cost=NULL,
+ cal.sw=2)
> des
C1 = 130
C2 = 1
delta = 0.04874621
unit relvar = 0.8360177
k = 1.000153
cost = 25499.72
m.opt = 141.4
n.opt = 50.4
CV = 0.02
> sample_size <- des$m.opt*des$n.opt
> sample_size
7126.56
\end{verbatim}
In running the function, we have indicated that the optimization step was to be carried out having a target CV of 2\% for the variable \textbf{income\_hh}. As there is no way to directly indicate a desired minimum number of SSUs per PSU, we managed to obtain the desired value of 50 by indicating a couple of values 130 and 1 respectively for C1 and C2. As a result, the number of PSUs is 141 and the number of SSUs is 7,127.
\subsection{R package samplesize4surveys}
This package offers two functions to compute a grid of possible sample sizes for estimating single means (\textbf{ss2s4m}) or single proportions (\textbf{ss2s4p}) under two-stage sampling designs.
The required parameters are the following:
\begin{itemize}
\item N: the population size
\item mu: the value of the estimated mean of a variable of interest
\item sigma: the value of the estimated standard deviation of a variable of interest
\item conf: the statistical confidence
\item delta: the maximum relative margin of error that can be allowed for the estimation
\item M: number of clusters in the population
\item to: (integer) maximum number of final units to be selected per cluster
\item rho: the intraclass correlation coefficient
\end{itemize}
Here is the code we used in the case of the target variable \textbf{income\_hh}:
\begin{verbatim}
> load("pop.RData")
> PSU <- length(unique(pop$municipality))
> pop_strata <- as.numeric(table(pop$stratum))
> rho <- 0.04875369 # value taken from scenario 1 analysis
> ss2s4m(N = nrow(pop),
+ mu = mean(pop$income_hh),
+ sigma = sd(pop$income_hh),
+ delta = 0.02 * 1.96,
+ M = PSU,
+ to = 50,
+ rho = sum(rho$RHO_NAR1*pop_strata) / sum(pop_strata))
50 3.388931 142 50 7061
\end{verbatim}
we obtain a design characterized by a total sample size of 7,061, with 142 PSUs.
Concerning the way we indicated the value of the parameter \textbf{rho}, we made use of the value of the intra-class correlation coefficient computed in scenario 1 by \textbf{R2BEAT}, not considering domains and strata.
In order to compare with the 2\% precision constraint expressed in terms of coefficient of variation, as the package requires the margin of error, we multiply the value of the CV by a z-value equal to 1.96, obtaining the ratio between the semi-width of the confidence interval and the estimate of the mean of the parameter.
The use of the function \textbf{ss2s4p}, applicable for the other three variables, is practically the same.
\subsection{Comparison of results}
As already said, we refer to the scenario 1 setting.
We consider the same precision levels for the four variables for the unique domain, set equals respectively to 2\%, 3\%, 3\% and 5\%.
We apply another constraint for all the three softwares, that is, we want to select a minimum number of final units in each PSU, set equal to 50.
There is no problem in doing that for package \textbf{samplesize4surveys}, by setting the parameter \textbf{to} equal to 50: the last value of the final grid is the result we want. Moreover, there is no loss in the optimality of the solution in doing that, because the sample sizes obtained for further values are increasingly higher.
As for \textbf{PracTools}, it is more complicated because, as already said, there is no direct way to set this constraint. In any case, we manage to do that, by varying the value of C1 (leaving C2 equal to 1) until we find the solution with the value of \textbf{n.opt} nearest to 50.
A final consideration regarding the application of \textbf{R2BEAT}: in this setting, to be comparable with the other packages (that are univariate and mono-domain), it has been applied in a simplified way, that is, one variable per time (univariate), and no different domains and strata in the sampling frame. By so doing, \textbf{R2BEAT} yields obviously different results from those seen in scenario 1.
\begin{table}[h!]
\caption{Two-stage sample design obtained by different packages.}
\centering
\begin{tabular}{|l||c|c|c|c|c|c| }
\hline
& \multicolumn{2}{c|}{PracTools} & \multicolumn{2}{c|}{R2BEAT} & \multicolumn{2}{c|}{samplesize4surveys} \\
Variable & PSUs & SSUs & PSUs & SSUs & PSUs & SSUs \\ \hline
active & 49 & 2459 & 37 & 2030 & 49 & 2436 \\
inactive & 90 & 4395 & 68 & 4338 & 88 & 4391 \\
income\_hh & 141 & 7127 & 79 & 5140 & 142 & 7061 \\
unemployed & 406 & 19956 & 149 & 10884 & 402 & 20058 \\ \hline
\end{tabular}
\label{results}
\end{table}
\begin{figure} [h!]
\begin{subfigure}{\textwidth}
\centering
\includegraphics[width=12cm,height=7cm]{comparisonPSU.png}
\label{comparison:fig1}
\end{subfigure}
\begin{subfigure}{\textwidth}
\centering
\includegraphics[width=12cm,height=7cm]{comparisonSSU.png}
\label{comparison:fig2}
\end{subfigure}
\caption{Sample sizes by packages.}
\label{comparison}
\end{figure}
In Table \ref{results} and in Figure \ref{comparison} are reported the results obtained by the three packages. To be sure of the results of \textbf{R2BEAT}, simulations have been carried out, and the resulting CVs are always below the precision threshold.
Analyzing the table, it is evident that \textbf{R2BEAT} is always the best performer, in terms of sample size, considering both numbers of PSUs and SSUs.
\section{Concluding remarks}
Concluding, we would like to focus on the main strength of the R2BEAT, which could be considered its completeness, regarding all the phases of the statistical data production process.
The package deals with the design, stratification, allocation among strata and, finally, the selection of sample units.
Furthermore, the facilities provided by R2BEAT are extremely flexible and generalizable: for instance, R2BEAT is the first R package on repositories to provide the optimal allocation, both for one-stage and two-stage sampling designs.
These features make the package particularly helpful and valuable both for NSIs and private statistical institutions, such as marketing researchers, universities or national government organizations.
Moreover, those who deal with statistics often have a large data availability, coming from registers, previous surveys or other data sources: the package, requiring auxiliary variables for designing and allocating the sample, makes this auxiliary information useful and profitable during the sampling planning process.
The output provided to users has been thought to be as clear as possible and to help them to carry out analysis and checks on the obtained allocations and on the sample on which the survey will be based.
Last, but not least, R2BEAT can be considered more efficient than the other available software and packages which deal with sample design: in fact, on equal errors, the allocated sample size is lower, both in terms of Primary and Secondary Stage Units (PSUs and SSUs).
\bibliographystyle{chicago}
|
1,314,259,995,026 | arxiv | \section{Introduction}
\label{sec:intro}
The Large Hadron Collider (LHC) represents a unique opportunity to probe the detailed structure of the Standard Model at the TeV scale. Collisions at the LHC are dominated by Quantum Chromodynamics (QCD), and in particular, jets, whose radiation encodes the details of the underlying scattering process. There has therefore been a significant theoretical effort to understand more complicated final states involving jets, both in terms of calculating the underlying ultraviolet process \cite{Berger:2010zx,Bern:2011ep,Bern:2013gka,Badger:2013yda,Boughezal:2015ded,Boughezal:2015dva,Boughezal:2015aha,Boughezal:2015dra,Campbell:2016lzl,Currie:2017eqf} and disentangling infrared divergences therein \cite{GehrmannDeRidder:2005cm,Czakon:2010td,Boughezal:2011jf,Czakon:2014oma,Boughezal:2015aha,Gaunt:2015pea,Moult:2016fqy,Boughezal:2016zws,DelDuca:2016ily,Caola:2017dug}, as well as developing new tools for understanding factorization and the infrared dynamics of QCD radiation \cite{Bauer:2000yr,Bauer:2001ct,Bauer:2001yt,Bauer:2002nz,Rothstein:2016bsq,Bauer:2011uc,Larkoski:2015zka,Larkoski:2015kga,Pietrulewicz:2016nwo,Dasgupta:2014yra,Banfi:2014sua,Chien:2015cka,Caron-Huot:2015bja,Larkoski:2015zka,Becher:2015hka,Becher:2016mmh,Chang:2013iba,Larkoski:2016zzc,Larkoski:2015lea}. This has enabled realistic first principles calculations of physical observables on jets \cite{Marzani:2017mva,Frye:2016aiz,Frye:2016okc,Banfi:2016zlc,Banfi:2015pju,Stewart:2013faa,Becher:2012qa,Feige:2012vc,Dasgupta:2013ihk,Dasgupta:2015lxh,Larkoski:2015kga,Frye:2017yrw,Jouttenus:2013hs,Hoang:2017kmk}.
Experimental advances at the LHC have allowed the detailed substructure of a jet to be measured and exploited to determine its origin, giving rise to the field of jet substructure \cite{[{We are unable to do justice to the wealth of impressive experimental results using jet substructure. We refer the interested reader to \url{https://twiki.cern.ch/twiki/bin/view/AtlasPublic} and \url{http://cms-results.web.cern.ch/cms-results/public-results/publications/} for more examples}]results}. Of particular importance are multi-prong discriminants, which allow the identification of structures which are characteristic of hadronically decaying $W/Z/H$ bosons. Due to the complex environment of the LHC, these are used in conjunction with a grooming strategy, which removes low energy contamination. From a theoretical perspective, these techniques require understanding the structure of QCD jets at a much more differential level than previously considered.
\begin{figure*}[t]
\subfigure{\raisebox{0.75cm}{(a)}\hspace{0.2cm}\includegraphics[width=6.4cm]{figures/multi_differential}\label{fig:D2_ps}} \qquad \qquad \qquad
\subfigure{\raisebox{0.75cm}{(b)}\hspace{0.2cm}\includegraphics[width=6.2cm]{figures/Fig1.eps}\label{fig:qg_dist}}
\caption{(a) Contours of $D_2$ in the $(e_2,e_3)$ phase space. (b) The groomed $D_2$ distribution obtained through marginalization of the multi-differential cross section. Effective field theories describing the different regions are discussed in the text.
}
\label{fig:multi_marginalize}
\end{figure*}
Analytic calculations play a crucial role in the field of jet substructure, having transformed it from relying on simple observables based on heuristics, to sophisticated observables which are able to exploit increasingly subtle aspects of gauge theories \cite{Larkoski:2013eya,Larkoski:2015npa,Maltoni:2016ays,Moult:2016cvt}, leading to improved performance and novel search strategies. As a concrete example, analytic calculations of previous status quo observables led to the modified mass drop (mMDT) \cite{Dasgupta:2013ihk,Dasgupta:2013via} and soft drop groomers \cite{Larkoski:2014wba}, as well as the $D_2$ \cite{Larkoski:2014gra,Larkoski:2015kga} and $N_2$ \cite{Moult:2016cvt} discriminants, which are the current tools of choice. Continued progress in developing new techniques relies on the next generation of calculations for deeper understanding and guidance.
In this paper we present a field theory framework for the analytic calculation of groomed multi-prong observables, building on recent developments in multi-scale effective field theories (EFTs) \cite{Bauer:2011uc,Larkoski:2015zka,Larkoski:2015kga,Pietrulewicz:2016nwo,Chien:2015cka}. It allows for a systematically improvable perturbative calculation based on operator definitions and a resummation of logarithmically enhanced terms, which dominate the behavior of the observable in the region of interest, using the renormalization group.
While our framework applies quite generally, in this paper we consider the specific case of the groomed $D_2$ observable \cite{Larkoski:2014gra,Larkoski:2015kga}, which has been widely used at the LHC to identify hadronically decaying $W/Z/H$ bosons. We compute for the first time at next-to-leading logarithmic (NLL) and leading order (LO) accuracy fully realistic distributions at the LHC, allowing us to draw robust theoretical conclusions about both the perturbative and non-perturbative behavior of the observable.
\section{Jet Substructure Observables}
\label{sec:obs}
To identify hadronically decaying bosons based on their radiation patterns, we will use observables formed from the correlations of $p_T$ and boost invariant angle $R^2_{ij}=\phi_{ij}^2+\eta_{ij}^2$ \cite{Larkoski:2013eya,Moult:2016cvt}. These form the building blocks for observables of use at both ATLAS and CMS, and their simple structure facilitates analytic calculations and leads to well behaved perturbative corrections \cite{Larkoski:2015uaa}. Here we will focus on a specific example \cite{Larkoski:2014gra,Larkoski:2015kga}
\begin{align}\label{eq:D2_def}
D_2^{(\alpha)}= \frac{\ecf{3}{\alpha}}{(\ecf{2}{\alpha})^3}=\fd{1.3cm}{figures/D2_schematic.pdf}\,,
\end{align}
where the two-point and three-point correlation functions are defined as \cite{Larkoski:2013eya}
\begin{align}\label{eq:ppe2}
\ecf{2}{\alpha}&=\frac{1}{p_{TJ}^2}\sum_{i<j\in J} p_{Ti} p_{Tj}R_{ij}^\alpha\,, \\
\ecf{3}{\alpha}&=\frac{1}{p_{TJ}^3}\sum_{i<j<k\in J} p_{Ti} p_{Tj}p_{Tk}R_{ij}^\alpha R_{ik}^\alpha R_{jk}^\alpha\,.
\end{align}
In the remainder of the text, we will take $\alpha=2$, and drop the label to simplify notation. The $D_2$ observable measures the extent to which a jet has a two-prong substructure. For a two-prong jet, as is characteristic of a decaying $W/Z/H$, $D_2\ll1$, while for a more uniform jet, $D_2 \sim 1$, allowing them to be distinguished \cite{Larkoski:2014gra,Larkoski:2015kga}. This is illustrated in \fig{multi_marginalize}, which shows contours of constant $D_2$ in the $(e_2, e_3)$ phase space.
To reduce sensitivity to soft radiation, we will use the mMDT \cite{Dasgupta:2013ihk,Dasgupta:2013via} or soft drop \cite{Larkoski:2014wba} grooming algorithm. From a theoretical perspective, these algorithms have the advantage that they remove color connections to other jets \cite{Frye:2016okc,Frye:2016aiz}. After clustering a jet with the Cambridge/Aachen algorithm \cite{Dokshitzer:1997in,Wobisch:1998wt,Wobisch:2000dk}, the mMDT algorithm declusters the jet, and at each step compares branches $i$ and $j$, removing branches which fail the criterion
\begin{equation}
\frac{\min[p_{Ti},p_{Tj}]}{p_{Ti}+p_{Tj}}>z_{\text{cut}}\,,
\end{equation}
where $z_{\text{cut}}$ is a parameter typically taken to be around $10\%$. The soft drop grooming algorithm generalizes this to include angular dependence, but in this paper we will only consider mMDT grooming for simplicity.
\section{Theoretical Framework}
\label{sec:theory}
To analytically compute substructure observables such as $D_2$ in a systematically improvable manner we will use techniques from effective theory, in particular, the soft-collinear effective theory (SCET) \cite{Bauer:2000yr,Bauer:2001ct,Bauer:2001yt,Bauer:2002nz,Rothstein:2016bsq}, which provides operator and Lagrangian techniques for analyzing factorization.
\subsection{Multi-Differential Marginalization}
Substructure discriminants of current interest take the form of ratios of IRC safe observables, such as \eq{D2_def}. These discriminants can be viewed as identifying contours (more generally hypersurfaces) in the multi-dimensional space spanned by the observables. The cross section can then be obtained by marginalization (integration along a contour) of the multi-differential cross section \cite{Larkoski:2013paa,Larkoski:2015kga}
\begin{align}
\frac{d\sigma}{dD_2}= \int d \ecfnobeta{2} d \ecfnobeta{3}\, \delta \left ( D_2 -\frac{ \ecfnobeta{3} }{( \ecfnobeta{2})^3 } \right ) \frac{d\sigma}{d \ecfnobeta{2} d \ecfnobeta{3} }\,.
\end{align}
For the case of $D_2$, this is illustrated in \fig{multi_marginalize}, along with the final distributions for both quark and gluon jets at NLL+LO accuracy, as will be described in the text.
The multi-differential cross section can be computed \cite{Larkoski:2014tva,Procura:2014cba} efficiently by tiling the multi-dimensional phase space with EFTs, which can then be smoothly patched together. This approach is completely general, and reduces the problem to identifying the correct EFTs in all asymptotic regions of the phase space \cite{Bauer:2011uc,Larkoski:2015zka,Larkoski:2015kga,Pietrulewicz:2016nwo}. The extension to additional measurements or subjets is conceptually simple by iterating these constructions.
\subsection{Factorization with Grooming}
In the case that $ m_J^2 \ll z_{\text{cut}} p_{TJ}^2\ll p_{TJ}^2$, which is satisfied for situations of interest at the LHC, the grooming procedure removes all wide angle soft radiation, including non-global logarithms \cite{Dasgupta:2001sh}, and the observable is determined by collinear physics \cite{Frye:2016okc,Frye:2016aiz}. In this limit there is also a well defined notion of quark and gluon jets \cite{Frye:2016okc,Frye:2016aiz}. Due to collinear factorization, the normalized cross section depends only on the quark and gluon fractions
\begin{align}
\frac{d\sigma^{ \text{norm}}}{d\Dobsnobeta{2}}=\sum\limits_{k} \kappa_k \frac{d\sigma_k^{\text{norm}}}{d\Dobsnobeta{2}}\,,
\end{align}
where the $\kappa_k$ can be interpreted as the fraction of jets with flavor $k$ within the given mass cut. They can be extracted using fixed order codes \cite{Campbell:1999ah,Campbell:2010ff,Campbell:2011bn,Alwall:2014hca}. We will drop the ``norm'' superscript for simplicity.
The grooming procedure, which isolates the collinear radiation from the rest of the event, makes the jet behave similar to a boosted event shape (other than the color flow), with center of mass energy set by the jet mass, $m_J$. For a color singlet decay this is strictly true, and was exploited in \cite{Feige:2012vc}. In the present case we will see that this will lead to a number of remarkable features, in particular for the structure of non-perturbative corrections.
Although the dynamics of interest are purely collinear, energy hierarchies still exist for the radiation (modes) within the jet.
Due to these hierarchies imposed by the $D_2$ measurement, large logarithms exist in the perturbative calculation of the $D_2$ observable, which must be resummed to all orders to calculate the shape of the distribution. When used for discrimination, a cut $D_2 \ll 1$ is applied, making an accurate description of the distribution deep in the resummation region necessary.
We will perform the resummation by factorizing the cross section in each effective theory into single scale functions, $F$. Logarithms in the cross section are then resummed by the renormalization group evolution of these different functions
$d\log F/d\log \mu =\gamma_{F}\,,$
where $\gamma_F$ is the anomalous dimension.
All functions have operator definitions that are valid to all orders using standard gauge invariant operators in SCET \cite{Bauer:2000yr,Bauer:2001ct}. These are either matching coefficients, which we denote $H$, jet functions, which describe energetic collinear dynamics, and are denoted $J$, or collinear-soft functions, which we denote $C_s$, capturing radiation with softer energies that couple eikonally to the energetic subjets.
The jet functions are defined as matrix elements of collinear fields
\begin{align}
J_{n}(\ecfnobeta{3})&=\text{tr}\langle 0|\frac{\bar{n}\!\!\!\slash}{2}\chi_{n} \delta(Q-\bar{\mathcal{P}})\delta(\vec{{\mathcal P}}_{\perp})\delta(\ecfnobeta{3}-\ecfop{3})\bar{\chi}_{n}|0\rangle\,,
\end{align}
where $\chi_{n}(x) = \left[W_n^\dagger(x)\, \xi_n(x) \right]$, with $W_n$ a lightlike Wilson line, is a gauge invariant quark field, and ${\mathcal P}$ is the momentum operator. Similar definitions exist for the gluon field. The collinear-soft functions are defined as matrix elements of Wilson line operators
\begin{align}\label{eq:cs_func}
\hspace{-1cm}C_{si}(\ecfnobeta{3})&=\text{tr}\langle 0|T\{Y_i\}\delta (\ecfnobeta{3}-\ecfop{3}) \Theta_{\text{SD}}\bar{T}\{Y_i\} |0\rangle\,.
\end{align}
Here $Y_i$ are products of Wilson lines along subjet, or the recoiling jet directions, $T$ and $\bar T$ denote time and anti-time ordering respectively, and $\ecfop{3}$ is an energy flow operator that implements the measurement function, while $\Theta_{\text{SD}}$ implements the soft drop constraints. These operators can be written in terms of the energy-momentum tensor of the effective theory \cite{Sveshnikov:1995vi,Korchemsky:1997sy,Lee:2006nr,Bauer:2008dt}.
As illustrated in \fig{multi_marginalize}, three distinct effective field theories are required to cover the entire phase space; an effective theory describing the region where no substructure is resolved within the jet (factorization $\text{I}$), and two effective theories describing when a two-prong substructure is resolved. The latter two are distinguished by the energy distribution of the subjets, namely if the subjets are both energetic, with $p_T\gg z_{\text{cut}} p_{TJ}$, which we will call $\text{II}_a$, or if one of the subjets has $p_T \sim z_{\text{cut}} p_{TJ}$, which we will call $\text{II}_b$.
The factorization $\text{I}$ is a generalization of that given in \cite{Larkoski:2015kga}. Here the substructure of the jet is not resolved by the measurement, and the grooming acts only on dynamical soft radiation
\begin{align}\label{eq:fac_pp_cshaze}
\frac{d^2\sigma_k^{\text{I}}}{d\ecfnobeta{2} d\ecfnobeta{3}}= J(\ecfnobeta{2}) C_{s}(\ecfnobeta{2}, \ecfnobeta{3}, z_{\text{cut}}) \,.
\end{align}
The factorization $\text{II}_a$ is a generalization of \cite{Larkoski:2015kga}
\begin{align}\label{eq:fac_pp_coll}
\hspace{-0.5cm}\frac{d^2\sigma_k^{\text{IIa}}}{dz\, d\ecfnobeta{2} d\ecfnobeta{3}}= H^c(z,\ecfnobeta{2}) J_1(\ecfnobeta{3}) J_2(\ecfnobeta{3}) C_s(\ecfnobeta{3}, z_{\text{cut}}) \,.
\end{align}
Here $H^c$ is a matching coefficient which describes the splitting into two energetic subjets, $J_1$ and $J_2$, and is derived from collinear $1\to 2$ splitting amplitudes. The functions $C_s$ describe collinear-soft radiation at different energy scales, and $z$ is the $p_T$-fraction of one of the subjets of the total $p_{TJ}$.
The most interesting factorization occurs when the subjet energy, $p_T\sim z_{\text{cut}} p_{TJ}$. In this case, we can identify a universal matching coefficient that describes the splitting into a low energy jet in the presence of a grooming algorithm, analogous to the standard soft current \cite{Catani:2000pi,Duhr:2013msa,Li:2013lsa}. At leading power, $e_2$ is set by the Born-level kinematics of the subjets, making the result independent of the particular observable measured, and a universal ingredient in the study of groomed multiprong observables.
We have
\begin{align}\label{eq:fac_pp_cs}
\frac{d^2\sigma_k^{\text{IIb}}}{dz\,d\ecfnobeta{2} d\ecfnobeta{3}}=H^{s}(z,\ecfnobeta{2},z_{\text{cut}} )C_s(\ecfnobeta{3}) J_{sc}(\ecfnobeta{3}) J(\ecfnobeta{3}) \,.
\end{align}
In this case, the hard matching coefficient depends on $z_{\text{cut}}$. For example, the two diagrams that must be considered in the matching for the non-Abelian channel are
\begin{align}\label{eq:hard_match}
\hspace{-0.3cm} H^{s}_{C_A}=\ \raisebox{0.4cm}{\fd{2.85cm}{figures/matching_soft_nonabelian_virtual.pdf}}\ +\ \raisebox{0.48cm}{\fd{3.15cm}{figures/matching_soft_nonabelian.pdf}}
\end{align}
The first is a standard one-loop diagram, while in the second an additional real emission is removed by the mMDT algorithm. The one-loop anomalous dimension is
\begin{align}
\gamma_{H^{s}} =&-\left(2C_F+C_A\right)\,\Gamma_{\text{cusp}}[\alpha_s]\log\frac{4\mu^2}{z\ecfnobeta{2}Q^2}-\frac{\alpha_s}{2\pi}\beta_0 \nonumber \\
&-\frac{\alpha_sC_F}{\pi}\,
\log\frac{z^2}{z_{\text{cut}}^2 }
+\frac{\alpha_s}{\pi}\left(
C_F-C_A
\right)\frac{\text{Cl}_2(\frac{\pi}{3})}{\pi} \,.
\end{align}
Here $\text{Cl}_2(\frac{\pi}{3})=\Im \text{Li}_2(e^{i\pi/3}) \approx 1.01494$ is the maximum value of the Clausen function. Polylogarithms of the sixth root of unity have appeared in a number of higher order calculations \cite{Broadhurst:1998rz,Kalmykov:2010xv,Bonciani:2015eua,Henn:2015sem}.
The leading logarithmic behavior is proportional to the cusp anomalous dimension, $\Gamma_{\text{cusp}}$ \cite{Korchemsky:1987wg}.
We have checked that, summing over the different effective field theory contributions and removing overlap \cite{Larkoski:2015kga,Pietrulewicz:2016nwo}, our factorization correctly reproduces the small $D_2$ behavior by comparison with \texttt{EVENT2}~\cite{Catani:1996vz}.
A more detailed discussion can be found in \cite{usD2l}.
\subsection{Non-Perturbative Corrections}
\begin{figure}[t]
\subfigure{\raisebox{2.55cm}{(a)}\hspace{0.2cm}\includegraphics[width=6.2cm]{figures/np_scaling.pdf}}\\
\subfigure{\raisebox{4.75cm}{(b)}\hspace{0.2cm}\includegraphics[width=6.2cm]{figures/np_shift.pdf}}
\caption{(a) Fit for the non-perturbative parameter $\Omega_D$ in $e^+e^-$ collisions. (b) Effect of MPI and hadronization as modeled by Pythia for $pp\to Zj$, and comparison with the shape function prediction.
}
\label{fig:np_shift}
\end{figure}
Non-perturbative corrections play an important role in the description of QCD event shapes. Our operator based formulation allows non-perturbative corrections to be defined in terms of matrix elements whose scalings and symmetry properties can be studied \cite{Korchemsky:1999kt,Korchemsky:2000kp,Lee:2006fn,Hoang:2007vb,Mateu:2012nk}. In the present case, the dominant non-perturbative corrections due to hadronization arise from collinear-soft modes, and can be shown to have a leading-power scaling as
\begin{equation}\label{eq:npdef}
D_2^\text{NP}\sim \frac{\Omega_D}{z_{\text{cut}}^{3/2}m_J}\,.
\end{equation}
Here $\Omega_D$ is a non-perturbative scale, $\Omega_D\sim \Lambda_\text{QCD}$, and this expression is independent of the jet transverse momentum, $p_{TJ}$. The fact that the dominant non-perturbative effects arise from collinear-soft modes has two important consequences. First, the collinear-soft function, \eq{cs_func}, depends only on the color structure of the Wilson lines along the subjet and recoil directions, and its non-perturbative structure is therefore independent of the particular processes in which the jet was produced. Second, to leading order in the number of colors, the non-perturbative corrections are independent of whether a jet is gluon or quark initiated, since the dominant soft hadronization corrections arise from the zero rapidity region in the rest frame of a color-connected dipole, as was first experimentally established in \cite{Bartel:1983ii}.\footnote{This distribution of soft hadrons is manifest in the Lund string model \cite{Andersson:1983ia}, but applies to more general models of hadronization.} The outer dipoles will then have their contributions groomed away, but the interior will not. This is clearly seen from the color flow diagrams (here solid lines denote the color flows)
\begin{center}
\raisebox{-0.15cm}{\fd{2.65cm}{figures/gluon_colorflow.pdf}}\quad versus \quad \fd{2.65cm}{figures/quark_colorflow.pdf}\,,
\end{center}
Contributions from the underlying event (which we model using multiple parton interactions (MPI)) have an identical scaling, but are further suppressed by the effective area of the jet, namely $m_J^2/p_{TJ}^2\ll z_{\text{cut}}$, and can therefore be completely neglected. We will verify that these predictions are well reproduced in simulation.
\begin{figure*}[t]
\subfigure{\raisebox{0.75cm}{(a)}\hspace{0.2cm}\includegraphics[width=6.95cm]{figures/Fig2a.pdf} }
\qquad \qquad
\subfigure{\raisebox{0.75cm}{(b)}\hspace{0.2cm}\includegraphics[width=7.3cm]{figures/Fig2b.pdf} }
\caption{(a) Analytic predictions for the groomed $D_2$ distribution for boosted $Z$ and QCD jets. (b) Prediction for the QCD efficiency vs.~$Z$ boson efficiency curve, illustrating the importance of non-perturbative corrections.
}
\label{fig:Z}
\end{figure*}
Non-perturbative effects are implemented via a shape function $F(\epsilon)$, which is a parametrization of the non-perturbative matrix element, and are included via the convolution \cite{Korchemsky:1999kt,Korchemsky:2000kp}
\begin{align}
\frac{d\sigma_{\text{NP}}}{dD_2}=\int\limits_0^\infty d\epsilon\, F(\epsilon) \frac{d\sigma}{dD_2} \left( D_2 -\frac{\epsilon}{m_J z_{\text{cut}}^{3/2}} \right)\,.
\end{align}
We take the functional form
$F(\epsilon)=(4\epsilon/\Omega_D^2) e^{-2\epsilon/\Omega_D}\,,$ \cite{Stewart:2014nna}
where $\Omega_D$ is defined as in \eq{npdef}.
By our factorization, $\Omega_D$ is independent of the mechanism of jet production (up to possible flavor dependence, which we have argued is suppressed) and can therefore be estimated from \pythia{8.226} \cite{Sjostrand:2006za,Sjostrand:2014zea} in $e^+e^-$ collisions. As shown in \fig{np_shift}(a), we see that it is identical within errors for quarks and gluons and that it correctly reproduces the scaling with mass predicted by factorization. This implies that for the bulk of the $D_2$ distribution, only a single non-perturbative parameter is needed to describe groomed $D_2$ for all mass cuts and processes.
To verify this, in \fig{np_shift}(b) we test the effect of both MPI and hadronization for the $pp\to Zj$ process in \pythia{8.226}, and compare with the prediction of the shape function extracted from $e^+e^-$ collisions. We have chosen a smaller mass to enhance the non-perturbative effects. As predicted by factorization MPI is negligible, and the effect of hadronization is well reproduced by the shape function. We have verified this for a variety of other parameters and partonic channels, both using the perturbative factorization theorem prediction and the unhadronized \pythia{} distributions as input to the shape function, treating the parton shower with hadronization as data.
\section{Results for the LHC}
\label{sec:results}
Observables based on the energy correlation functions are being used in a wide range of applications at the LHC. Here we will illustrate our framework on a simple example, namely discriminating hadronically decaying $Z$-bosons from QCD. The extension to other processes is straightforward, using the factorization properties of the observable.
In \fig{Z}(a) we show analytic distributions for the $D_2$ observable for both a hadronically decaying $Z$ and QCD background at NLL matched to leading order fixed order computed using $1\to 3$ splitting functions. Quark and gluon fractions have been extracted after the application of an 80--100~GeV mass cut. In \fig{Z}(b) we show an analytic signal efficiency vs.~background efficiency curve highlighting the difference between the perturbative and full results. While the non-perturbative effects have a moderate impact on the distribution, they have a non-trivial impact on the discrimination efficiency, particularly in the region of interest where $D_2$ is small, and therefore must be properly incorporated.
\section{Conclusions}
\label{sec:conc}
In this paper we have presented an effective field theory framework which allows for systematically improvable calculations for groomed multi-prong observables of current interest at the LHC. We demonstrated the power of our approach by computing for the first time the groomed $D_2$ observable at the LHC illustrating complete theoretical control over the perturbative and non-perturbative aspects of the observable. We hope that the next generation of theoretical understanding will drive the development of new jet substructure techniques to fully exploit the rich dataset of the LHC.
\section{Acknowledgements}
We thank Simone Marzani, Nhan Tran, Jesse Thaler, Phil Harris and Ben Nachman for helpful discussions and comments on the manuscript and Frank Tackmann for pictures of hadrons.
This work is supported by the U.S. Department of Energy (DOE) under cooperative research agreements DE-FG02-05ER-41360, and DE-SC0011090 and by the Office of High Energy Physics of the U.S. DOE under Contract No. DE-AC02-05CH11231, and the LDRD Program of LBNL, as well as support from DOE contract DE-AC52-06NA25396 and through the LANL/LDRD Program.
\section{Introduction}
During the past several decades, yield stress fluids found an increasing number of practical applications for several major industries (which include cosmetics, foods, oil field etc.) and they are encountered in the daily life in various forms such as hair gels, food pastes, cement, mud and more. Such materials can be loosely and simplistically defined as materials that do not flow unless a minimal stress (referred as 'yield stress') is applied onto them. Understanding and controlling the hydrodynamic stability during flows of yield stress fluids is important for practical applications which involve flows of such materials through conduits.
From a fundamental standpoint, yield stress materials continue triggering intensive debates and posing difficult challenges, of both theoretical and experimental nature. Undoubtedly, the best known and most cited debate is related to the very definition of such materials and the existence of a 'true yield stress', \cite{1985Barnes_tysam,1999Barnes_tysar}. It was argued in Refs. \cite{1985Barnes_tysam,1999Barnes_tysar} that the yield stress emerges as an artefact related to the inability of the rheometric equipment to properly identify a viscous flow regime in a range of negligibly small rates of deformation. Recent studies claim to have solved the "yield stress debate" by arguing that prior to yielding the viscosity of the material is infinite which demonstrates the existence of a true solid state,\cite{bonn_europhys, bonn_science}.
\subsection{Yielding of a \trademark{Carbopol} gel and its relevance to hydrodynamic studies}
The existence of a "true" yield stress and the nature of the transition from a solid to fluid behaviour may look at a first glance of little or no relevance to the hydrodynamic stability of the flow of a yield stress material. Indeed, whereas the yielding transition occurs at low values of the Reynolds number (typically $Re<1$) a loss of the hydrodynamic stability is typically observed at significantly larger $Re$ (typically $Re>1000$). On the other hand, the base flows usually considered in the linear analysis of the hydrodynamic stability of channel flows of yield stress fluids are characterized by a significant stratification of the velocity gradients: large values near the channel boundaries which are consistent with a yielded flow region and vanishing values near the center-line, which are consistent with a plug region. A recent experimental investigation of the laminar-turbulent transition in the pipe flow of a yield stress fluids demonstrates that the transition to turbulence occurs when the Reynolds stresses balance the yield stress of the fluid, that is when the solid plug is broken, \cite{bulent_crap}. These findings corroborate well with the idea that, in fact, the nature of the solid-fluid transition and the yielding scenario may play an important role in the stability problem. A rather new and certainly unexpected insight into the yielding transition has been brought by an experimental study on sedimentation of spherical particles in a physical gel (\trademark{Carbopol} $940$), \cite{sedimentation_andreas}. Quite unexpectedly, the fore-aft symmetry of the flow patterns around the falling object was broken (in spite of the smallness of the Reynolds number during these experiments) and even more surprisingly a negative wake has been observed (see Fig. $8$ in Ref. \cite{sedimentation_andreas}). A similar non symmetric flow pattern has been observed experimentally during flows of \trademark{Carbopol} gel past a cylinder, \cite{Tokpavi200935}.
A first message of the sedimentation study presented in \cite{sedimentation_andreas} was that the solid-fluid transition in the \trademark{Carbopol} gel might not be reversible upon increasing/decreasing stresses, unlike it has been traditionally assessed. A second important message was that the elastic effects responsible for the emergence of the experimentally observed negative wake might be consistent with an intermediate transition regime where solid material elements (unyielded) coexist with fluid (yielded) ones. A more systematic investigation of these features of the solid-fluid transition in a \trademark{Carbopol} gel has been presented
in Ref. \cite{Burghelea09}. Indeed, as suggested by the sedimentation experiments presented in \cite{sedimentation_andreas}, a hysteresis has been found in the solid-fluid transition of a \trademark{Carbopol} gel upon increasing/decreasing stresses (see Fig. 3 in Ref. \cite{Burghelea09}). A clear signature of the elasticity involved in the transition region in the form of a recoil effect (negative shear rates) on the decreasing stress branch of the flow curve has been observed as well.
The hysteresis effect observed in the increasing/decreasing stress flow curves and reported in \cite{Burghelea09} has also been observed experimentally by Divoux et al., \cite{manneville}, and attributed to critical slowing down of the system dynamics close to yielding (and not to any memory effect).
\subsection{Linear stability analysis of a yield stress fluid: a brief survey}
The first study on linear stability analysis of a Bingham viscoplastic fluid is due to Frigaard et al. \cite{Frig94}. These authors concentrated on the asymptotic stability of two-dimensional traveling wave perturbations and found that the critical Reynolds number increases linearly with Bingham number. They also showed that the plug region remains unaffected by the disturbance field. In this work the authors imposed even symmetry at the yield surface and the results are incomplete. Some years later Nouar et al. studied this problem again using the exact boundary conditions at the yield surface and they found that the plane-Bingham-Poiseuille flow is linearly stable. This is a consequence of the vanishing perturbation at the yield surface. They also conjectured that this result could be extended to Poiseuille flow of a viscoplastic fluid in a circular or annular pipe and to other rheological models such as the Herschel-Bulkley and Casson models.
As mentioned before, recent studies have found that the solid-fluid transition in some yield stress materials, such as \trademark{Carbopol}, is not reversible under increasing/decreasing stresses; thus, the objective of this work is the linear stability analysis of an elastoviscoplastic fluid.
In this work we consider a modified version of the structural model developed by Putz and Burghelea in \cite{Burghelea09}. This model consists of a non-linear Maxwell-type viscoelastic constitutive equation and a kinematic equation that governs the behaviour of the microstructure. The relaxation time of the fluid depends on the structural variable which is equal to one if the fluid is unyielded and zero if the fluid is fully yielded. We should note that the transition from viscoelastic solid to fluid is continuous and smooth. For the non-linear viscosity we consider a regularized viscoplastic model; its behaviour is that of a pseudo-plastic fluid, that is, if the second invariant of the stress tensor is below the yield stress the fluid presents a very high viscosity.
Several authors have studied the linear stability problem of a flow with variations in viscosity across the channel \cite{Chikkadi05,Nouar07,Pinarbasi01}. Govindarajan et al. \cite{Govindarajan01} showed that having a viscosity which is a decreasing function of space is sufficient to considerably delay the onset of two-dimensional instabilities. This result was later confirmed by Nouar and Bottaro for the case of shear-thinning fluids \cite{Nouar07}. Linear stability analyses for a regularized Bingham model were carried out by Frigaard and Nouar in \cite{Frig05}. They showed that the spectrum for the regularized problem had some physical spurious eigenvalues, not present in the Bingham problem. The stability of these eigenvalues depended on the size of the regularization parameter and unstable modes can be found even for moderate Reynolds numbers if this parameter is small enough.
For the case of Maxwell liquids, Gorodstov and Leonov \cite{Gorod67} showed the existence of a stable continuous spectrum and two stable eigenvalues for each value of the wave number in the streamwise direction, $\alpha$. In the same work they predicted instabilities at finite Reynolds numbers. Later, this result was proven wrong by several authors, e.g.~ \cite{Denn77,Lee86,Renardy86}. Through careful numerical simulation of the generalised eigenvalue problem no unstable modes were found, even for high Reynolds numbers. Denn and coworkers \cite{Denn77,Denn72,Denn73} also studied the stability of plane Poiseuille flow and found that at high Reynolds numbers UCM fluids are significantly less stable than Newtonian fluids. Sureshkumar \& Beris \cite{Suresh95} confirmed this and also showed that destabilisation is reduced for the Oldroyd-B and Chilcott-Rallison models. Renardy \cite{Rena92} has proven the linear stability of Couette flow of a UCM fluid, but there is no proof of linear stability of Couette flows for more general fluids of this type. However, it is generally believed that plane Couette flow is linearly stable. Indeed much of the literature is focused at the study of interesting features of the eigenspectra, rather than marginal stability, e.g.~\cite{Wils99,Kupf05}.
\section{Elastoviscoplastic model with internal microstructure}\label{sec_pmm}
The elastoviscoplastic fluid is described by the following set of equations with unknowns $(p,{\boldsymbol{u}},\bm{\tau},\Phi)$:
\begin{eqnarray}
\rho\left(\frac{\partial \bm{u}}{\partial t}+(\bm{u}\cdot{\bm \nabla})\bm{u}\right)&=&-\nabla p+\mu_s {\bm \nabla} \cdot \bm{\dot{\gamma}}+ {\bm \nabla} \cdot\bm{\tau} \label{eqnmotiondim}\\
{\bm \nabla} \cdot\bm{u}&=& 0 \label{massconsdim},
\end{eqnarray}
where $\mu_s$ denotes the solvent viscosity. The rate of strain tensor is $\bm{\dot{\gamma}} = \bm{\dot{\gamma}}({\bm u})$.
The constitutive equation for the elastic stress tensor $\bm{\tau}$ is:
\begin{equation}
\bm{\tau}+\frac{\mu\left(\dot{\gamma}(\bm{u})\right)}{G} \Phi \stackrel{\triangledown}{\bm{\tau}} = \mu\left(\dot{\gamma}(\bm{u})\right) \bm{\dot{\gamma}}, \label{constdim}
\end{equation}
where
\[
\stackrel{\triangledown}{\cdot}=\frac{D \cdot}{D t}-\bm \nabla\bm{u}\cdot-\cdot \bm \nabla\bm{u}^T
\]
is the Upper-Convected-Derivative and $D\cdot /Dt$ is the usual material derivative.
Finally, the concentration of the solid state, $\Phi$, satisfies the following kinematic equation:
\begin{equation}
\frac{\partial \Phi}{\partial t}+(\bm{u}\cdot{\bm \nabla})\Phi=R_d(\Phi,\tau(\bm{u}))+R_r(\Phi,\tau(\bm{u})),\label{phieqndim}
\end{equation}
with $R_d$ being the rate of destruction of solid units and $R_r$ is the rate of fluid recombination of fluid elements into a gelled structure.
Note that when $\Phi\equiv 1$ the model describes a Maxwell type viscoelastic fluid with a nonlinear viscosity $\mu(\dot{\gamma})$ and if $\Phi\equiv 0$ the model represents a Generalized Newtonian fluid.
In both (\ref{constdim}) and (\ref{phieqndim}), $\dot{\gamma}$ and $\tau$ are the second invariants of the rate of strain and elastic stress tensors respectively. These are defined as follows:
\[
\dot{\gamma}(\bm{u})=\left(\frac{1}{2}\sum_{i,j=1}^3[\dot{\gamma}_{ij}(\bm{u})]^2\right)^{1/2},~~~~~~\tau(\bm{u})=\left(\frac{1}{2}\sum_{i,j=1}^3[\tau_{ij}(\bm{u})]^2\right)^{1/2}.
\]
\subsection{Choice of functions for nonlinear viscosity $\mu\left(\dot{\gamma}\right)$, destruction rate $R_d(\Phi,\tau(\bm{u}))$ and recombination rate $R_r(\Phi,\tau(\bm{u}))$.}
In this section we present our choices for the nonlinear viscosity $\mu(\dot{\gamma})$ and the destruction and recombination rate functions. The choices are clearly non-unique. Constitutive models for fluids with a yield stress trace back to the early 1900's. The most widely used are the ones due to Bingham \cite{Bingham}, Herschel-Bulkley \cite{H-B} and Casson \cite{Casson}. Description of the rates of destruction and recombination is rather empirical and for this we follow closely \cite{Burghelea09}.
\subsubsection{Non-linear viscosity $\mu(\dot{\gamma})$}
The fluids we are interested in present a yield stress. It is well known that an ``ideal'' viscoplastic fluid behaves as a Newtonian (or Generalized Newtonian) fluid if $\tau$ is above the yield stress; if the contrary happens, then the material behaves as a plastic solid and one says that the fluid is unyielded. In \cite{Burghelea09} it has been shown that the transition from an elastic solid behavior (unyielded) to a fluid behavior (yielded) is not direct but mediated by a viscoelastic-like regime where solid and fluid behavior coexist. This effect, though previously unnoticed, is particularly pronounced in the case of ``fast'' yielding, i.e.\ when the externally applied stress changes unsteadily, and becomes vanishingly small in the limit of steady yielding, in other words, when the stresses vary infinitesimally slowly. Although it has been argued that if one waits ``long enough'' the viscosity in the unyielded region actually tends to infinity, see \cite{bonn_europhys}, in an experimental (to investigate hydrodynamic stability for example) or industrial setting it is unlikely one will reach those time frames. Therefore for the rest of this study we will work with a regularized version of a viscoplastic constitutive model. Because our main interest is in linear stability analysis and this involves high Reynolds numbers it is better if we have a model with a non-zero infinite-shear-rate viscosity; for this reason we choose the shear-thinning regularized Casson's model for the viscosity:
\begin{equation}
\mu(\dot{\gamma})=\left(\mu_{\infty}^{1/m}+\left(\frac{\tau_y}{\sqrt{\dot{\gamma}^2+\epsilon^2}}\right)^{1/m}\right)^m,\label{Cassonvisc}
\end{equation}
where $m\geq 1$ is the power law index, $\tau_y$ is the yield stress and $\epsilon\ll 1$ is the regularization parameter. We recover Casson's model when $m=2$, the Bingham model when $m=1$, and the model reduces to a Newtonian viscosity when $m=1$ and $\tau_y=0$. In Figure \ref{fig:viscfandg}a we show an example of (\ref{Cassonvisc}); note that if $\tau<\tau_y$ the viscosity becomes very large but never infinite as long as $\epsilon$ is fixed and different from zero, thus we will be working with a pseudo-plastic viscosity (shear-thinning).
\subsubsection{$R_d(\Phi,\tau(\bm{u}))$ and $R_r(\Phi,\tau(\bm{u}))$}
As discussed by Putz and Burghelea in \cite{Burghelea09} $R_d$ and $R_r$ have to satisfy the following assumptions:
\begin{itemize}
\item[1.] $R_d(\Phi,\tau(\bm{u}))$ is proportional to the relative speed of neighboring solid units and the existing amount of solid, thus
\begin{equation}
R_d(\Phi,\tau(\bm{u}))=-g(\tau(\bm{u}))\Phi,
\end{equation}
and
\begin{equation}
g(\tau(\bm{u}))=K_d\left(1+\tanh\left(\frac{\tau(\bm{u})-\tau_y}{w}\right)\right),\label{geqn}
\end{equation}
where $g(\tau)$ is an increasing function of the second invariant of the stress tensor, $K_d$ is the rate of destruction with units $s^{-1}$ and $w$ determines the ``width'' of the solid-fluid region and has units Pa.
\item[2.] $R_r(\Phi,\tau(\bm{u}))$ decreases with the relative speed of neighboring elements, being practically zero in a fast enough flow. Therefore,
\begin{equation}
R_r(\Phi,\tau(\bm{u}))=f(\tau(\bm{u}))(1-\Phi),
\end{equation}
and
\begin{equation}
f(\tau(\bm{u}))=K_r\left(1-\tanh\left(\frac{\tau(\bm{u})-\tau_y}{w}\right)\right),\label{feqn}
\end{equation}
\end{itemize}
where $f(\tau)$ is a decreasing function of the second invariant of the stress tensor. In Figure \ref{fig:viscfandg}b we show an example of (\ref{geqn}) and (\ref{feqn}). Note that when $\tau$ is far below the yield stress, the material is fully ``unyielded'' which means it behaves as a viscoelastic solid. This is because when $\tau<\tau_y$, $\mu(\dot{\gamma})$ is of the order $O(1/\epsilon)$ and $\Phi\equiv 1$, thus by a simple dominant balance analysis equation (\ref{constdim}) reduces to the one for a Kelvin--Voigt viscoelastic solid. If the opposite happens ($\tau$ far above the yield stress) $\Phi\equiv 0$ and the fluid behaves as a shear-thinning fluid. It is important to note that in a neighborhood around $\tau_y$ the material is neither a viscoelastic solid nor a viscous fluid; both solid and fluid structures coexist and this region will play a crucial role in the stability of the flow. The existence of this region is the main difference with respect to other proposed models for this type of elastoviscoplastic fluids. The main difference between the approach used here and previous work, see \cite{Moller06}, is that we do not use the steady states of \eqref{phieqndim} but solve it numerically and thus describe a non-steady yielding behavior which we believe might be of some practical relevance.
\begin{figure}
\begin{center}
(a)\scalebox{0.4}{\includegraphics{viscosityfunc.eps}}
(b)\scalebox{0.4}{\includegraphics{figfandg.eps}}
\end{center}
\caption{(a) Viscosity function $\mu(\dot{\gamma})$ with $m=3$, $\mu_{\infty}=0.0102$, $\tau_y=6.5$ and $\epsilon=0.01$. (b) $f(\tau)$ and $g(\tau)$ with $K_d=K_r=0.3$ and $w=0.1$.}
\label{fig:viscfandg}
\end{figure}
\subsection{Validation of the model against experimental results}\label{sec_expcomparison}
To validate the model, we use the rheological measurements presented in \cite{Burghelea09} conducted for various \trademark{Carbopol} solutions with concentrations (by weight) ranging in between $0.05 \%$ and $0.2 \%$.
The measurements were conducted on a Bohlin (now Malvern) $C-VOR$ rotational rheometer equipped with a
digitally controlled temperature (within $0.1 ^\circ C $ ) bath
($RTE-111$ from Neslab). The rheometer was operated in a controlled stress mode. To ensure a good reproducibility of the measurements,
prior to each experiment the sample has been pre-sheared at
a constant stress (usually the largest stress applied during the
test, so that yielding occurs in the entire volume of the material) for $300~s$. After the pre-shear step, the samples were allowed to relax for $300~s$ prior to the rheological measurements.
A first major concern during these experiments was the occurrence of the wall slip phenomenon at the
contact with the measuring geometry, which is a well known and documented effect for \trademark{Carbopol} gels, \cite{piau}. To prevent this, a serrated parallel plates geometry with an
approximate roughness of $0.8~mm$ has been used. A second concern was related to
the possible artefacts introduced by fluid evaporation during long
experimental runs. To prevent this, a solvent trap has
been placed around the free fluid meniscus. After each
experimental run we have carefully checked (by visual inspection)
that no significant changes in the shape of the meniscus occurred,
and thus concluded that evaporation effects were either minimal or
absent. The radius of the plates was $R=2~cm$ and the distance
between them was $d=1~mm$ (measured between the plates
protuberances). Two types of rheological tests have been
performed: controlled stress linear ramps and controlled stress
oscillatory sweeps at a fixed frequency. For each sample we have
conducted controlled stress experiments for $19$ different values
of the total ramp time. Each constant stress rheological
experiment started with an increasing stress ramp and ended with
a decreasing stress ramp within the same range of stresses and the
same stress step. The data averaging time per stress value, $t_0$ (referred to in \cite{Burghelea09} as the \textit{characteristic time of
forcing}), has been varied between $0.2~s$ and $2~s$. For each
up-down stress ramps $1000$ stress values have been explored
ranging in between $0.1~Pa$ and $20~Pa$.
We emphasize here that a true steady state of deformation can only be inferred in the asymptotic
limit $t_0 \rightarrow \infty$, unlike in previous studies concerning \trademark{Carbopol} gels where a steady state was a priori set by deliberately choosing large values of $t_0$ (an accurate description of such procedure is presented, for example, in Ref. \cite{coussotjnfm2008}).
Alternatively, the time dependent response of the samples has been tested via stress
controlled oscillatory experiments at several frequencies and in
the same range of stresses.
We now turn our attention to the comparison of the predictions of the model described in Sec. \ref{sec_pmm} with the experimental results presented in Ref. \cite{Burghelea09}.
Bearing in mind that the experiments provide a scalar data set, the equations (\ref{eqnmotiondim})-(\ref{phieqndim}) reduce to:
\begin{eqnarray}
&&\frac{d\Phi}{dt}=-g(\tau)\Phi+f(\tau)(1-\Phi)\label{eqn:phiexperiment}\\
&&\frac{\mu(\dot{\gamma})}{G}\Phi\frac{d\tau}{dt}+\tau=\mu(\dot{\gamma})\dot{\gamma}\label{eqn:stressexperiment}
\end{eqnarray}
Because the rheometer operates in a controlled stress mode, the above equations decouple. First we solve for (\ref{eqn:phiexperiment}) using the function \emph{ode15s} provided with the \trademark{MATLAB} distribution. The function \emph{ode15s} performs well with stiff problems and its accuracy is recognized as medium to high. Equation (\ref{eqn:stressexperiment}) reduces to an algebraic equation for $\dot{\gamma}$ which we solve using the function \emph{fzero} provided with the \trademark{MATLAB} distribution as well. The function \emph{fzero} uses a combination of bisection, secant, and inverse quadratic interpolation methods.
In Figure \ref{fig:phi}a we present the steady state solution of equation (\ref{eqn:phiexperiment}) which reads
\begin{equation}
\Phi_s=\frac{f}{f+g}.
\end{equation}
In Figure \ref{fig:phi}b we present the solution of (\ref{eqn:phiexperiment}) using the same stress ramp as in the experiments presented in Ref. \cite{Burghelea09}. Clearly, for intermediate values of $\tau$, the decreasing stress branch of the solid concentration lags behind the increasing branch and a hysteresis is clearly visible. Having now both $\tau$ and $\Phi$ we can calculate $\dot{\gamma}$ from (\ref{eqn:stressexperiment}) using (\ref{Cassonvisc}) for $\mu(\dot{\gamma})$. The values for $m$, $\mu_{\infty}$ and $\epsilon$ are obtained from the fit performed in the rheometer. We obtain the elastic modulus $G$ from a linear fit of the experimentally measured dependence $\tau=G\gamma$ using the experimental data. The parameters $K_d$, $K_r$ and $w$ were chosen such that the numerical flow curve is in close agreement with the experimental flow curve. In Figure \ref{fig:experiment}a we compare the model discussed above with the experimental data. An excellent agreement is obtained, particularly at the point where $\dot{\gamma}=0$.
Finally, we test the model against oscillatory flow measurements where a harmonic forcing $\tau=\tau_0\sin(2\pi\kappa t)$ is applied and the strain response $\gamma=\gamma(t)$ is monitored. This step is needed for a proper validation of the model, as the model involves three adjustable parameters ($K_d$, $K_r$ and $w$). Note that $\dot{\gamma}=d\gamma/dt$. As before, having $\tau$ we solve for $\Phi$ using (\ref{eqn:phiexperiment}), then $\dot{\gamma}$ is found using (\ref{eqn:stressexperiment}), and finally we can use standard numerical integration to find $\gamma$. The results are presented in Figure \ref{fig:experiment}b.
One can conclude that using the values of the adjustable parameters that have validated the flow curves presented in Fig. \ref{fig:experiment}a a good agreement with the oscillatory measurements is obtained without any additional parametric adjustments, Fig. \ref{fig:experiment}b. We consider this result as a validation of the model initially proposed in \cite{Burghelea09} and revisited in Sec. \ref{sec_pmm} which further justifies employing it in the linear stability analysis presented through the rest of the paper.
\begin{figure}
\begin{center}
(a)\scalebox{0.4}{\includegraphics{figphisteady.eps}}
(b)\scalebox{0.4}{\includegraphics{figphitrans.eps}}
\end{center}
\caption{(a) Steady $\Phi$. (b) Transient $\Phi$. Both with $\tau_y=6.5$Pa $K_d=K_r=0.3$ and $w=0.1$.}
\label{fig:phi}
\end{figure}
\begin{figure}
\begin{center}
(a)\scalebox{0.4}{\includegraphics{figflowcurve.eps}}
(b)\scalebox{0.4}{\includegraphics{figoscillatoryexp.eps}}
\end{center}
\caption{(a) Flow curve. (b) Strain time series. In both plots we have $m=3$, $\mu_{\infty}=0.0102$, $\epsilon=0.01$, $\tau_y=6.5$, $K_d=K_r=0.3$ and $w=0.1$.}
\label{fig:experiment}
\end{figure}
\subsection{Non-dimensionalization}
To conclude this section we present the non-dimensional set of equations. Let
\[
{\boldsymbol{x}}=\hat{{\boldsymbol{x}}}L,~~~~{\boldsymbol{u}}=\hat{{\boldsymbol{u}}}U_{\max}, ~~~~t=\hat{t}L/U_{\max},~~~~ p=\hat{p}\rho U_{\max}^2, \bm{\tau}=\hat{\bm{\tau}}\mu U_{\max}/L
\]
where $\mu=\mu_s+\mu_{\infty}$ is the total viscosity of the completely yielded fluid. Substituting all these into (\ref{eqnmotiondim})-(\ref{phieqndim}) we get:
\begin{eqnarray}
\frac{\partial \hat{{\boldsymbol{u}}}}{\partial \hat{t}}+(\hat{{\boldsymbol{u}}}\cdot{\bm \nabla})\hat{{\boldsymbol{u}}}&=&-\nabla \hat{p}+\frac{\mu_r}{Re} {\bm \nabla} \cdot \bm{\hat{\bm{\dot{\gamma}}}}+\frac{1}{Re}{\bm \nabla} \cdot\hat{\bm{\tau}} \label{eqnmotion}\\
{\bm \nabla} \cdot\hat{{\boldsymbol{u}}}&=& 0 \label{masscons},\\
\hat{\bm{\tau}}+We\hat{\mu}\left(\dot{\gamma}\right)\Phi \stackrel{\triangledown}{\hat{\bm{\tau}}} &=& \hat{\mu}\left(\dot{\gamma}\right) \hat{\bm{\dot{\gamma}}}, \label{const}\\
\frac{\partial \Phi}{\partial \hat{t}}+(\hat{{\boldsymbol{u}}}\cdot{\bm \nabla})\Phi&=&-\hat{g}(\hat{\tau})\Phi+\hat{f}(\hat{\tau})(1-\Phi).\label{phieqn}
\end{eqnarray}
where $\mu_r$ is the ratio of the solvent viscosity to the total viscosity, i.e. $\mu_s/\mu$. Finally:
\begin{eqnarray}
\hat{\mu}(\hat{\dot{\gamma}})&=&\left((1-\mu_r)^{1/m}+\left(\frac{B}{\sqrt{\hat{\dot{\gamma}}^2+\varepsilon^2}}\right)^{1/m}\right)^m,\label{Cassonviscndim}\\
\hat{g}(\hat{\tau})&=&\hat{K_d}\left(1+\tanh\left(\frac{\hat{\tau}-B}{\hat{w}}\right)\right),\label{geqnndim}\\
\hat{f}(\hat{\tau})&=&\hat{K_r}\left(1-\tanh\left(\frac{\hat{\tau}-B}{\hat{w}}\right)\right),\label{feqnndim}
\end{eqnarray}
where
\[
K_d=\hat{K_d}L/U_{\max},~~~~K_r=\hat{K_r}L/U_{\max},~~~~w=\hat{w}\mu U_{\max}/L.
\]
The Reynolds ($Re$), Weissenberg ($We$) and Bingham ($B$) numbers are defined:
\[
Re=\frac{\rho L U_{\max}}{\mu},~~~~We=\frac{\lambda_H U_{\max}}{L},~~~~B=\frac{\tau_y L}{U_{\max}\mu}
\]
with the relaxation time $\lambda_H=\mu_{\infty}/G$.
Through the rest of the paper only non-dimensional variables will be used and the ``hat'' shall be dropped for simplicity.
\section{Linear stability analysis for shear-thinning regularized Casson's Model}
First we consider the linear stability of the regularized Casson model. We note that our main goal is to investigate the effect the solid and solid-fluid regions have on the stability of the flow. It is known that a viscoplastic fluid is stable to all infinitesimal perturbations (at least no instabilities have been found numerically) see \cite{Nouar07b}, therefore a natural second step towards our goal is to ask ourselves how the presence of a highly stratified viscosity affects the stability of the flow, thus we consider the case $\Phi\equiv 0$. A linear stability analysis for a regularized Bingham model was carried out by Frigaard and Nouar in \cite{Frig05}. There they showed that as $\epsilon\rightarrow 0$ the velocity field using a regularized viscosity converges to the velocity field using the non-regularized viscoplastic viscosity. While considering the problem of linear stability they showed that the spectrum for the regularized problem had some physical spurious eigenvalues, not present in the Bingham problem. The stability of the eigenvalues depended on $\epsilon$; we will discuss this further in Sec. \ref{subsec:validationmodOrr-Som}. Here we are not interested in the distinguished limit $\epsilon\rightarrow 0$. Based on the experimental data we are aware of, typically $O(10^{-3})\leq \epsilon \leq O(10^{-2})$, therefore for the rest of the paper we fix $\epsilon=0.002$ unless otherwise stated.
For the rest of this section we set $\Phi=0$, $\mu_s=0$ and $\mu=\mu_{\infty}$, then the equation of motion reads
\begin{equation}\label{eq:regmomentum}
\frac{D{\boldsymbol{u}}}{Dt} = -{\nabla}{p} +\frac{1}{Re} {\nabla}\cdot\bm{\tau},
\end{equation}
along with the incompressibility condition ${\nabla}\cdot{\boldsymbol{u}}=0$.
The constitutive equation reduces to:
\[
\bm{\tau} = \mu(\dot{\gamma})\bm{\dot{\gamma}},
\]
with $\mu(\dot{\gamma})$ satisfying (\ref{Cassonviscndim}) with $\mu_r=0$.
\subsection{Baseflow profile}\label{baseflowreg}
We consider shear flows of the form ${\boldsymbol{U}}=(U(y),0,0)$ with $y\in[-1,1]$, then (\ref{eqnmotion}) reduces to:
\begin{equation}
\frac{d\tau_{xy}}{dy}=Re\frac{dp}{dx}\label{eqn:baseflowreg}
\end{equation}
with the usual no-slip boundary conditions $U(-1)=U(1)=0$ at the walls. It is clear that in this situation $\dot{\gamma}=|dU/dy|$ and because $U(y)$ reaches its maximum at the center of the channel we have: $\dot{\gamma}=-dU/dy$ if $y\in [0,1]$ and $\dot{\gamma}=dU/dy$ otherwise. Having this in mind and integrating once (\ref{eqn:baseflowreg}) we have the following algebraic equation for $\dot{\gamma}$:
\begin{equation}\label{eq:baseflowgamma}
|y|\frac{dp}{dx}Re + \mu(\dot{\gamma})\dot{\gamma} = 0.
\end{equation}
We solve this non-linear equation for $\dot{\gamma}$ using \emph{fzero} in MATLAB for each point of our discrete domain. Once we have the approximation for $\dot{\gamma}$ we integrate it with respect to $y$ and use our boundary conditions to get $U(y)$. We feed this result to a user-built function in MATLAB and use \emph{fzero} again to find $Re(dp/dx)$ such that $\max_y(U(y))=1$.
We present different examples of the base flow for different Bingham numbers in Figure \ref{fig:baseflowreg}; clearly, as we increase $B$, we can see the existence of a pseudo-plug region around the center line of the channel. Recall that this is not a true plug, i.e. the velocity there is not constant.
\begin{figure}
\begin{center}
\scalebox{0.5}{\includegraphics{figbaseflowregB.eps}}
\end{center}
\caption{Base flow profile for the regularized shear-thinning Casson model with $\epsilon=0.002$, $m=2.5$ and $B=0,~1,~2,~3,~4,~5$.}
\label{fig:baseflowreg}
\end{figure}
\subsection{Linearised momentum equation and tangent viscosity}
As is common in linear stability analysis we consider an infinitesimal perturbation $(\varepsilon{\boldsymbol{u}}',\varepsilon p')$ superimposed upon the base flow and linearize the momentum equation (\ref{eq:regmomentum}) around $({\boldsymbol{U}},p)$ to get:
\begin{equation}\label{eq:linearised-momentum-equation}
\ddy{{\boldsymbol{u}}'}{t}+({\boldsymbol{u}}'\cdot\bm{\nabla}){\boldsymbol{U}}+({\boldsymbol{U}}\cdot\bm{\nabla}){\boldsymbol{u}}' = -{\nabla}{p'} + {\bm {\nabla}}\cdot \bm{\tau}',
\end{equation}
where $\bm{\tau}^{\prime}$ is the stress perturbation given by:
\begin{equation}
\bm{\tau}^{\prime}= \mu({\boldsymbol{U}})\bm{\dot{\gamma}}({\boldsymbol{u}}')+\tilde{\mu}\bm{\dot{\gamma}}({\boldsymbol{U}}).\label{eqn:regstresspert}
\end{equation}
where $\tilde{\mu}$ is the viscosity perturbation and it is given by:
\begin{equation}
\tilde{\mu}=\dot{\gamma}_{ij}({\boldsymbol{u}}')\ddy{\mu}{\dot{\gamma}_{ij}}({\boldsymbol{U}}).
\end{equation}
Given the fact that the flow we consider is unidirectional, it can be shown that (see \cite{Nouar07}):
\begin{equation}
\tau_{ij} = \left\{
\begin{array}{lcl}
\mu({\boldsymbol{U}})\dot{\gamma}_{ij}({\boldsymbol{u}}') & \mathrm{for} & ij \neq xy,yx\\
\mu_{t}({\boldsymbol{U}})\dot{\gamma}_{ij}({\boldsymbol{u}}') & \mathrm{for} & ij = xy,yx,\\
\end{array}
\right.
\end{equation}
where
\begin{equation}\label{def:tangent-viscosity}
\mu_{t} = \mu({\boldsymbol{U}}) + \frac{d\mu}{d\dot{\gamma}_{xy}}({\boldsymbol{U}})\dot{\gamma}_{xy}({\boldsymbol{U}})
\end{equation}
is the tangent viscosity. For a one-dimensional shear flow, the tangent viscosity is defined by $\mu_{t}=d\tau_{xy}/d\dot{\gamma}_{xy}$ whereas the effective viscosity is defined as $\mu=\tau_{xy}/\dot{\gamma}_{xy}$. In Figure \ref{fig:muandmutreg} we show some examples of the non-linear viscosity $\mu(y)$ and $\mu_t(y)$ for increasing Bingham numbers, note that the functions are smooth, $\mu\geq\mu_t$ and that $\max_y\mu(y)\equiv\max_y\mu_t(y)\sim B/\epsilon$.
For a detailed description of the tangent viscosity concept we refer the reader to \cite{Nouar07}.
\begin{figure}
\begin{center}
(a)\scalebox{0.5}{\includegraphics{figureviscreg.eps}}
(b)\scalebox{0.5}{\includegraphics{figuretanviscreg.eps}}
\end{center}
\caption{(a) Non-linear viscosity $\mu(y)$ and (b) tangent viscosity $\mu_t(y)$ with $\epsilon=0.002$, $m=2.5$ and $B=0,~1,~2,~3,~4,~5$.}
\label{fig:muandmutreg}
\end{figure}
\subsection{Modified Orr-Sommerfeld and Squire equations}
Writing equation (\ref{eq:linearised-momentum-equation}) in terms of the normal velocity $v'({\boldsymbol{x}},t)$ and the normal vorticity $\eta({\boldsymbol{x}},t)=\dy{u'}/\dy{z} - \dy{w'}/\dy{x}$ and assuming modal solutions of the form:
\begin{equation*}\begin{aligned}
&v'({\boldsymbol{x}},t) = \hat{v}(y)\exp[i(\alpha{x}+\beta{z}-\omega{t})],\\
&\eta({\boldsymbol{x}},t) = \hat{\eta}(y)\exp[i(\alpha{x}+\beta{z}-\omega{t})],
\end{aligned}\end{equation*}
we get the following eigenvalue problem for the frequency $\omega$:
\begin{equation}\label{eq:modified-os-sq}
\left(\begin{array}{cc}
\mathcal{L} & \mathcal{C}_{1}\\
\mathcal{C}_{2} & \mathcal{S}
\end{array}\right)
\left(\begin{array}{c}
v'\\
\eta
\end{array}\right) =
\omega \left(\begin{array}{c}
\tilde{\Delta}v'\\
\eta
\end{array}\right),
\end{equation}
where
\begin{eqnarray}
\mathcal{L} &=& \alpha[U\tilde{\Delta}-(D^{2}U)] + \frac{i}{Re}[\mu\tilde{\Delta}^{2}+2(D\mu)D^{3}+(D^{2}\mu)D^{2}-2k^{2}(D\mu)D+k^{2}(D^{2}\mu)]\nonumber\\
& +&\frac{i\alpha^{2}}{Re\ k^{2}}(D^{2}+k^{2})[(\mu_{t}-\mu)(D^{2}+k^{2})],\label{modOrr-Somop}\\
\mathcal{C}_{1}&=&-\frac{i\alpha\beta}{Re\ k^{2}}(D^{2}+k^{2})[(\mu_{t}-\mu)D],\\
\mathcal{C}_{2}&=&\beta(DU)-\frac{i\alpha\beta}{Re\ k^{2}}D[(\mu_{t}-\mu)(D^{2}+k^{2})],\\
\mathcal{S}&=&\alpha{U} +\frac{i}{Re}\mu\tilde{\Delta}+\frac{i}{Re}(D\mu)D + \frac{i}{Re}\frac{\beta^{2}}{k^{2}}D[(\mu_{t}-\mu)D],\label{modSquireop}
\end{eqnarray}
with $k^{2}=\alpha^{2}+\beta^{2}$, $D=d/dy$, $\tilde{\Delta}=D^{2}-k^{2}$. Together with the boundary conditions
\begin{equation}
v=Dv=\eta=0~~\mbox{at}~~ y=\pm1.\label{regbc}
\end{equation}
\subsection{Bounds for the Squire and Orr-Sommerfeld modes and one-dimensional stability}
In this section we present bounds for the eigenvalues of the modified Orr-Sommerfeld and the modified Squire operators, equations (\ref{modOrr-Somop}) and (\ref{modSquireop}). We follow closely the work by Joseph \cite{Joseph68} and Davis and Reid \cite{DavisReid77}. For the rest of the section instead of working with the frequency $\omega$ as the eigenvalue we consider the wave speed, $c=\omega / \alpha$ unless otherwise stated.
As in \cite{Joseph68} we define
\[I_{n}^{2}=\int^{1}_{-1}|v^{(n)}|^{2}\dd y\]
and take $v\in\overline{\mathcal{H}}$, where $\overline{\mathcal{H}}$ is a complex-valued Hilbert space completed under the norm $I_2^2$ by the addition of limit points of sequences of functions in $C^4([-1,1])$ satisfying (\ref{regbc}). The following isoperimetric inequalities hold in $\overline{\mathcal{H}}$:
\begin{lem}\label{thm:poincare}
Let $v\in\overline{\mathcal{H}}$, then
\begin{equation*}\begin{aligned}
I_{1}^{2}\geq\lambda_{1}^{2}I_{0}^{2},\ \ \ \ \ I_{2}^{2}\geq\lambda_{2}^{2}I_{1}^{2},\ \ \ \ \ I_{2}^{2}\geq\lambda_{3}^{2}I_{0}^{2}\\
\lambda_{1}^{2}=\frac{\pi^{2}}{4},\ \ \ \ \ \lambda_{2}^{2}=\pi^{2},\ \ \ \ \ \lambda_{3}^{2}=(2.365)^{4},
\end{aligned}\end{equation*}
where $\lambda_3$ is the smallest eigenvalue of a vibrating rod with displacement $v$ satisfying (\ref{regbc}).
\end{lem}
\subsubsection{Bounds for the Squire and Orr-Sommerfeld modes}
\begin{thm}\label{thm:nn-squire}
\emph{(Damped Squire modes)} Let $c(\alpha,\beta,Re)$ be any eigenvalue of the homogeneous modified Squire's equation
\begin{equation*}
\frac{1}{\alpha}\mathcal{S}\eta=c\ \eta, \ \ \ \ \ \ \eta(\pm1)=0.
\end{equation*}
Then
\begin{equation}\begin{aligned}
U_{min}<&c_{r}<U_{max}\\
&c_{i}<-p\frac{(\pi^{2}/4)+k^{2}}{\alpha Re}
\end{aligned}\end{equation}
where $p=\min\{\mu_{t}(y)|-1\leq{y}\leq1\}$.
\end{thm}
See \ref{App:Squire-proof} for the proof.
\begin{thm}\label{thm:nn-joseph}
Let $c(\alpha,Re)$ be any eigenvalue of the modified Orr-Sommerfeld equation
\begin{equation}\label{thm:nn-joseph-os}
\frac{1}{\alpha}\mathcal{L}v = c(D^{2}-\alpha^{2})v\ \ \ \ \ \ \ \ v(\pm1)=v^{\prime}(\pm1) = 0.
\end{equation}
Let $q=\max\{|U^{\prime}(y)| : -1\leq{y}\leq1\}$ and $p=\min\{\mu_{t}(y)|-1\leq{y}\leq1\}$, then we have the following results:
\begin{enumerate}
\item \begin{equation}\label{thm:nn-joseph1}
c_{i}\leq\frac{q}{2\alpha} - \frac{p}{\alpha Re}\left(\frac{\pi^{2}(\pi^{2}+\alpha^{2})}{\pi^{2}+4\alpha^{2}}+\alpha^{2}\right).
\end{equation}
\item No amplified disturbances (modes with $c_{i}>0$) of \eqref{thm:nn-joseph-os} exist if
\begin{equation}\begin{aligned}\label{thm:nn-joseph2}
q\alpha{Re} &< p\cdot{}f(\alpha) \equiv \max[M_{1}, M_{2}],\\
M_{1} &= \lambda_{3}\pi + 2^{3/2}\alpha^{3}\\
M_{2} &= \lambda_{3}\pi + \pi\alpha^{2},\\
\end{aligned}\end{equation}
where $\lambda_{3}=(2.365)^{2}$.
\item \begin{equation}\begin{aligned}\label{thm:nn-joseph3}
&U_{min}^{\prime\prime}\leq0: &U_{min}&<c_{r}<U_{max}+\frac{2U_{max}^{\prime\prime}}{\pi^{2}+4\alpha^{2}}\\
&U_{min}^{\prime\prime}\leq0\leq{}U_{max}: &U_{min}+\frac{2U_{min}^{\prime\prime}}{\pi^{2}+4\alpha^{2}}&<c_{r}<U_{max}+\frac{2U_{max}^{\prime\prime}}{\pi^{2}+4\alpha^{2}}\\
&U_{max}^{\prime\prime}\leq0: &U_{min}+\frac{2U_{min}^{\prime\prime}}{\pi^{2}+4\alpha^{2}}&<c_{r}<U_{max}.\\
\end{aligned}\end{equation}
\end{enumerate}
\end{thm}
See \ref{App:Orr-Somm-proof} for a proof.
We should note that these results are not restricted to regularized viscoplastic fluids; they hold for any shear-thinning (we are using the fact that $\mu$ is a decreasing function of $\dot{\gamma}$) viscosity function.
\subsubsection{One-dimensional stability}
From Theorem \ref{thm:nn-squire} we have that the Squire modes are always damped, thus for the rest of this section we consider only the modified Orr-Sommerfeld equation and $\alpha=0$. If instead of working with the perturbation of the normal velocity $v$ we consider a stream function formulation, i.e. $u=\Psi_y$ and $v=-\Psi_x$, and taking a modal solution of the form $\Psi=\phi(y)\exp[i(\alpha{x}-\omega{t})]$, then we can interchange $v$ for $\phi$ in (\ref{eq:modified-os-sq}). Note that $\phi\in\overline{\mathcal{H}}$ and Theorem \ref{thm:nn-joseph} holds for this formulation. Therefore using (\ref{pf:nn-joseph4}) we have a bound for the imaginary part of the frequency $\omega$:
\begin{equation}\label{omegaregbound}
\omega_{i}<\frac{q\alpha I_{1}I_{0}}{I_{1}^{2}+\alpha^{2}I_{0}^{2}} -\frac{({Re})^{-1}p(I_{2}^{2}+2\alpha^{2}I_{1}^{2}+\alpha^{4}I_{0}^{2}))}{I_{1}^{2}+\alpha^{2}I_{0}^{2}}.
\end{equation}
Here we are interested in one-dimensional perturbations, i.e. $\alpha=0$. This is a special case in terms of boundary conditions. The fact that $v\equiv 0$ does not mean that $\phi=0$ at $y=\pm 1$. This boundary condition can be obtained if the perturbation to the flow rate $\int_{-1}^1u(y)\dd y$ is taken to be zero and a uniform pressure gradient in the $x$-direction is allowed, see \cite{Renardy86}. Now, even for the case $\alpha=0$ we have $\phi\in\overline{\mathcal{H}}$ and (\ref{omegaregbound}) becomes:
\begin{equation}\label{omegaregonedim}
\omega_{i}\leq - \frac{p\pi^2}{Re},
\end{equation}
where we used the isoperimetric inequalities defined in Lemma \ref{thm:poincare}.
Therefore, one-dimensional perturbations are always linearly stable. Note again that this result is valid for any decreasing function $\mu(\dot{\gamma})$.
\subsubsection{Solution of modified Orr-Sommerfeld equation and code validation}\label{subsec:validationmodOrr-Som}
There is no equivalent of Squire's theorem for generalized Newtonian fluids but some authors have performed several numerical tests for a large range of axial and transverse wave numbers ($\alpha$ and $\beta$ respectively) for different non-linear viscosity functions $\mu (\dot{\gamma})$, see \cite{Nouar07,Nouar07b,Nouar09,Nouar10}. Their numerical results show that the lowest critical Reynolds number is obtained for spanwise homogeneous perturbations ($\beta=0$, $\alpha\neq0$). We should also note that in the situation of a homogeneous streamwise perturbation ($\alpha=0$, $\beta\neq0$), the imaginary part of $\omega$ satisfies:
\begin{equation}
\omega_i=-\frac{1}{Re}\frac{\int_{-1}^1\mu\left(4\beta^2|Dv|^2+|D^2v+\beta^2v|^2\right)\dd y}{\int_{-1}^1|Dv|^2+\beta^2|v|^2\dd y}<0,
\end{equation}
which comes from letting $\alpha=0$, multiplying by the complex conjugate of $v$, say $v^*$, integrating by parts and taking imaginary part in (\ref{eq:modified-os-sq}). Note that if $\alpha=0$ then $\mathcal{L}$ and $\mathcal{S}$ decouple, thus the flow is unconditionally stable. Using these facts we would only consider 2D perturbations in the streamwise direction for the rest of the section. We know from Theorem \ref{thm:nn-squire} that all the Squire modes are always damped, thus we are concerned only with the modified Orr-Sommerfeld equation:
\begin{equation}\label{Orr-Somreg}\begin{aligned}
\alpha c (D^2-\alpha^2)v =& \alpha[U(D^2-\alpha^2)-D^{2}U] v- \frac{4i\alpha^2}{Re}D(\mu D)v\\
& +\frac{i}{Re}(D^{2}+\alpha^{2})[\mu_{t}(D^{2}+\alpha^{2})]v,\\
\end{aligned}\end{equation}
Together with the boundary conditions
\begin{equation}
v=Dv=0~~\mbox{at}~~ y=\pm1.\label{regbcorr}
\end{equation}
We solve the eigenvalue problem (\ref{Orr-Somreg})-(\ref{regbcorr}) by discretizing using Chebyshev polynomials in the usual fashion, as described for example in \cite{Schmid01}. In order to validate our code, we benchmark by solving the Newtonian problem $m=1$ and $B=0$, i.e. $\mu\equiv\mu_t\equiv 1$ in (\ref{Orr-Somreg}) and compare with results obtained by Mack \cite{Mack76}. In Figure \ref{fig:validationnewt}a we present the first 33 modes calculated using our code with $N=150$ and the ones reported by Mack with the eigenvalues of the two computations overlaying one another. Figure \ref{fig:validationnewt}b shows the error norm between our calculations and Mack's for $N=50$ and $N=150$. As expected the least stable eigenvalue has converged but the split of the $S$-branch is present when $N$ is not large enough.
In order to stably compute the spectrum of (\ref{Orr-Somreg})-(\ref{regbcorr}) we need substantially more nodes than for the Newtonian problem. In Figure \ref{fig:validationreg} we present the calculations using $N=150,~200,~250$. The first thing to note is that even with $N=150$ the S-branch of the spectrum has not converged. Another important fact about the spectrum is the presence of an R-branch, which was first documented by Frigaard and Nouar in \cite{Frig05}. They found that this branch is physically spurious, meaning that these eigenvalues do not exist for the Orr-Sommerfeld operator corresponding to the viscoplastic model and their stability depends on $\epsilon$. As $\epsilon\rightarrow 0$ we can find unstable modes from this R-branch even for moderate Reynolds numbers. We have found that choosing a smooth regularized non-linear viscosity (either the one due to Bercovier et al.\ or the one due to Papanastasiou) increases the range of $\epsilon$ for which the R-branch remains stable and the least stable eigenvalue belongs to the A-branch as expected. We have fixed $\epsilon=0.002$ for all our results and we have checked that the critical eigenvalue is indeed in the A-branch. For the rest of the section we fix the number of nodes to be $N=250$.
\begin{figure}
\begin{center}
(a)\scalebox{0.4}{\includegraphics{figurevalidationcode.eps}}\\
(b)\scalebox{0.4}{\includegraphics{figurevalidationerror.eps}}
\end{center}
\caption{(a) Spectrum for Newtonian case, present code with $N=150$ (o), Mack \cite{Mack76} ($+$). (b) Error norm of present code with $N=50$ (diamonds) and $N=150$ (squares) vs Mack \cite{Mack76}. All for $Re=10000$ and $\alpha=1$.}
\label{fig:validationnewt}
\end{figure}
\begin{figure}
\begin{center}
\scalebox{0.5}{\includegraphics{figurevalidationspecreg.eps}}
\end{center}
\caption{Spectrum for regularized shear-thinning Casson's model, $N=150$ (o), $N=200$ ($+$), and $N=250$($\Diamond$). For $Re=10000$ and $\alpha=1$. All calculations with $\epsilon=0.002$.}
\label{fig:validationreg}
\end{figure}
\subsection{Numerical results}
Stability results for the regularized Casson model are presented in the following section. First we introduce the ``\emph{plasticity number}'' defined as:
\[Pl=BRe=\frac{\tau_y\rho L^2}{\mu^2}.\]
Note that $Pl$ only depends on the rheological properties of the fluid and the geometry of the problem, thus as we increase $Re$ in our analysis $Pl$ will remain fixed. All the results that follow show the critical Reynolds number for the regularized model normalized with respect to $Re_{Newt}=5772.2$. In Figure \ref{fig:resultsregpl}a we fix $m=2.5$ and with the increase of $Pl$ we enhance the stability. These results are not surprising: we are mimicking a viscoplastic fluid which is known, at least numerically, to be linearly stable \cite{Nouar07b} with a pseudo-plastic fluid with a very large zero-shear-rate viscosity. The use of this type of non-linear viscosity has also been shown to enhance stability \cite{Nouar07}, which is also represented in Figure \ref{fig:resultsregm}a where we fix $Pl=10000$ and vary the power-law index $m$. Figures \ref{fig:resultsregpl}b and \ref{fig:resultsregm}b show the corresponding critical wave number for increasing plasticity number and increasing power-law index respectively. Note that these values remain in close proximity to $\alpha=1.02$ which is the critical wave number for the Newtonian case.
\begin{figure}
\begin{center}
(a)\scalebox{0.5}{\includegraphics{figRevsRenewtreg.eps}}
(b)\scalebox{0.5}{\includegraphics{figalpharegpl.eps}}
\end{center}
\caption{(a) Normalized critical $Re$ for increasing $Pl$ and $m=2.5$. (b) Critical wave number . All calculations with $\epsilon=0.002$.}
\label{fig:resultsregpl}
\end{figure}
\begin{figure}
\begin{center}
(a)\scalebox{0.5}{\includegraphics{figRevsRenewtregm.eps}}
(b)\scalebox{0.5}{\includegraphics{figalpharegm.eps}}
\end{center}
\caption{(a) Normalized critical $Re$ for varying power law index $m$ with $Pl=10000$. (b) Critical wave number. All calculations with $\epsilon=0.002$.}
\label{fig:resultsregm}
\end{figure}
\section{Linear stability analysis for Elastoviscoplastic model}
In this section we study the linear stability for the elastoviscoplastic model presented in equations (\ref{eqnmotion})-(\ref{phieqn}).
\subsection{Baseflow profile}
For the base flow we consider unidirectional shear flow of the form ${\boldsymbol{U}}=(U(y),0,0)$, thus equations (\ref{eqnmotion})-(\ref{phieqn}) reduce to:
\begin{eqnarray}
&&\frac{\dd}{\dd y}\left(\mu_r\frac{\dd U}{\dd y}\right)+\frac{\dd T_{xy}}{\dd y}=Re\frac{\dd p}{\dd x} \label{pmmbaseU}\\
&&T_{xx}=2We\mu(|U'|)\Phi U'T_{xy}\label{pmmbaseTxx}\\
&&T_{xy}=\mu(|U'|)U'\label{pmmbaseTxy}\\
&&T_{yy}=0\label{pmmbaseTyy}\\
&&\Phi=\frac{f(T)}{f(T)+g(T)}\label{pmmbasephi}
\end{eqnarray}
where $T=\sqrt{1/2\left(T_{xx}^2+2T_{xy}^2\right)}$. Substituting (\ref{pmmbaseTxy}) into (\ref{pmmbaseU}) we get:
\begin{equation}
\frac{\dd}{\dd y}\left((\mu_r+\mu(|U'|))\frac{\dd U}{\dd y}\right)=Re\frac{\dd p}{\dd x}\label{pmmbaseflowUfull}
\end{equation}
This equation is solved in exactly the same way as (\ref{eqn:baseflowreg}) in Section \ref{baseflowreg}. Once we have $\dot{\gamma}=|U'|$ from (\ref{pmmbaseflowUfull}) we solve the following nonlinear equation for $T_{xx}$:
\begin{equation}
(f(T)+g(T))T_{xx}-2We\left(\mu(|U'|)U'\right)^2f(T)=0\label{Txxnonlineqn}
\end{equation}
where we have used the definition of $T_{xy}$ in (\ref{pmmbaseTxy}). We now solve equation (\ref{Txxnonlineqn}) using \emph{fzero} in \trademark{MATLAB} for each point $y$ in our discrete domain. Finally we can construct $\Phi$ using (\ref{pmmbasephi}).
In Figure \ref{fig:baseflowpmm} we present the solution of equations (\ref{pmmbaseU})-(\ref{pmmbasephi}) for increasing Bingham numbers and fixed Weissenberg number $We=1$. As we can see, the velocity profile presents the pseudo-plastic behaviour; in this case, in addition to an intensely stratified viscosity, we have a viscoelastic plug, due to the presence of non-zero normal stresses inside this region. We should note the existence of a thin region ($O(w)$) where both solid (viscoelastic) and fluid phases coexist, as is clearly seen in the plot of $\Phi_s$.
\begin{figure}
\begin{center}
\scalebox{0.8}{\includegraphics{figurebaseflowpmm.eps}}
\end{center}
\caption{Base flow profile for the elastoviscoplastic model for increasing Bingham number $B$. From top left to bottom right: velocity, $U(y)$; shear stress $\tau_{xy}$; normal stress, $\tau_{xx}$ and steady state structural variable $\Phi_s$. Parameters for this calculation: $\epsilon=0.002$, $We=1$, $m=2.5$, $K_r=K_d=0.3$, $w=0.1$ and $\mu_r=0.1$.}
\label{fig:baseflowpmm}
\end{figure}
\subsection{Orr-Sommerfeld equation for Elastoviscoplastic model}
As before, we consider an infinitesimal perturbation $(\varepsilon{\boldsymbol{u}}', \varepsilon p', \varepsilon \bm{\tau}, \varepsilon \Phi')$ superimposed onto the base flow and the field equations are linearized around $({\boldsymbol{U}}, P, {\boldsymbol{T}}, \Phi)$, thus we have:
\begin{eqnarray}
&&\frac{\partial {\boldsymbol{u}}'}{\partial t}+({\boldsymbol{u}}'\cdot{\bm \nabla}){\boldsymbol{U}}+({\boldsymbol{U}}\cdot{\bm \nabla}){\boldsymbol{u}}'=-\nabla p'+\frac{\mu_r}{Re} {\bm \nabla} \cdot \bm{\dot{\gamma}}({\boldsymbol{u}}')+\frac{1}{Re} {\bm \nabla} \cdot\bm{\tau}' \label{eqnmotionpertpmm}\\
&&{\bm \nabla} \cdot{\boldsymbol{u}}'= 0 \label{conteqnpertpmm},\\
&&\bm{\tau}'+We(\Phi\mu(\stackrel{\triangledown_{{\boldsymbol{u}}'}}{{\boldsymbol{T}}}+\stackrel{\triangledown_{{\boldsymbol{U}}}}{\bm{\tau}'})+(\Phi\tilde{\mu}+\Phi'\mu)\stackrel{\triangledown_{{\boldsymbol{U}}}}{{\boldsymbol{T}}}) = \mu_t \bm{\dot{\gamma}}({\boldsymbol{u}}'), \label{constpertpmm}
\end{eqnarray}
\begin{eqnarray}
\frac{\partial \Phi}{\partial t}+({\boldsymbol{u}}'\cdot{\bm \nabla})\Phi+({\boldsymbol{U}}\cdot{\bm \nabla})\Phi'&=&-\left(\left(\frac{\partial g(T)}{\partial T_{xx}}+\frac{\partial f(T)}{\partial T_{xx}}\right)\tau'_{xx}+\left(\frac{\partial g(T)}{\partial T_{xy}}+\frac{\partial f(T)}{\partial T_{xy}}\right)\tau'_{xy}\right)\Phi\nonumber \\
&+&\left(\frac{\partial f(T)}{\partial T_{xx}}\tau'_{xx}+\frac{\partial f(T)}{\partial T_{xy}}\tau'_{xy}\right)-(g(T)+f(T))\Phi'.\label{phieqnpertpmm}
\end{eqnarray}
As there is no version of Squire's theorem for this type of model, for simplicity we restrict ourselves to two-dimensional perturbations below. We introduce a stream function $\Psi$, such that: $u' = \Psi_y,~v'=-\Psi_x$. We consider modal linear disturbances of the form:
\begin{equation}
\Psi=\phi(y)\ee^{\ii(\alpha x-\omega t)},~~ \tau'_{ij}=\tau_{ij}(y)\ee^{\ii(\alpha x-\omega t)},~~ij = xx,~xy,~yx,~yy,~~\Phi'=\varphi(y)\ee^{\ii(\alpha x-\omega t)}.
\end{equation}
Denoting $D = \frac{\dd}{\dd y}$, the eigenvalue problem for $\omega$ is:
\begin{eqnarray}
\ii\omega (\alpha^2-D^2)\phi
&=&\frac{\mu_r}{Re}(\alpha^2 - D^2)^2 \phi + \ii\alpha [D^2U + U (\alpha^2-D^2)] \phi \nonumber\\
& &+\frac{1}{Re}[\ii\alpha D(\tau_{xx} - \tau_{yy}) + (D^2 + \alpha^2) \tau_{xy} ]\label{phieqnpmm} \\
\nonumber \\
\ii\omega We\Phi\mu \tau_{xx}
&=&
\left[1+\ii\alpha We\Phi\mu U \right] \tau_{xx} -2We\Phi\mu DU \tau_{xy}
-\ii\alpha We\Phi\mu DT_{xx} \phi -2We\Phi\mu T_{xy}D^2 \phi \nonumber \\
& &
-2\ii\alpha \left[We\Phi\mu T_{xx}+\mu\right]D\phi - 2\Phi\mu DU T_{xy}\varphi \label{tau1eqn}\\[0.5ex]
\ii\omega We\Phi\mu \tau_{xy}
&=&
\left[1+\ii\alpha We\Phi\mu U\right]\tau_{xy} - We\Phi\mu DU \tau_{yy} - \mu_tD^2\phi \nonumber \\
& &-\left[ \ii\alpha We\Phi\mu DT_{xy} + We\Phi\mu\alpha^2 T_{xx} + \alpha^2\mu_t\right] \phi \label{tau2eqn}\\[0.5ex]
\ii\omega We\Phi\mu \tau_{yy} &= & \left[1+\ii\alpha We\Phi\mu U\right]\tau_{yy} + 2\ii\alpha\mu D\phi-2We\Phi\mu\alpha^2T_{xy}\phi . \label{tau3eqn}\\
\ii\omega \varphi
&=&
\ii\alpha D\Phi\phi+\left[\ii\alpha U+g(T)+f(T)\right]\varphi + \left[(K_r-K_d)\Phi-K_r \right]F(y)\mbox{sign}(DU)\tau_{xy} \nonumber \\
& &
+ \left[(K_r-K_d)\Phi^2-K_r\Phi \right]F(y)We\mu |DU|\tau_{xx}, \label{varphieqn}
\end{eqnarray}
where
\begin{equation}
F(y)=\frac{1-\tanh^2\left(\frac{T-B}{w}\right)}{w\sqrt{2(We\mu DU\Phi)^2+1}}
\end{equation}
and with boundary conditions:
\begin{equation}
\phi = D\phi = 0,~\mbox{ at } y=\pm1. \label{bcspmm}
\end{equation}
As it is customarily done in linear stability analysis for viscoelastic flows we introduce the elasticity number defined as:
\[\epsilon\ell=\frac{We}{Re},\]
which is the ratio of kinematic viscosity and relaxational diffusivity: $\epsilon\ell$ depends only on the properties of the fluid and the flow geometry. For the rest of the paper we replace $We$ by $\epsilon\ell Re$.
\subsubsection{One-dimensional stability}
In this section we consider one-dimensional disturbances of (\ref{phieqnpmm})-(\ref{varphieqn}). In order for $\phi$ to satisfy boundary conditions (\ref{bcspmm}) we proceed in the same way as for the regularized viscoplastic fluid, by requiring the perturbation to the flow rate $\int_{-1}^1u(y)\dd y$ to be zero, and we impose a uniform pressure gradient in the $x$-direction. Letting $\alpha=0$ in (\ref{phieqnpmm})-(\ref{varphieqn}) we are left with the following eigenvalue problem:
\begin{eqnarray}
D^2\tau_{xy}&=&-\ii Re \omega D^2\phi -\mu_rD^4\phi, \label{pmmphieqnalp0}\\
\left(1-\ii \epsilon\ell Re \Phi \mu \omega \right)\tau_{xx}&=& 2\epsilon\ell Re\Phi\mu DU \tau_{xy}
+2\epsilon\ell Re\Phi\mu T_{xy}D^2 \phi+2\Phi\mu DU T_{xy}\varphi , \label{pmmtauxxeqnalp0}\\
\left(1-\ii\epsilon\ell Re \Phi \mu \omega \right)\tau_{xy}&=&\mu_tD^2\phi. \label{pmmtauxyeqnalp0}\\
\left(1-\ii\epsilon\ell Re \Phi \mu \omega \right)\tau_{yy}&=&0, \label{pmmtauyyeqnalp0}\\
\left(\ii\omega-g(T)-f(T) \right) \varphi&=& \left[(K_r-K_d)\Phi-K_r \right]F(y)\mbox{sign}(DU)\tau_{xy} \nonumber \\
& &
+ \left[(K_r-K_d)\Phi^2-K_r\Phi \right]F(y)\epsilon\ell Re\mu |DU|\tau_{xx}\label{pmmphiseqnalp0}
\end{eqnarray}
We should point out that from equations (\ref{pmmtauxxeqnalp0})-(\ref{pmmphiseqnalp0}) a continuous spectrum exists and this consists of purely imaginary eigenvalues in the strip $[-1/\epsilon\ell Re \max(\mu),-\infty)$ (note that for our choice of regularization parameter $\epsilon$, we always have $(K_d+K_r)>1/\epsilon\ell Re \max(\mu)$). One has to be careful when considering this part of the spectrum: note that as $\Phi\rightarrow 0$ then $\omega_i\rightarrow -\infty$; clearly these eigenvalues are always damped, thus we consider only the case $\tau_{yy}=0$. Also note that (\ref{pmmtauxxeqnalp0}) and (\ref{pmmphiseqnalp0}) decouple from (\ref{pmmphieqnalp0}) and (\ref{pmmtauxyeqnalp0}), so we multiply (\ref{pmmphieqnalp0}) by the conjugate of $\phi$, say $\phi^*$, (\ref{pmmtauxyeqnalp0}) by $\tau_{xy}^*$ and integrate by parts to get:
\begin{eqnarray}
&&\int_{-1}^1\tau_{xy}D^2\phi^*\dd y=\ii Re \omega \int_{-1}^1|D\phi|^2\dd y -\mu_r\int_{-1}^1|D^2\phi|^2\dd y, \label{alp0phieqn}\\
&&\int_{-1}^1\left(1-\epsilon\ell Re \Phi \mu \omega \right)|\tau_{xy}|^2\dd y=\int_{-1}^1\mu_tD^2\phi\tau_{xy}^*\dd y. \label{alp0txyeqn}
\end{eqnarray}
Expanding in real and imaginary parts and applying the mean value theorem for integrals as necessary we have the following equation for the imaginary part of $\omega$,
\begin{equation}
\omega_I=-\frac{\int_{-1}^1|\tau_{xy}|^2\dd y+\mu_{t_1}\mu_r\int_{-1}^1|D^2\phi|^2\dd y}{\mu_{t_1}Re\int_{-1}^1|D\phi|^2\dd y+\epsilon\ell Re \bar{\Phi}\bar{\mu}\int_{-1}^1|\tau_{xy}|^2\dd y}<0\label{onedstabeqn}
\end{equation}
where
\[\mu_{t_1}=\mu_t(\xi_1),~~\bar{\Phi}=\Phi(\xi_2),~~\mbox{and}~~\bar{\mu}=\mu(\xi_3)~~~\mbox{for some}~~\xi_1,\xi_2,\xi_3\in[-1,1].\]
Therefore, (\ref{phieqnpmm})-(\ref{varphieqn}) is linearly stable to one-dimensional perturbations.
\subsubsection{Code validation}
As a first test for our code we consider the case $\mu_r=0.5$, $\Phi=\mu=\mu_t\equiv1$ and $\varphi\equiv0$ in (\ref{phieqnpmm})-(\ref{varphieqn}), by doing this we recover the Oldroyd-B model and compare our results with the work by Sureshkumar and Beris in \cite{Suresh95}. We set $Re=3960$, $We=3.96$ (or $\epsilon\ell=10^{-3}$), $\alpha=1.15$ and have used $N=128$ and $N=256$ to solve for the spectrum. We show the results in Figure \ref{fig:validationOB}, the least stable eigenvalue is $\omega=0.3409+ 1.9888\times 10^{-7}\ii$, the value reported in \cite{Suresh95} is $\omega=0.3409+ 1.9696\times 10^{-7}\ii$. We have convergence of the discrete spectrum and the difference of the least stable eigenvalue between computations is $2.7\times 10^{-12}$.
We now turn our attention to the full model (\ref{phieqnpmm})-(\ref{varphieqn}). For the rest of the section we consider the wave speed $c=\omega/\alpha$ as our eigenvalue. The first thing to note is the location of the continuous spectrum, from (\ref{tau1eqn})-(\ref{varphieqn}). For simplicity let us define the Deborah function as $De(y)=\epsilon\ell\Phi\mu$. We can see that the real part of the continuous spectrum is in the line $c_r= U$ and the imaginary is in the line $c_i\in [a,\infty)$, where:
\[a=\max\{2K_d,2K_r,WeB/\epsilon\}.\]
Clearly, the continuous spectrum is always stable but one has to be careful when doing computations. Note that for $y$ in the pseudo-plug region (where $T<B$), $De(y)$ is very large (recall that $\mu\sim B/\epsilon$); once we begin to approach the yield limit $De(y)$ decays to zero very fast as $\Phi\rightarrow0$ and $\mu\rightarrow1$. This means that the continuous spectrum will approach zero when $y$ is in the pseudo-plug region and it will be pulled down very fast towards minus infinity as $T$ approaches $B$. This could cause severe numerical problems, as $\Phi$ is actually zero when the fluid is fully yielded. We discretize the problem using the same technique as in Section \ref{subsec:validationmodOrr-Som}. In order to prove the convergence of our code we compare the spectrum of a regularized version of (\ref{phieqnpmm})-(\ref{varphieqn}) with the one of the full model using two different eigenvalue solvers in \trademark{MATLAB}. Let
\begin{equation}
\Phi_r (y)= \left\{
\begin{array}{lcl}
\Phi(y) & \mathrm{for} & \Phi(y)>\vartheta\\
\vartheta & \mathrm{for} & \Phi(y)\leq \vartheta,\\
\end{array}
\right.
\end{equation}
where $\vartheta\ll1$ is a small parameter. We replace $\Phi$ with $\Phi_r$ in $De(y)$ and solve the generalized eigenvalue problem using the function \emph{eig} in \trademark{MATLAB}; we choose $\vartheta$ such that we get finite results. We then solve the full problem (\ref{phieqnpmm})-(\ref{varphieqn}) using \emph{eigs} in \trademark{MATLAB}; this function provides the reverse communication required by the Fortran library ARPACK which is based upon an algorithmic variant of the Arnoldi process called the Implicitly Restarted Arnoldi Method. This function can calculate $k$ eigenvalues based on a user defined $\sigma$, thus we choose $k$ such that we get finite results and $\sigma$ is taken to be the least stable eigenvalue of the regularized Casson model calculated in Section \ref{subsec:validationmodOrr-Som}. We present the results of these calculations with $N=250$ in Figure \ref{fig:validationpmm}a; the spectra of both problems overlap each other. Note the presence of the almost undisturbed spectrum of the generalized Newtonian problem ($A$-branch, $P$-branch, $S$-branch and $R$-branch), and the continuous spectrum corresponding to equation (\ref{varphieqn}) centered at $c_i=(K_d+K_r)/\alpha^2$ (we have assumed $K_d=K_r$ for this example). Because of the choice of parameters (see caption in figure), the pseudo-plug region is very small and we have only a few eigenvalues corresponding to $c_r=U\sim 1$ and $c_i=-1/\alpha^2De(y)Re$. Note that there exists a set of discrete eigenvalues which emanates from the region close to $-1/\alpha^2De(0)Re$ which distorts the $A,~P,~R$ branches of the modified Orr-Sommerfeld operator. The difference in the least stable eigenvalue between the problem using $\Phi_r$ and the one using $\Phi$ is $5.1101\times10^{-6}$.
This result is not surprising due to the fact that this eigenvalue belongs to the wall modes ($A$-branch), there the fluid is fully yielded and it does not feel the effect of elasticity, when we use $\Phi_r$ close to the wall we can assume that $\epsilon\ell\sim O(10^{-8})$ for this choice of parameters. In view of these results we use the function \emph{eigs} for the full discretized problem (\ref{phieqnpmm})-(\ref{varphieqn}). In Figure \ref{fig:validationpmm}b we present convergence results for $N=250,~350,~450$, the change on the least stable eigenvalue is on the sixth significant figure. For the rest of the paper we fix $N=450$ unless otherwise stated.
\begin{figure}
\begin{center}
\scalebox{0.5}{\includegraphics{figurevalidationspecOB.eps}}
\end{center}
\caption{Code validation. Spectrum for Oldroyd-B fluid, $N=128$ (o), $N=256$ ($\square$). For $Re=3960$, $\epsilon\ell=10^{-3}$ ($We=3.96)$, $\beta=0.5$ and $\alpha=1.15$.}
\label{fig:validationOB}
\end{figure}
\begin{figure}
\begin{center}
(a)\scalebox{0.5}{\includegraphics{figurevalidationspecregphi.eps}}\\
(b)\scalebox{0.5}{\includegraphics{figurevalidationspecpmm.eps}}
\end{center}
\caption{Code validation for elastoviscoplatic model. a) Spectrum for full model (o) and for regularized model using $\Phi_r$ (+) with $N=250$. b) Spectrum for full model with $N=250$ (o), $N=350$ ($+$), and $N=450$($\Diamond$). For $Re=10000$ and $\alpha=1$. Parameters for these calculations: $\epsilon=0.002$, $\epsilon\ell=10^{-4}$, $m=2.5$, $K_r=K_d=0.3$, $w=0.1$ and $\mu_r=0.1$.}
\label{fig:validationpmm}
\end{figure}
\subsection{Numerical results}
Next we present the results of our calculations. Figure \ref{fig:resultspmmpl}a shows the critical Reynolds number for the elastoviscoplastic model as a function of plasticity number, $Pl$. As with the regularized Casson model, the existence of a pseudo-plug region (stratified viscosity) is sufficient to greatly enhance the stability of the flow. The critical Reynolds number appears to be a monotone increasing function of plasticity number, just as with the regularized viscoplastic model. Clearly, in Figure \ref{fig:resultspmmpl}b we can see that the behaviour of the critical wave number $\alpha_c$ is very similar to the wave number of the Casson model. We should also note that the inclusion of a highly viscous viscoelastic fluid as a plug destabilises the flow when comparing it with the regularized model (shown as the dotted curve in Figure \ref{fig:resultspmmpl}a). In relative terms, when $Pl=1000$ the critical Reynolds number for the elastoviscoplastic model is 2.66\% smaller than the critical Reynolds number for the regularized model. For $Pl=10^5$ this percentage increases to around 6\%; this is due to the fact that the pseudo-plug and solid-fluid regions increase and are closer to the wall.
\begin{figure}
\begin{center}
(a)\scalebox{0.5}{\includegraphics{figRepmmvsRereg.eps}}
(b)\scalebox{0.5}{\includegraphics{figalphapmmpl.eps}}
\end{center}
\caption{(a) Normalized critical $Re$ for increasing $Pl$. (b) Critical wave number. Parameters for these calculations: $\epsilon=0.002$, $\epsilon\ell=10^{-4}$, $m=2.5$, $K_r=K_d=0.3$, $w=0.1$ and $\mu_r=0.1$.}
\label{fig:resultspmmpl}
\end{figure}
As a way to understand better the effects of the elastoviscoplasticity on the instability mechanism we will turn our attention to the analysis of the distribution of the production and dissipation of disturbance kinetic energy across the channel. The linearized Reynolds-Orr equation is derived by multiplying equation (\ref{phieqnpmm}) (equation (\ref{Orr-Somreg}) for the regularized model) by the conjugate of the stream function perturbation (or velocity perturbation in (\ref{Orr-Somreg})) and integrating over a periodic domain. This can be written as:
\begin{equation}
\frac{d\langle I_{1,j}\rangle}{dt}=\langle I_{2,j}\rangle-\frac{1}{Re}\langle I_{3,j}\rangle \label{Reynolds-Orr}
\end{equation}
where $j=$regularized (reg) or elastoviscoplastic (ev) and $\langle \cdot \rangle=\int_{-1}^1\cdot\dd y$ . The left hand side of (\ref{Reynolds-Orr}) is the temporal variation of the averaged kinetic energy, the first term in the right hand side is the exchange of energy between the base flow and the perturbation and the last term in the equation is the rate of energy dissipation. Explicitly we have for the regularized model
\begin{eqnarray}
I_{1,reg}&=&|Dv|^2+\alpha^2|v|^2 \label{I1reg}\\
I_{2,reg}&=&\alpha[DU_{reg}(v_rDv_i-v_iDv_r)] \label{I2reg}\\
I_{3,reg}&=&4\alpha^2\mu|Dv|^2+\mu_t|D^2v+\alpha^2v|^2 \label{I3reg}
\end{eqnarray}
and for the elastoviscoplastic model
\begin{eqnarray}
I_{1,ev}&=&|Dv|^2+\alpha^2|v|^2 \label{I1pmm}\\
I_{2,ev}&=&\alpha[DU_{ev}(\phi_rD\phi_i-\phi_iD\phi_r)] \label{I2pmm}\\
I_{3,ev}&=&\mu_r|D^2\phi+\alpha^2\phi|^2\nonumber\\
&+&\alpha[(\tau_{yy}-\tau_{xx})_rD\phi_i-(\tau_{yy}-\tau_{xx})_iD\phi_r] \nonumber\\
&+&\tau_{xy_r}(D^2\phi_r+\alpha^2\phi_r)+\tau_{xy_i}(D^2\phi_i+\alpha^2\phi_i).\label{I3pmm}
\end{eqnarray}
Following Govindarajan et al. \cite{Govindarajan01}, we compare the normalized space-averaged energy production,
\begin{equation}
\Gamma^+_j=\frac{\langle I_{2,j}\rangle}{\langle I_{1,j}\rangle}\label{energyprod}
\end{equation}
and normalized space-averaged energy dissipation
\begin{equation}
\Gamma^-_j=\frac{\langle I_{3,j}\rangle}{Re\langle I_{1,j}\rangle}.\label{energydiss}
\end{equation}
At criticality these two quantities balance. Positive values of $I_{2,j}$ indicate where in the flow domain energy is supplied from the base flow to the perturbed flow. It should be mentioned that even though the energy production and energy dissipation balance at critical conditions, the mechanism of instability is governed by $I_{2,j}$ and not by $I_{3,j}$. We refer the reader to \cite{Govindarajan01} for an extensive discussion about Reynolds stress distribution analysis. In Figure \ref{fig:energybudget}a we can see how $\Gamma^+_{reg}$ and $\Gamma^-_{reg}$ balance at critical Reynolds number $Re_{reg}=13364.41$ for $Pl=1000$. The inclusion of an elastic plug and its effect on the stability of the flow is clearly seen in Figure \ref{fig:energybudget}b. We keep the same Reynolds number as for the regularized case. Note that both $\Gamma^+_{ev}$ and $\Gamma^-_{ev}$ are of the same order of magnitude as before, but the balance is slightly broken. The energy production has increased but note that the energy dissipation has gone negative close to the centre line of the channel (inset figure) whereas for the regularized model it is always positive due to the presence of a highly viscous pseudo-plug. This is due to the presence of elastic forces. Even though the elastic plug is confined to a very small region away from the wall, it is enough to break the balance between energy production and dissipation and stability is lost. Experimental evidence of the existence of an elastic plug can be found in \cite{Park10}.
\begin{figure}
\begin{center}
(a)\scalebox{0.5}{\includegraphics{figenergybudreg.eps}}
(b)\scalebox{0.5}{\includegraphics{figenergybudpmm.eps}}
\end{center}
\caption{Energy budget for $Re=13364.41$ and $Pl=1000$. (a)Regularized Casson model, $\Gamma^+=\Gamma^-=7.152\times10^{-3}$ (b) Elastoviscoplastic model, $\Gamma^+=7.351\times10^{-3},~\Gamma^-=7.033\times10^{-3}$. Parameters for these calculations: $\epsilon=0.002$, $\epsilon\ell=10^{-4}$, $m=2.5$, $K_r=K_d=0.3$, $w=0.1$ and $\mu_r=0.1$.}
\label{fig:energybudget}
\end{figure}
Now we explore the effects that increasing elasticity will have on the stability of the flow. In Figure \ref{fig:resultspmmeps}a we present the normalized critical Reynolds number for increasing $\epsilon\ell$. Note that the change is minimal; the same happens for the critical wave number shown in Figure \ref{fig:resultspmmeps}b. This is not surprising either: elastic forces are confined to regions of very low shear-rates where the value of the nonlinear viscosity is very large, thus an increase in $\epsilon\ell$ does not make any difference. This is also seen in Figure \ref{fig:energybudget2}a, where $\epsilon\ell=10^{-3}$ and $\Gamma^+_{ev}$ and $\Gamma^-_{ev}$ are almost unchanged with respect to the case $\epsilon\ell=10^{-4}$. In Figure \ref{fig:resultspmmw}a-b we show the effects of increasing the parameter $w$ which in turn makes the solid-fluid region wider. Note that for small values of $w$ things remain more or less unchanged, but as soon as $w$ gets closer to 1 we see a steep decrease in the critical Reynolds number. This again is not too surprising: by increasing $w$ the region where solid and fluid coexist stretches towards the wall. When $w=1$ we have that both $\Phi$ and $T_{xx}$ are different from zero at the wall. This is clearly seen in Figure \ref{fig:energybudget2}b where the normalized production of energy $\Gamma^+_{ev}$ has increased in the critical layer.
\begin{figure}
\begin{center}
(a)\scalebox{0.5}{\includegraphics{figRevsRenewtpmmeps.eps}}
(b)\scalebox{0.5}{\includegraphics{figalphapmmeps.eps}}
\end{center}
\caption{(a) Normalized critical $Re$ with $Pl=1000$ and increasing $\epsilon\ell$. (b) Critical wave number. Parameters for these calculations: $\epsilon=0.002$, $m=2.5$, $K_r=K_d=0.3$, $w=0.1$ and $\mu_r=0.1$.}
\label{fig:resultspmmeps}
\end{figure}
\begin{figure}
\begin{center}
(a)\scalebox{0.5}{\includegraphics{figRevsRenewtpmmw.eps}}
(b)\scalebox{0.5}{\includegraphics{figalphapmmw.eps}}
\end{center}
\caption{(a) Normalized critical $Re$ with $Pl=1000$ and increasing $w$. (b) Critical wave number. Parameters for these calculations: $\epsilon=0.002$, $\epsilon\ell=10^{-4}$, $m=2.5$, $K_r=K_d=0.3$ and $\mu_r=0.1$.}
\label{fig:resultspmmw}
\end{figure}
\begin{figure}
\begin{center}
(a)\scalebox{0.5}{\includegraphics{figenergybudpmmeps.eps}}
(b)\scalebox{0.5}{\includegraphics{figenergybudpmmw.eps}}
\end{center}
\caption{Energy budget for $Re=13364.41$ and $Pl=1000$. (a)Elastoviscoplastic model with $\epsilon\ell=10^{-3}$, $\Gamma^+=7.356\times10^{-3},~\Gamma^-=7.03\times10^{-3}$ (b) Elastoviscoplastic model with $w=1$, $\Gamma^+=7.436\times10^{-3},~\Gamma^-=7.037\times10^{-3}$. Parameters for these calculations: $\epsilon=0.002$, $m=2.5$, $K_r=K_d=0.3$ and $\mu_r=0.1$.}
\label{fig:energybudget2}
\end{figure}
\section{Discussion}
We have studied linear stability analysis of plane Poiseuille flow of an elastoviscoplastic fluid. Our results show that there is an increase in the critical Reynolds number for two-dimensional perturbations as the yield stress increases. The main difference between our study and previous works is that we consider a rather new yielding scenario recently suggested in \cite{Burghelea09} according to which the transition from solid to fluid regime is not direct but mediated by a solid-fluid phase coexistence regime within which the material behaves as a viscoelastic fluid. Thus, we were interested in understanding how the presence of elasticity modifies the hydrodynamic stability of the flow.
The relevance of a particular yielding scenario to the stability problem might not be obvious at a first glance as the yielding typically occurs at low Reynolds numbers (the experiments described in \cite{Burghelea09} were performed at $Re<1$) whereas the loss of hydrodynamic stability emerges at significantly higher $Re$. However, it has been demonstrated experimentally in \cite{bulent_crap} that the transition to turbulence in the pipe flow of a viscoplastic fluid (Carbopol 940) is simultaneous with the breakdown of the unyielded plug initially located around the centerline of the pipe. These results suggested to us that the loss of the hydrodynamic stability and the yielding are in fact connected, which motivated us to revisit the stability problem in the context of the yielding scenario suggested in \cite{Burghelea09}. These results are in close agreement with the ones of a regularized viscoplastic model; this is not surprising due to the fact that the viscoelastic core is confined to a region away from the wall. We have also shown that the existence of a region where solid and fluid structure coexist destabilizes the flow.
We have chosen the regularized Casson model for the viscosity function but we suspect that the stability behaviour is qualitatively similar for other models such as the regularized Herschel-Bulkley and Bingham models, and therefore that our results are quite generic.
There are practical limitations to be acknowledged. First, the domain is a geometric idealisation of a planar geometry of large aspect ratio. Depending on the actual geometry it becomes necessary to consider other effects, e.g.~entry/start-up effects of the flow, curvature or imperfections of the walls, etc. On the other hand in the experimental setting, the point at which instability is actually observed is very sensitive to control of apparatus imperfections and the level of flow perturbations. For example, in Hagen-Poiseuille flow of Newtonian fluids one typically observes transition to turbulence starting for $Re \gtrsim 2000$. However, an experimental flow loop in Manchester UK produces stable laminar flows for $Re \approx 24,000$, \cite{Hof04,Peixinho06}, and stable flows have even been reported up to $Re \approx 100,000$, \cite{Pfenniger61}. This suggests that enhanced stability may be achieved experimentally, where predicted by the linear theory.
|
1,314,259,995,028 | arxiv | \section{Introduction}
Today, humans regularly interact with intelligent technology through devices like Amazon Alexa, smart thermometers, and smart smoke detectors. With the advent of vacuum cleaner robots, people interact with robots on a daily basis \cite{20}. In addition to household robots, robots have been integrated into first responder and military teams \cite{21}. With this integration there is a need for research not only into how robots can become effective teammates but also into how they can increase the cohesion and effectiveness of their teams. The key to being an effective robotic teammate is trust.
\par
According to Hancock et al.\ \cite{1} the three classes of factors that influence trust in HRI are human-related, robot-related and environmental factors. Wagner et al.\ \cite{2} defines trust as \textit{“the reliance by one agent that actions prejudicial to the well-being of that agent will not be undertaken by influential others”}. Khavas et al.\ \cite{3}, in an analysis of trust in HRI, notes that all definitions of trust incorporate whether a robot’s actions and behaviors correspond to the human’s interest.
\par
Trust can be broken down into three antecedents: performance, process and purpose \cite{4}. Performance is whether or not the task has been completed, process is how the robot is executing a task and purpose refers to the disposition between the two team members. When a robot breaks trust it is in one of these three areas. Breaking trust is referred to as a trust violation \cite{4}. The two types of trust violations are integrity and competence. An integrity violation is when a robot intentionally does something counter to the human's interests. A competence violation is when a robot incorrectly performs or fails to perform the task. Generally, a human's trust in a robot can be measured by answering the question, ``Is the human teammate asking the robot to do tasks that are commensurate with its capability?'' Trust calibration is the method in which a robot helps a human to maintain a level of trust that is in line with these capabilities. The goal of calibration is to prevent the humans from ending up in a state of over-trust or under-trust. Over-trust is when a person believes that the robot can accomplish something that is outside of its capabilities. Under-trust is when a person does not believe the robot can accomplish tasks for which it is capable \cite{5,7,8}.
\begin{figure}[t]
\centering
\includegraphics[width=1\columnwidth]{Untitled_presentation-3.pdf}
\caption{Over-trust and under-trust as a function of trust in the robot vs.\ the capability of the robot.}
\label{fig1}
\end{figure}
\par
In addition to a robot's ability to calibrate trust, it must also recognise situations that resemble distrust but are, instead, caused by negative emotions. Negative emotions during an interaction with robots are an important area of study in HRI \cite{19}. Frustration is a negative emotion that is often mentioned when it comes to human--technology interactions \cite{10}. When a human does not trust a robot it can lead to frustration. Humans get frustrated with robots when they have a technical failure. Abd et al. \cite{11} stress that the level of frustration in a human interacting with a robot was highest when the robot had the largest technical error. Manuel Giuliani et al. \cite{12} compare the effects of social norm violations and technical failures and show that technical failures cause more frustration. Weidemann et al. \cite{10} show that frustration negatively affects the feeling of superiority that the human has over a robot as well as the control that they have over the robot.
\section{Trust Calibration}
Trust calibration relies on understanding two states, over-trust and under-trust. The ``reliability of the system'' is the ``probability that a task done by a system will be successful'' and is represented by $P_{auto}$. The user's estimation of $P_{auto}$ is $P_{trust}$. Over-trust occurs in a situation where $P_{trust}> P_{auto}$. Under-trust occurs when $P_{auto}> P_{trust}$ \cite{13}. The probability of trust is a combination of the successes and failures of the robot and the personality of the human teammate. Similarly, frustration is a combination of the failures of the robot and the communication of the robot. Trust manifests itself in the form of trust actions and mistakes manifest themselves in the form of trust violations \cite{13}. \par
Trust calibration uses two methods depending on what state of trust the human teammate is in. Figure 1 \cite{4}, shows the relationship between the trust in a robot and the robot's capability. If the person is in a state of over-trust then the robot will attempt to dampen trust. If the person is in a state of under-trust then the trust repair method is used. These methods are interventions that take the form of trust calibration cues (TCCs). Trust calibration cues can be verbal, visual, audible, physical or a combination of any of these. There is some research concentrated on the effectiveness of different types of TCCs in different situations. Robinette et al. \cite{8} show a robot that uses anthropomorphic gestures is more effective than having a sign in emergency situations. Okumura et al. \cite{9} shows that verbal cues are more effective than audible and visual cues.
\section{Respecting Trust}
In order for a robot to make the decision to respect trust it has to be informed by the environment, its actions and the state of trust that its teammate is in. There has been work done on predictive models that use the performance of the robot in the form of successes $s$ and failures $f$ to predict the state of trust.
Gou et al. \cite{16} tested a beta distribution using an existing data set involving 39 human participants interacting with four drones in a simulated surveillance mission. The proposed method obtained an RMSE of 0.072. They determined that each person has an optimal set of parameters $\Theta=\{\alpha_0, \beta_0, w^s, w^f\}$ where $w^s$ and $w^f$ are weights applied to the number of $s$ and $f$, respectively. These values are used to determine the parameters of the Beta distribution, $\alpha$ and $\beta$. The parameter $\Theta$ is updated every time there is a trust decision. As with the previous methods, the expected value of trust $\hat{t}_i$ at time $i$ is $\hat{t}_i=\alpha/(\alpha + \beta)$. Gou et al. use $\hat{t}_i$ to determine the difference between the previous trust ($\hat{t}_{i-1}$) and $\hat{t}_i$ given a success ($p_i =1$) and a failure ($p_i=0$). \par Gou et al. show that it is valid to estimate trust using successes and failures. The use of successes and failures to predict trust is important because it ties directly to the performance of the robot. After a robot has performed well the trust of its human teammate will increase.
\subsection{Research Question}
\textbf{R0:} Can trust calibration actions be used to aid in predicting whether or not a human teammate should trust the robot?
\textbf{H0:} The trust calibration is effective at changing trust regardless of the performance of the robot. The TCCs can be used as a way to effectively estimate future trust.
\section{Experimental design}
Our experiment is an online game conducted with participants from Mechanical Turk. The game is a simulation of a search and rescue (S\&R) scenario where a human and a robot, referred to as agents, move through a simulated building to identify simulated victims, referred to as targets. There are two types of targets: gold stars, which are worth $100$ points, and red circles, which are worth $-100$ points. Each agent has a predefined area of discovery. When each of the agents finds a target, it needs to take an action to select the target. Selecting targets is optional for the human teammate; however, selecting only the gold stars will maximize the team score. The game consists of 10 rounds. At the end of each round the human teammate makes a trust action. The trust action is a blind decision whether to integrate or discard the map and targets discovered by the robot. After the decision the collective team score is updated. Because it is a blind decision, the human is forced to make their decision based on the robot's prior performance. When the robot's information is integrated, all the scores gained by the robot are added to the overall score of the team and the areas searched by the robot cannot be searched again by the human. If the human agent discards the information of the robot, the robot's score will not be added to the team score and the areas searched by the robot can be searched by the human later. After making the trust decision, the human will be able to see the score and targets gained by the robot in this round. Table 2 shows the targets, score and TCC used in each round of the game. Three manipulation questions were used, each asking specific questions regarding the mechanics of the survey. If a participant failed two out of three of these manipulation questions they were excluded from the survey.
\begin{table}[]
\centering
\scriptsize
\begin{tabular}{ |p{3cm}|p{3cm} | }
\hline
All Negative TCCs & All Positive TCCs\\
\hline
\hline
I am sorry, I was having difficulty identifying the correct target. I will do better next round. &I am not going to be able to accurately identify targets next round.\\
\hline
I am sorry, I am still having trouble with identification. Let me try something different to see if that will help.& I am still having trouble identifying targets. \\
\hline
\end{tabular}
\caption{Trust calibration cues used.}
\label{table1}
\end{table}
\begin{table}[]
\centering
\scriptsize
\begin{tabular}{|l|l|l|l|l|}
\hline
\begin{tabular}[c]{@{}l@{}}Round\\ number\end{tabular} & \begin{tabular}[c]{@{}l@{}}Gold \\ stars\end{tabular} & \begin{tabular}[c]{@{}l@{}}Red\\ circles\end{tabular} & Score\\ \hline
1 & 2 & 3 & -100 \\ \hline
2 & 1 & 4 & -300 \\ \hline
3 & 1 & 2 & -100 \\ \hline
4 & 2 & 3 & -100 \\ \hline
5 & 0 & 2 & -200 \\ \hline
6 & 0 & 1 & -100 \\ \hline
7 & 0 & 1 & -100 \\ \hline
8 & 0 & 2 & -200 \\ \hline
9 & 2 & 3 & -100 \\ \hline
10 & 1 & 2 & -100 \\ \hline
\end{tabular}
\caption{Number of targets and the scores for the all negative surveys}
    \label{tab:negative_scores}
\end{table}
\begin{table}[]
\centering
\scriptsize
\begin{tabular}{|l|l|l|l|l|}
\hline
\begin{tabular}[c]{@{}l@{}}Round\\ number\end{tabular} & \begin{tabular}[c]{@{}l@{}}Gold \\ stars\end{tabular} & \begin{tabular}[c]{@{}l@{}}Red\\ circles\end{tabular} & Score \\ \hline
1 & 3 & 2 & 100 \\ \hline
2 & 1 & 0 & 100 \\ \hline
3 & 2 & 0 & 200 \\ \hline
4 & 4 & 1 & 300 \\ \hline
5 & 4 & 0 & 400 \\ \hline
6 & 4 & 3 & 100 \\ \hline
7 & 1 & 0 & 100 \\ \hline
8 & 2 & 0 & 200 \\ \hline
9 & 3 & 2 & 100 \\ \hline
10 & 4 & 3 & 100 \\ \hline
\end{tabular}
\caption{Number of targets and the scores for the all positive surveys}
    \label{tab:positive_scores}
\end{table}
\section{Preliminary results}
Four surveys were conducted using participants from Amazon Mechanical Turk. Two of the surveys were controls, meaning that they had no TCCs. One of the controls had all negative scores and one of the controls had all positive scores. The other two surveys had TCCs located after the third round and on every subsequent round for 10 rounds. Table 2 is a list of the scores for the all negative surveys that were conducted. The all positive trust calibration survey was compared to the all positive control, and likewise for the all negative versions. Table 3 is a list of the scores for the all positive surveys that were conducted. Because each survey had a different number of participants after the data was filtered for the manipulation questions, the overall trust score is given as a percentage calculated using Equation 1.
\begin{equation}
\frac{\mbox{number of participants who integrated}}{\mbox{total number of participants}}
\end{equation}
The all positive TCC survey had a drop from 23 participants to 13 participants after the first calibration. The all negative TCC survey showed an increase from 10 to 18 participants after the calibration. Figure 2 is a plot of the trust scores for the all positive surveys and figure 3 is a plot of the trust scores for the all negative surveys. When the two trust calibration surveys were compared with each other it showed that it took two rounds for the participants to ignore the feedback from the robot regardless of the goal of the TCC.
\begin{figure}[t]
\centering
\includegraphics[width=1\columnwidth]{All_Positive.png}
\caption{Plot of percent trust vs.\ number of rounds for the all positive surveys. The blue line is the TCC test group and the green line is the control group. The red box shows the round after the TCCs began.}
\label{fig2}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[width=1\columnwidth]{All_Negative.png}
\caption{Plot of percent trust vs.\ number of rounds for the all negative surveys. The green line is the TCC group and the blue line is the control group. The red box shows the round after the TCCs began.}
\label{fig3}
\end{figure}
\section{Conclusion and Future Work}
Human robot teams are no longer the realm of fiction. In order to develop effective teams there must be a framework so that humans and robots can work together as a cohesive unit. The cornerstone of this framework is going to be the ability for the robot to communicate what its capability is and inform its human counterpart when their trust is not in line with its capabilities. If a robot can identify when its human teammate is showing signs of distrust for external reasons (i.e., frustration) it can choose to act on its own to aid its teammate. Frustration and cognitive load have been shown to mirror the effects of distrust, resulting in under-trust \cite{19}. A robot that can recognize when frustration presents as distrust will be able to help its teammate by not adding to cognitive load.
\par
Trust calibration cues can reliably shift trust in the direction they are designed for. Both of the TCC experiments showed that the feedback that the robot provided corresponded with the desired change in trust. When we attempted to dampen trust we effectively lowered the trust value despite the good performance. Likewise, when we repaired trust, there was an increase in the trust score despite the low performance of the robot. This is important for trust estimation because, if in a prior round the robot attempted to repair trust, then it should estimate that its human teammate will have a higher level of trust. If in fact they do not, potentially, the robot could make the decision to respect the trust state of the teammate because there is an external force causing an artificial trust state.
\par
We are a long way from having a reliable prediction model that will aid a robot in choosing to respect trust or calibrate trust. We have shown that TCCs are effective at changing trust in the desired direction, which will aid in the model. As mentioned in the beginning of this paper, different trust violations respond differently to different trust calibrations. We are going to conduct an experiment to see what that difference is. We also have to figure out a way to combine performance and calibration into a predictive model. After the basic model has been created, in the future we would like to add to it to include biological signals such as heartbeat. Ultimately we want to test the model with people where an external force, like frustration, is applied to see if a robot, using our model, can accurately determine when to respect trust or calibrate trust.
\section{Introduction}
\subsection{Uncertainty principles}
\label{sec:introup}
The classical Weyl-Heisenberg uncertainty principle states that a
continuous-time signal cannot be simultaneously well-localized in both
time and frequency. Loosely speaking, this principle says that if
most of the energy of a signal $f$ is concentrated near a
time-interval of length $\Delta t$ and most of its energy in the
frequency domain is concentrated near an interval of length $\Delta
\omega$, then
\[
\Delta t \cdot \Delta \omega \ge 1.
\]
This principle is one of the major intellectual achievements of the
20th century and since then, much work has been concerned with
extending such uncertainty relations to other setups, namely, by
investigating to what extent it is possible to concentrate a function
$f$ and its Fourier transform $\hat f$, relaxing the assumption that
$f$ and $\hat f$ be concentrated near intervals as in the work of
Landau, Pollack and Slepian \cite{prolateI,prolateII,prolateIII}, or
by considering signals supported on a discrete set
\cite{prolateV,DonohoStark}.
Because our paper is concerned with finite signals, we now turn our
attention to ``discrete uncertainty relations'' and begin by recalling
the definition of the discrete Fourier transform
\begin{equation}
\label{eq:dft}
\hat f(\idf) = \frac{1}{\sqrt{N}} \sum_{t =0}^{N-1} f(t)
e^{-i 2\pi \idf t/N},
\end{equation}
where the frequency index $\idf$ ranges over the set $\{0, 1, \ldots,
N - 1\}$. For signals of length $N$, \cite{DonohoStark} introduced a
sharp uncertainty principle which simply states that the supports of a
signal $f$ in the time and frequency domains must obey
\begin{equation}
\label{eq:discreteup}
|\operatorname{supp} f| + |\operatorname{supp} \hat{f}| \geq 2\sqrt{N}.
\end{equation}
We emphasize that there are no other restriction on the organization
of the supports of $f$ and $\hat f$ other than the size constraint
\eqref{eq:discreteup}. \cite{DonohoStark} also observed that the
uncertainty relation \eqref{eq:discreteup} is tight in the sense that
equality is achieved for certain special signals. For example,
consider as in \cite{DonohoStark,DonohoHuo} the {\em Dirac comb}
signal: we suppose that the sample size $N$ is a perfect square and
let $f$ be equal to 1 at multiples of $\sqrt{N}$ and 0
everywhere else
\begin{equation}
\label{eq:dirac}
f(t) = \begin{cases}
1, & t=m\sqrt{N},~~ m=0,1,\ldots,\sqrt{N}-1 \\
0, & \text{elsewhere}.
\end{cases}
\end{equation}
Remarkably, the Dirac comb is invariant through the Fourier transform,
i.e. $\hat{f} = f$, and therefore, $|\operatorname{supp} f |+|\operatorname{supp} \hat f| =
2\sqrt{N}$. In other words, \eqref{eq:discreteup} holds with
equality.
In recent years, uncertainty relations have become very popular, in
part because they help explaining some miraculous properties of
$\ell_1$-minimization procedures as we will see below, and researchers
have naturally developed similar uncertainty relations between pairs
of bases other than the canonical basis and its conjugate. We single
out the work of Elad and Bruckstein \cite{EladBruckstein} which
introduces a generalized uncertainty principle for pairs $\Mt,\Mf$ of
orthonormal bases. Define the {\em mutual incoherence}
\cite{DonohoHuo,GribonvalNielsen,EladBruckstein} between $\Phi_1$ and
$\Phi_2$ as
\begin{equation}
\label{eq:mich}
\ich(\Mt,\Mf) =
\max_{\vt\in\Mt, \vf\in\Mf} |\<\vt,\vf\>|;
\end{equation}
then if $\at$ is the (unique) representation of $f$ in basis $\Mt$
with $\Gamma_1=\operatorname{supp}\at$, and $\af$ is the representation in $\Mf$, the
supports must obey
\begin{equation}
\label{eq:gendiscreteup}
|\Gamma_1| + |\Gamma_2| \geq \frac{2}{\ich(\Mt,\Mf)}.
\end{equation}
Note that the mutual incoherence $\mu$ always obeys $1/\sqrt{N} \le
\mu \le 1$ and measures how the two bases look alike. The smaller the
incoherence, the stronger the uncertainty relation. To see how this generalizes the discrete uncertainty principle, observe that in the case where $\Mt$ is the canonical or spike basis and $\Mf$ is the Fourier basis, $\ich = 1/\sqrt{N}$ (maximal incoherence) and \eqref{eq:gendiscreteup} is, of course, \eqref{eq:discreteup}.
\subsection{The tightness of the uncertainty relation is fragile}
It is true that there exist signals that saturate the uncertainty
relations but such signals are very special and are hardly
representative of ``generic'' or ``most'' signals. Consider the Dirac
comb for instance; here the locations and heights of the $\sqrt{N}$
spikes in the time domain carefully conspire to create an inordinate
number of cancellations in the frequency domain. This will not be the
case for sparsely supported signals in general. Simple numerical
experiments confirm that signals with the same support as the Dirac
comb but with different spike amplitudes almost always have Fourier
transforms that are nonzero everywhere. Indeed, constructing pathological examples other than the Dirac comb requires mathematical wizardry.
Moreover, if the signal length $N$ is prime (making signals like the
Dirac comb impossible to construct), the discrete uncertainty
principle is sharpened to \cite{tao:uncertainty}
\begin{equation}
\label{eq:primeup}
|\operatorname{supp} f| + |\operatorname{supp} \hat{f}| > N,
\end{equation}
which validates our intuition about the exceptionality of signals
such as the Dirac comb.
\subsection{Robust uncertainty principles}
Excluding these exceedingly rare and exceptional pairs $T: = \operatorname{supp} f, \Omega: = \operatorname{supp}\hat f$, how tight is the uncertainty relation? That is, given two
sets $T$ and $\Omega$, how large need $|T| + |\Omega|$ be so that it
is possible to construct a signal whose time and frequency supports
are $T$ and $\Omega$ respectively? In this paper, we introduce a {\em
robust} uncertainty principle (for general $N$) which illustrates
that for ``most'' sets $\supt,\supf$, \eqref{eq:primeup} is closer to
the truth than \eqref{eq:discreteup}. Suppose that we choose
$(\supt,\supf)$ at random from all pairs obeying
\begin{equation*}
|\supt|+|\supf| \le \frac{N}{\sqrt{(\beta + 1) \log N}}.
\end{equation*}
Then with overwhelmingly high probability---in fact, exceeding $1 -
O(N^{-\beta \rho})$ for some positive constant $\rho$ (we shall give
explicit values)---we will be unable to find a signal in $\mathbb{C}^N$
supported on $\supt$ in the time domain and $\supf$ in the frequency
domain. In other words, remove a negligible fraction of sets and
\begin{equation}
\label{eq:rup}
|\operatorname{supp} f|+|\operatorname{supp} \hat f| > \frac{N}{\sqrt{(\beta + 1) \log N}},
\end{equation}
holds, not \eqref{eq:discreteup}.
Our uncertainty principle is not only robust in the sense that it
holds for most sets, it is also {\em quantitative}. Consider a random
pair $(T, \Omega)$ as before and put $1_\Omega$ to be the indicator
function of the set $\Omega$. Then with essentially the same
probability as above, we have
\begin{equation}
\label{eq:qrup}
\|\hat f \cdot 1_\Omega\|^2 \le \|\hat f\|^2/2,
\end{equation}
say, for all functions $f$ supported on $T$. By symmetry, the same
inequality holds by exchanging the role of $T$ and $\Omega$,
\[
\|f \cdot 1_T\|^2 \le \|f\|^2/2,
\]
for all functions $\hat f$ supported on $\Omega$. Moreover,
as with the discrete uncertainty principle, the QRUP can be extended to arbitrary pairs of bases.
\subsection{Significance of uncertainty principles}
In the last three years or so, there has been a series of papers
starting with \cite{DonohoHuo} establishing a link between discrete
uncertainty principles and sparse approximation
\cite{DonohoHuo,GribonvalNielsen,DonohoElad,Tropp03}. In this field, the goal
is to separate a signal $f \in \mathbb{C}^N$ into two (or more) components,
each representing contributions from different phenomena. The idea is
as follows: suppose we have two (or possibly many more) orthonormal
bases $\Mt,\Mf$; we search among all the decompositions $(\at,\af)$ of
the signal $f$
\[
f = \begin{pmatrix} \Mt & \Mf \end{pmatrix} \begin{pmatrix} \at \\ \af \end{pmatrix} := \Phi\alpha
\]
for the shortest one
\begin{equation}
\label{eq:P_0}
(P_0) \quad\quad \min_{\alpha} \|\alpha\|_{\ell_0},\quad \Phi\alpha = f,
\end{equation}
where $\|\alpha\|_{\ell_0}$ is simply the size of the support of
$\alpha$, $\|\alpha\|_{\ell_0} := |\{\idtf,\ \alpha(\idtf) \neq 0\}|$.
The discrete uncertainty principles \eqref{eq:discreteup} and
\eqref{eq:gendiscreteup} are useful in the sense that they tell us
when $(P_0)$ has a unique solution. When $\Phi$ is the time-frequency
dictionary, it is possible to show that if a signal $f$ has a
decomposition $f = \Phi\alpha$ consisting of spikes on subdomain $\supt$
and frequencies on $\supf$, and
\begin{equation}
\label{eq:dhl0}
|\supt| + |\supf| < \sqrt{N},
\end{equation}
then $\alpha$ is the unique minimizer of $(P_0)$ \cite{DonohoHuo}. In a
nutshell, the reason is that if $\Phi(\alpha_0 + \delta_0)$ were
another decomposition, $\delta_0$ would obey $\Phi \delta_0 = 0$ which
says that $\delta_0$ would be of the form $\delta_0 = (\delta, - \hat
\delta)$. Now \eqref{eq:discreteup} implies that $\delta_0$ would have
at least $2\sqrt{N}$ nonzero entries which in turn would give
$\|\alpha_0 + \delta_0\|_{\ell_0} \ge \sqrt{N}$ for all $\alpha_0$
obeying $\|\alpha_0\|_{\ell_0} < \sqrt{N}$---thereby proving the
claim. Note that again the condition \eqref{eq:dhl0} is sharp because
of the extremal signal \eqref{eq:dirac}. Indeed, the Dirac comb may be
expressed as a superposition of $\sqrt{N}$ terms in the time or in the
frequency domain; for this special signal, $(P_0)$ does not have a
unique solution.
In \cite{EladBruckstein}, the same line of reasoning is followed for
general pairs of orthogonal bases, and $\ell_0$-uniqueness is
guaranteed when
\begin{equation}
\label{eq:ebl0}
|\Gamma_1| + |\Gamma_2| < \frac{1}{\ich(\Mt,\Mf)}.
\end{equation}
Unfortunately, as far as finding the sparsest decomposition, solving
$(P_0)$ directly is computationally infeasible because of the highly
non-convex nature of the $\|\cdot\|_{\ell_0}$ norm. To the best of
our knowledge, finding the minimizer obeying the constraints would
require searching over all possible {\em subsets} of columns of
$\Phi$, an algorithm that is combinatorial in nature and has
exponential complexity. Instead of solving $(P_0)$, we consider a
similar program in the $\ell_1$ norm which goes by the name of {\em
Basis Pursuit} \cite{BP}:
\begin{equation}
\label{eq:(P_1)}
(P_1)\quad \quad \min_{\alpha}
\|\alpha\|_{\ell_1}, \quad \Phi\alpha = f.
\end{equation}
Unlike the $\ell_0$ norm, the $\ell_1$ norm is convex. As a result,
$(P_1)$ can be solved efficiently using standard ``off the shelf''
optimization algorithms. The $\ell_1$-norm can also be viewed as a
``sparsity norm'' which among the vectors that meet the constraints,
will favor those with a few large coefficients and many small
coefficients over those where the coefficient magnitudes are
approximately equal \cite{BP}.
A beautiful result in \cite{DonohoHuo} actually shows that if $f$ has
a sparse decomposition $\alpha$ supported on $\Gamma$ with
\begin{equation}
\label{eq:dhgenl1}
|\Gamma| < \frac{1}{2}(1+\ich^{-1}),
\end{equation}
then the minimizer of $(P_1)$ is unique and is equal to the minimizer
of $(P_0)$ (\cite{EladBruckstein} improves the constant in
\eqref{eq:dhgenl1} from $1/2$ to $\approx .9142$). In these
situations, we can replace the highly non-convex program $(P_0)$ with
the much tamer (and convex) $(P_1)$.
We now review a few applications of these types of ideas.
\begin{itemize}
\item {\em Geometric Separation.} Suppose we have a dataset and one
wishes to separate point-like structures, from filamentary
(edge-like) structures, from sheet-like structures. In 2 dimensions,
for example, we might imagine synthesizing a signal as a
superposition of wavelets and curvelets which are ideally adapted to
represent point-like and curve-like structures respectively.
Delicate space/orientation uncertainty principles show that the
minimum $\ell_1$-norm decomposition in this combined dictionary
automatically separates point and curve-singularities; the wavelet
component in the decomposition \eqref{eq:(P_1)} accurately captures
all the pointwise singularities, while the curvelet component
captures all the edge curves. We refer to \cite{DonohoGeomSep} for
theoretical developments and to \cite{StarckAstro} for numerical
experiments.
\item {\em Texture-edges separation} Suppose now that we have an image
we wish to decompose as a sum of a cartoon-like geometric part plus a
texture part. The idea again is to use curvelets to represent the
geometric part of the image and local cosines to represent the
texture part. These ideas have recently been tested in practical settings,
with spectacular success \cite{EdgesTextures} (see also
\cite{MeyerAverbuchCoifman} for earlier and related ideas).
\end{itemize}
In a different direction, the QRUP is also implicit in some
of our own work on the exact reconstruction of sparse signals from
vastly undersampled frequency information \cite{CRT}. Here, we wish
to reconstruct a signal $f \in \mathbb{C}^N$ from the data of only $|\Omega|$
random frequency samples. The surprising result is that although most
of the information is missing, one can still reconstruct $f$ {\em
exactly} provided that $f$ is sparse. Suppose $|\Omega|$ obeys the
oversampling relation
\[
|\Omega| \asymp |T| \cdot \log N
\]
with $T:= \operatorname{supp} f$. Then with overwhelming probability, the object $f$
(digital image, signal, and so on) is the exact and unique solution of the
convex program that searches, among all signals that are consistent
with the data, for that with minimum $\ell_1$-norm.
We will draw on the tools developed in the earlier work, making the QRUP {\em explicit} and applying it to the problem of searching for sparse decompositions.
\subsection{Innovations}
Nearly all the existing literature on uncertainty relations and its
consequences focuses on worst case scenarios, compare
\eqref{eq:discreteup} and \eqref{eq:dhgenl1}. What is new here is the
development of probabilistic models which show that the performance of
Basis Pursuit in an overwhelmingly large majority of situations is
actually very different than that predicted by the overly
``pessimistic'' bounds \eqref{eq:dhgenl1}. For the time-frequency
dictionary, we will see that if a representation $\alpha$ (with spike
locations $\supt$ and sinusoidal frequencies $\supf$) of a signal $f$
exists with
\[
|\supt| + |\supf| \asymp N/\sqrt{\log N},
\]
then $\alpha$ is the sparsest representation of $f$ almost all of the
time. If in addition, $\supt$ and $\supf$ satisfy
\begin{equation}
\label{eq:sparserup}
|\supt|+|\supf| \asymp N/\log N,
\end{equation}
then $\alpha$ can be recovered by solving the convex program $(P_1)$.
In fact, numerical simulations reported in section \ref{sec:numerical}
suggest that \eqref{eq:sparserup} is far closer to the empirical
behavior than \eqref{eq:dhgenl1}, see also \cite{DonohoStark}. We show
that similar results also hold for general pairs of bases $\Mt,\Mf$.
As discussed earlier, there is by now a well-established machinery
that allows turning uncertainty relations into statements about the
ability to find sparse decompositions. We would like to point out that
our results \eqref{eq:sparserup} are not an automatic consequence of
the uncertainty relation \eqref{eq:rup} together with these existing
ideas. Instead, our analysis relies on the study of eigenvalues of
random matrices which, of course, is completely new.
\subsection{Organization of the paper}
In Section~\ref{sec:pmodel} we develop a probability model that shall
be used throughout the paper to formulate our results. In Section
\ref{sec:qrup}, we will establish uncertainty relations such as
\eqref{eq:qrup}. Sections \ref{sec:bpIF} and \ref{sec:BPgeneral} will
prove uniqueness and equality of the $(P_0)$ and $(P_1)$ programs. In
the case where the basis pair $(\Phi_1, \Phi_2)$ is the time-frequency
dictionary (Section \ref{sec:bpIF}), we will be very careful in
calculating the constants appearing in the bounds. We will be
somewhat less precise in the general case (Section
\ref{sec:BPgeneral}), and will forgo explicit calculation of
constants. We report on numerical experiments in Section
\ref{sec:numerical} and close the paper with a short discussion
(Section \ref{sec:discussion}).
\section{A Probability Model for $\Gamma_1,\Gamma_2$}
\label{sec:pmodel}
To state our results precisely, we first need to specify a
probabilistic model. We let $I_1$ and $I_2$ be two independent
Bernoulli sequences with parameters $p_T$ and $p_\Omega$ respectively
\begin{eqnarray*}
I_1(\idt) = 1 & ~~\text{with probability}~~ p_T\\
I_2(\idf) = 1 & ~~\text{with probability}~~ p_\Omega
\end{eqnarray*}
where $\idt,\idf=0,\ldots,N-1$, and define the support sets for the
spikes and sinusoids (and in general for the bases $\Mt$ and $\Mf$)
as
\begin{equation}
\label{eq:random}
\supt = \{\idt~~\text{s.t.}~~I_1(\idt) = 1 \}, \qquad
\supf = \{\idf~~\text{s.t.}~~I_2(\idf) = 1\}.
\end{equation}
If both $p_T$ and $p_\Omega$ are not too small, an application of the
standard large deviations inequality shows us that our model is
approximately equivalent to sampling ${\hbox{\bf E}} |T| = p_T \cdot N$ spike
locations and ${\hbox{\bf E}} |\Omega| = p_\Omega \cdot N$ frequency locations
uniformly at random.
As we will see in the next section, the robust uncertainty principle
holds---with overwhelming probability---over sets $\supt$ and $\supf$
randomly sampled as above. Our estimates are quantitative and
introduce sufficient conditions so that the probability of ``failure''
be arbitrarily small, i.e. less than $O(N^{-\beta})$ for some
arbitrary $\beta > 0$. As a consequence, we will always assume that
\begin{equation}
\label{eq:betalogN}
\min({\hbox{\bf E}} |T|, {\hbox{\bf E}} |\Omega|) \ge 4 (\beta+1) \cdot \log N
\end{equation}
as otherwise, one would have to consider situations in which $T$ or
$\Omega$ (or both) are empty sets---a situation of rather limited
interest. We also note that for $p_T$ and $p_\Omega$ as above, we have
\begin{equation}
\label{eq:2T}
{\hbox{\bf P}}(|T| > 2 p_T \cdot N) \le N^{-\beta},
\end{equation}
as this follows from the well-known large deviation bound \cite{MassartSharp}
$${\hbox{\bf P}}(|T| > {\hbox{\bf E}} |T| + t) \le \exp\left(-\frac{t^2}{2 {\hbox{\bf E}} |T| +
2t/3}\right).$$
Further, to establish sparse approximation bounds (section
\ref{sec:bpIF}), we will also introduce a probability model on the
``active'' coefficients. Given a pair $(\supt, \supf)$, we sample the
coefficient vector $\{\alpha(\idtf),~\idtf\in\Gamma\}$ from a
distribution with identically and independently distributed
coordinates; we also impose that each $\alpha(\idtf)$ be drawn from a
continuous probability distribution that is {\em circularly symmetric}
in the complex plane; that is, the phase of $\alpha(\idtf)$ is uniformly
distributed on $[0,2\pi)$.
\section{Quantitative Robust Uncertainty Principles}
\label{sec:qrup}
Equipped with the probability model \eqref{eq:random}, we now
introduce our uncertainty relations. To state our result, we make use
of the standard notation $o(1)$ to indicate a numerical term
tending to $0$ as $N$ goes to infinity.
\begin{theorem}
\label{th:qrupIF}
Assume the parameters in the model \eqref{eq:random} obey
\begin{equation}
\label{eq:tauqrup}
2\sqrt{{\hbox{\bf E}} |T| \cdot {\hbox{\bf E}} |\Omega|} \le {\hbox{\bf E}} |T| + {\hbox{\bf E}} |\Omega| \le
\frac{N}{\sqrt{(\beta+1)\log N}} \, (\rho_0/2 +
o(1)), \qquad \rho_0 = .7614
\end{equation}
(we will assume throughout the paper that $\beta \ge 1$ and $N \ge
512$) and let $(\supt,\supf)$ be a randomly sampled support pair.
Then with probability at least $1-O(\log N \cdot N^{-\beta})$, {\em
every} signal $f$ supported on $\supt$ in the time domain has most
of its energy in the frequency domain outside of $\supf$
\[
\|\hat f \cdot 1_\Omega\|^2 \leq \frac{\|\hat f\|^2}{2};
\]
and likewise, {\em every} signal $f$ supported on $\supf$ in the
frequency domain has most of its energy in the time domain outside of
$\supt$
\[
\|f \cdot 1_T\|^2 \leq \frac{\|f\|^2}{2}.
\]
As a result, it is impossible to find a signal $f$ supported on
$\supt$ whose discrete Fourier transform $\hat{f}$ is supported on
$\supf$. For finite sample sizes $N$, we can select
the parameters in \eqref{eq:tauqrup} as
\[
{\hbox{\bf E}} |T| + {\hbox{\bf E}} |\Omega| \le \frac{.2660\, N}{\sqrt{(\beta+1)\log N}}.
\]
\end{theorem}
To establish this result, we introduce (as in \cite{CRT}) the
$|\supt|\times |\supt|$ auxiliary matrix $\mathcal{H}_\supt$
\begin{equation}
\label{eq:auxt}
\mathcal{H}_\supt(\idt,\idt^\prime) = \begin{cases}
0 & \idt = \idt^\prime \\
\sum_{\idf\in\supf} e^{i\idf (\idt-\idt^\prime)} & \idt\not=\idt^\prime
\end{cases}.
\end{equation}
The following lemma effectively says that the eigenvalues of $\mathcal{H}_\supt$
are small compared to $N$.
\begin{lemma}
\label{lm:auxeigs}
Fix $q$ in $(0,1)$ and suppose that
\[
p_T + p_\Omega \le \rho_0 \cdot \frac{q}{\sqrt{(\beta+1)\log N}},
\qquad \rho_0 = .7614.
\]
Then the matrix $\mathcal{H}_\supt$ obeys
\[
{\hbox{\bf P}}\left( \|\mathcal{H}_\supt\| \geq q N \right) \leq (\beta + 1) \log N \cdot
N^{-\beta}.
\]
\end{lemma}
\begin{proof}
The Markov inequality gives \begin{equation}
\label{eq:markov}
{\hbox{\bf P}}(\|\mathcal{H}_\supt\| \geq q N) \leq \frac{{\hbox{\bf E}}\|\mathcal{H}_\supt^n\|^2}{q^{2n}N^{2n}},
\quad \text{for all } n \geq 1.
\end{equation}
Recall next that the Frobenius norm $\|\cdot\|_F$ dominates the
operator norm $\|\mathcal{H}_\supt\|\leq\|\mathcal{H}_\supt\|_F$. This fact allows us to leverage
results from \cite{CRT} which derives bounds for the conditional
expectation ${\hbox{\bf E}}[\|\mathcal{H}_\supt^n\|^2_F \, | \, T]$ (where the expectation is
over $\supf$ for a {\em fixed} $\supt$):
\[
{\hbox{\bf E}}[\|\mathcal{H}_\supt^n\|^2_F \, | \,T] \leq
(2n)\left(\frac{(1+\sqrt{5})^2}{2e(1-p_\Omega)}\right)^n n^n |\supt|^{n+1}
\, p_{\Omega}^n \, N^n.
\]
Our assumption about the size of $p_T + p_\Omega$ assures that
$p_\Omega < .12$ so that $(1+\sqrt{5})^2/2(1-p_\Omega) \le 6$, whence
\begin{equation}
\label{eq:crtbnd}
{\hbox{\bf E}}[\|\mathcal{H}_\supt^n\|^2_F \, | \,T] \leq 2n \,
(6/e)^n n^n |\supt|^{n+1} \, p_\Omega^n \, N^n.
\end{equation}
We will argue below that for $n \le (\beta + 1) \log N$ and $p_T$
obeying \eqref{eq:betalogN}
\begin{equation}
\label{eq:chernoff}
{\hbox{\bf E}} [|T|^{n+1}] \le 1.15^{n+1} \, [{\hbox{\bf E}} |T|]^{n+1} =
1.15^{n+1} \, (p_T \, N)^{n+1}.
\end{equation}
Since $p_T \le .25$, we obtain
\[
{\hbox{\bf E}} \|\mathcal{H}_\supt^n\|^2_F \le (6 \times 1.15/e)^n \, n^{n+1} \cdot
p_T^{n} \, p_\Omega^{n} \, N^{2n+1}.
\]
Observe now that together with $\sqrt{p_T \, p_\Omega} \le (p_T +
p_\Omega)/2$, this gives
\begin{equation}
\label{eq:pb1}
{\hbox{\bf P}}(\|\mathcal{H}_\supt\| \geq q N) \leq
\left(\frac{p_T + p_\Omega}{\rho_0 \, q}\right)^{2n} \, e^{-n} \, n^{n+1} N,
\qquad \rho_0 = 1/\sqrt{6 \times 1.15} = .7614.
\end{equation}
We now specialize \eqref{eq:pb1} and take $n= \lceil (\beta+1)\log N
\rceil$ where $\lceil x \rceil$ is the smallest integer greater or
equal to $x$. Then if $p_T + p_\Omega$ obeys \eqref{eq:tauqrup},
\begin{equation}
{\hbox{\bf P}}(\|\mathcal{H}_\supt\| \geq q N) \leq [(\beta+1)\log N + 1] \cdot N^{-\beta},
\end{equation}
as claimed.
\end{proof}
We now return to \eqref{eq:chernoff} and write $|T|$ as
$$
|T| = {\hbox{\bf E}} |T| \cdot (1 + Y), \qquad Y = \frac{|T| - {\hbox{\bf E}} |T|}{{\hbox{\bf E}} |T|}.
$$
Then
$$
{\hbox{\bf E}} |T|^{n+1} = ({\hbox{\bf E}} |T|)^{n+1} \cdot {\hbox{\bf E}} (1 + Y)^{n+1} \le ({\hbox{\bf E}}
|T|)^{n+1} \cdot {\hbox{\bf E}} [\exp((n+1) Y)].
$$
Observe that $Y$ is an affine function of a sum of independent
Bernoulli random variables. Standard calculations then give
\[
{\hbox{\bf E}} [\exp(n Y)] = e^{-n} \cdot \left(1 + \frac{n}{N}
\frac{e^\lambda - 1}{\lambda} \right)^N, \qquad \lambda = n/(N p_T).
\]
Recall the assumption \eqref{eq:betalogN} which implies $\lambda \le
1/4$ which in turn gives $\lambda^{-1}(e^\lambda - 1) -
1 \le \log 1.15$. The claim follows.
We would like to remark that \eqref{eq:chernoff} might be considerably
improved when ${\hbox{\bf E}} |T| = p_T \cdot N$ is much larger than $n$ since in
that case, the binomial will have enhanced concentration around its
mean. For example,
\[
{\hbox{\bf E}} |T|^{n+1} \le 2 \cdot [{\hbox{\bf E}} |T|]^{n+1}
\]
in the event where $n \le \rho \cdot \sqrt{p_T \, N}$ for some
positive constant $\rho$ that the above method allows us to calculate
explicitly. This would of course lead to improved constants in
\eqref{eq:tauqrup} and in the statement of Lemma \ref{lm:auxeigs}. In
this paper, we shall not pursue all these refinements as not to
clutter the exposition.
\begin{proof}{\bf of Theorem~\ref{th:qrupIF}}
Let $f\in\mathbb{C}^N$ be supported on $\supt$; as such, $\Rt^*\Rt f = f$,
where $\Rt$ is the restriction operator to $\supt$. Put $F_{\Omega
T}= \Rf\FT\Rt^*$. We have
\begin{equation*}
\|\hat{f} \cdot 1_\Omega\|_2 =
\|F_{\Omega T} f\|_2 \le \|F_{\Omega T}\| \cdot \|f\|_2,
\end{equation*}
and since, $\|F_{\Omega T}\|^2 = \|F_{\Omega T}^*F_{\Omega T}\|$, it
will suffice to show that with high probability, the largest
eigenvalue of $F_{\Omega T}^*F_{\Omega T}$ is less than $1/2$.
Using the definition of the auxiliary matrix in \eqref{eq:auxt}, it is
not hard to verify the identity $F_{\Omega T}^*F_{\Omega T} =
\frac{|\supf|}{N} I + \frac{1}{N}\mathcal{H}_\supt$. Suppose that $p_T + p_\Omega$
obeys
the condition in Lemma~\ref{lm:auxeigs}; then
except for a set of probability less than $O(\log N \cdot N^{-\beta})$,
\begin{equation*}
\frac{|\supf|}{N} \le
2 p_\Omega \le 2 \rho_0 \cdot \frac{q}{\sqrt{(\beta+1) \log N}},
\quad \text{ and } \quad \frac{1}{N}\|\mathcal{H}_\supt\| \le q,
\end{equation*}
and, therefore,
\begin{equation}
\label{eq:AtAnorm}
\|F_{\Omega T}^*F_{\Omega T}\| \leq q \cdot \left(1 +
\frac{2 \rho_0}{\sqrt{(\beta+1) \log N}}\right) = q(1+o(1)).
\end{equation}
The theorem follows from taking $q = 1/2 + o(1)$. For the statement
about finite sample sizes, we observe that for $\beta \ge 1$ and $N
\ge 512$, $2/\sqrt{(\beta+1) \log N} \le .567$ and, therefore,
$\|F_{\Omega T}^*F_{\Omega T}\| \le 1/2$ provided that $q \le [2(1+
.567\rho_0)]^{-1}$. This establishes the first part of the theorem.
By symmetry of the discrete Fourier transform, the claim about the
size of $\|f \cdot 1_T\|$ for $\hat f$ supported on a random set
$\Omega$ is proven exactly in the same way. This concludes the proof
of the theorem.
\end{proof}
\section{Robust UPs and Basis Pursuit: Spikes and Sinusoids}
\label{sec:bpIF}
As in \cite{DonohoHuo,GribonvalNielsen,EladBruckstein}, our
uncertainty principles are directly applicable to finding sparse
approximations in redundant dictionaries. In this section, we look
exclusively at the case of spikes and sinusoids. We will leverage
Theorem~\ref{th:qrupIF} in two different ways:
\begin{enumerate}
\item $\ell_0$-uniqueness: If $f\in\mathbb{C}^N$ has a decomposition $\alpha$
supported on $\supt\cup\supf$ with $|\supt|+|\supf|\asymp (\log
N)^{-1/2} N$, then with high probability, $\alpha$ is the sparsest
representation of $f$.
\item Equivalence of $(P_0)$ and $(P_1)$: If $|\supt|+|\supf|\asymp
(\log N)^{-1} N$, then $(P_1)$ recovers $\alpha$ with overwhelmingly
large probability.
\end{enumerate}
\subsection{$\ell_0$-uniqueness}
\label{sec:l0uniq}
To illustrate that it is possible to do much better than
\eqref{eq:dhl0}, we first consider the case in which $N$ is a prime
integer. Tao \cite{tao:uncertainty} derived the following exact,
sharp discrete uncertainty principle.
\begin{lemma}
\label{lm:primeup}\cite{tao:uncertainty} Suppose that the sample size $N$ is a
prime integer. Then
\[
|\operatorname{supp} f| + |\operatorname{supp} \hat{f}| > N, \qquad \forall f \in \mathbb{C}^N.
\]
\end{lemma}
Using Lemma~\ref{lm:primeup}, a strong $\ell_0$-uniqueness result
immediately follows:
\begin{corollary}
\label{th:l0prime}
Let $\supt$ and $\supf$ be subsets of $\{0, \ldots, N-1\}$ for $N$
prime, and let $\alpha$ (with $\Phi\alpha = f$) be a vector supported
on $\suptf=\supt\cup\supf$ such that
\[
|\supt| + |\supf| \le N/2.
\]
Then the solution to $(P_0)$ is unique and is equal to $\alpha$.
Conversely, there exist distinct vectors $\alpha_0, \alpha_1$ obeying
$|\operatorname{supp}\alpha_0|, |\operatorname{supp}\alpha_1| \leq N/2 +1$ and $\Phi\alpha_0 =
\Phi \alpha_1$.
\end{corollary}
\begin{proof}
As we have seen in the introduction, one direction is trivial. If
$\alpha + \delta_0$ is another decomposition, then $\delta_0$ is
of the form $\delta_0: = (\delta, - \hat \delta)$. Lemma
\ref{lm:primeup} gives $\|\delta_0\|_{\ell_0} > N$ and thus
$\|\alpha + \delta_0\|_{\ell_0} \ge \|\delta_0\|_{\ell_0} -
\|\alpha\|_{\ell_0} > N/2 \ge \|\alpha\|_{\ell_0}$. Therefore, $\|\alpha +
\delta_0\|_{\ell_0} > \|\alpha\|_{\ell_0}$.
For the converse, we know that since $\Phi$ has rank at most $N$, we
can find $\delta \neq 0$ with $|\operatorname{supp}\delta| = N + 1$ such that
$\Phi \delta = 0$. (Note that it is of course possible to construct
such $\delta$'s for any support of size greater than $N$). Consider
a partition of $\operatorname{supp}\delta = \suptf_0 \cup \suptf_1$ where
$\suptf_0$ and $\suptf_1$ are two disjoint sets with $|\suptf_0| =
N/2+1$ and $|\suptf_1| = N/2$, say. The claim follows by taking
$\alpha_0 = \delta|_{\suptf_0}$ and $\alpha_1 = -
\delta|_{\suptf_1}$.
\end{proof}
A slightly weaker statement addresses arbitrary sample sizes.
\begin{theorem}
\label{th:qrupl0IF}
Let $f = \Phi \alpha$ be a signal with support set $\suptf =
\supt\cup\supf$ and coefficients $\alpha$ sampled as in
Section~\ref{sec:pmodel}, and with parameters obeying
\eqref{eq:tauqrup}. Then with probability at least $1-O(\log N \cdot
N^{-\beta})$, the solution to $(P_0)$ is unique and equal to $\alpha$.
\end{theorem}
To prove Theorem \ref{th:qrupl0IF}, we shall need the following lemma:
\begin{lemma}
\label{lm:dimnull}
Suppose $\supt$ and $\supf$ are fixed subsets of $\{0,\ldots,N-1\}$, put
$\suptf
= \supt\cup\supf$, and let $\Phi_\suptf:=\Phi\Rtf^*$ be the
$N\times(|\supt|+|\supf|)$ matrix $\Phi_\suptf = \begin{pmatrix} \Rt^* &
\FT^*\Rf^*\end{pmatrix}$. Then
\[
|\suptf| < 2N \qquad \Rightarrow \qquad \operatorname{dim}(\operatorname{Null}(\Phi_\suptf)) <
\frac{|\suptf|}{2}.
\]
\end{lemma}
\newcommand{{F_{\Omega T}}}{{F_{\Omega T}}}
\newcommand{{F^*_{\Omega T}}}{{F^*_{\Omega T}}}
\begin{proof}
Obviously,
\[
\operatorname{dim}(\operatorname{Null}(\Phi_\suptf)) = \operatorname{dim}(\operatorname{Null}(\Phi^*_\suptf \Phi_\suptf)),
\]
and we then write the $|\suptf|\times|\suptf|$ matrix $\Phi^*_\suptf
\Phi_\suptf$ as
\[
\Phi_\suptf^* \Phi_\suptf = \begin{pmatrix} I & {F^*_{\Omega T}}
\\ {F_{\Omega T}} & I \end{pmatrix}
\]
with ${F_{\Omega T}}$ the partial Fourier transform from $T$ to $\Omega$, ${F_{\Omega T}}:=
\Rf\FT\Rt^*$. The dimension of the nullspace of $\Phi_\suptf^*
\Phi_\suptf $ is simply the number of eigenvalues of $\Phi_\suptf^*
\Phi_\suptf $ that are zero. Put
\[
G := I - \Phi_\suptf^* \Phi_\suptf = \begin{pmatrix} 0 & {F^*_{\Omega T}} \\ {F_{\Omega T}} & 0 \end{pmatrix}, \quad
\text{so that} \quad G^*G = \begin{pmatrix} {F^*_{\Omega T}} {F_{\Omega T}} & 0 \\ 0 & {F_{\Omega T}} {F^*_{\Omega T}} \end{pmatrix}.
\]
Letting $\lambda_j(\cdot)$ denote the $j$th largest eigenvalue of a
matrix, observe that $\lambda_j(\Phi_\suptf^* \Phi_\suptf ) = 1 -
\lambda_j(G)$, and since $G$ is symmetric
\begin{equation}
\label{eq:Gtr}
\operatorname{Tr}(G^*G) =
\lambda_1^2(G) + \lambda_2^2(G) + \cdots + \lambda_{|\supt|+|\supf|}^2(G).
\end{equation}
We also have that $\operatorname{Tr}(G^*G) = \operatorname{Tr}({F^*_{\Omega T}}{F_{\Omega T}}) +
\operatorname{Tr}({F_{\Omega T}}{F^*_{\Omega T}})$, so the eigenvalues in \eqref{eq:Gtr} will
appear in duplicate,
\begin{equation}
\label{eq:Gtr2}
\lambda_1^2(G) + \lambda_2^2(G) + \cdots + \lambda_{|\supt|+|\supf|}^2(G) =
2\cdot(\lambda_1^2({F^*_{\Omega T}}{F_{\Omega T}}) + \cdots +
\lambda_{|\supt|}^2({F^*_{\Omega T}}{F_{\Omega T}})).
\end{equation}
We calculate
\[
({F^*_{\Omega T}}{F_{\Omega T}})_{\idt,{\idt^\prime}} = \frac{1}{N}\sum_{\idf\in\supf}
e^{i\idf(\idt-{\idt^\prime})} \quad\quad ({F_{\Omega T}}{F^*_{\Omega T}})_{\idf,{\idf^\prime}} =
\frac{1}{N}\sum_{\idt\in\supt} e^{i\idt(\idf-{\idf^\prime})}.
\]
and thus
\begin{equation}
\label{eq:trG}
\operatorname{Tr}(G^*G) = \frac{2(|\supt|\cdot|\supf|)}{N}.
\end{equation}
Observe now that for the null space of $\Phi_\suptf^* \Phi_\suptf $ to
have dimension $K$, at least $K$ of the eigenvalues in \eqref{eq:Gtr2}
must have magnitude greater than or equal to $1$. As a result
\[
\operatorname{Tr}(G^*G) < 2K ~ \Rightarrow ~\operatorname{dim}(\operatorname{Null}(\Phi_\suptf^* \Phi_\suptf )) < K.
\]
Using the fact that $(a+b)\geq 4ab/(a+b)$ (arithmetic mean dominates
geometric mean), we see that if $|\supt|+|\supf| < 2N$, then
$2|\supt|\cdot|\supf|/N < |\supt|+|\supf|$ which implies
\eqref{eq:Gtr} (and hence $\operatorname{dim}(\operatorname{Null}(\Phi_\suptf^* \Phi_\suptf ))$)
is less than $(|\supt|+|\supf|)/2$.
\end{proof}
\begin{proof}{\bf of Theorem~\ref{th:qrupl0IF}}
We assume $\suptf$ is selected such that $\Phi_\suptf$ has full
rank. This happens if $\|{F^*_{\Omega T}} {F_{\Omega T}}\| < 1$ and
Theorem~\ref{th:qrupIF} states that this occurs with probability at
least $1-O(\log N \cdot N^{-\beta})$.
Given this $\suptf$, the (continuous) probability distribution on
the $\{\alpha(\idtf), \idtf\in\suptf\}$ induces a continuous
probability distribution on $\operatorname{Range}(\Phi_\suptf)$. We will show
that for every ${\suptf^\prime}$ with $|{\suptf^\prime}|\leq |\suptf|$
\begin{equation}
\label{eq:dims}
\operatorname{dim}(\operatorname{Range}(\Phi_{\suptf^\prime})\cap\operatorname{Range}(\Phi_\suptf)) < |\suptf|.
\end{equation}
As such, the set of signals in $\operatorname{Range}(\Phi_\suptf)$ that have
expansions on a ${\suptf^\prime}\not=\suptf$ that are {\em at least} as sparse
as their expansions on $\suptf$ is a finite union of subspaces of
dimension strictly smaller than $|\suptf|$. This set has measure zero
as a subset of $\operatorname{Range}(\Phi_\suptf)$, and hence the probability of
observing such a signal is zero.
To show \eqref{eq:dims}, we may assume that $\Phi_{\suptf^\prime}$ also has full
rank, since if $\operatorname{dim}(\operatorname{Range}(\Phi_{\suptf^\prime})) < |{\suptf^\prime}|$, then
\eqref{eq:dims} is certainly true. For a set of coefficients $\alpha$
supported on $\suptf$ and $\alpha^\prime$ supported on ${\suptf^\prime}$ to have the
same image under $\Phi$, $\Phi\alpha = \Phi\alpha^\prime$ (or equivalently
$\Phi_\suptf\Rtf\alpha = \Phi_{\suptf^\prime}R_{\suptfp}\alpha^\prime$), two things must
be true:
\begin{enumerate}
\item $\alpha$ and $\alpha^\prime$ must agree on $\suptf\cap{\suptf^\prime}$. This
is a direct consequence of $\Phi_{\suptf^\prime}$ being full rank (its
columns are linearly independent).
\item There is a $\delta\in\operatorname{Null}(\Phi)$ such that $\alpha^\prime =
\alpha+\delta$. Of course,
\[
\delta(\idtf) = 0,\quad \idtf\in(\suptf\cup{\suptf^\prime})^c.
\]
By item 1 above, we will also have
\[
\delta(\idtf)=0,\quad \suptf\cap{\suptf^\prime}.
\]
Thus, $\operatorname{supp}\delta\subset(\suptf\backslash{\suptf^\prime})\cup({\suptf^\prime}\backslash\suptf)$.
\end{enumerate}
In light of these observations, we see that for
$\operatorname{dim}(\operatorname{Range}(\Phi_{\suptf^\prime})\cap\operatorname{Range}(\Phi_\suptf)) = |\suptf|$, we
need that for {\em every} $\alpha$ supported on $\suptf$, there is a
$\delta\in\operatorname{Null}(\Phi)$ that is supported on
$(\suptf\backslash{\suptf^\prime})\cup({\suptf^\prime}\backslash\suptf)$ such that
\[
\delta(\idtf) = -\alpha(\idtf) \quad \idtf\in\suptf\backslash{\suptf^\prime}.
\]
In other words, we need
\[
\operatorname{dim}(\operatorname{Null}(Q_{(\suptf\backslash{\suptf^\prime})\cup({\suptf^\prime}\backslash\suptf)}))
\geq \left|\suptf\backslash{\suptf^\prime}\right|.
\]
However, Lemma~\ref{lm:dimnull} tells us
\begin{eqnarray*}
\operatorname{dim}(\operatorname{Null}(Q_{(\suptf\backslash{\suptf^\prime})\cup({\suptf^\prime}\backslash\suptf)})) & < &
\frac{\left|\suptf\backslash{\suptf^\prime}\right|+\left|{\suptf^\prime}\backslash\suptf\right|}{2} \\
& \leq & \left|\suptf\backslash{\suptf^\prime}\right|,
\end{eqnarray*}
since $|{\suptf^\prime}|\leq |\suptf|$. Hence
$\operatorname{dim}(\operatorname{Range}(\Phi_{\suptf^\prime})\cap\operatorname{Range}(\Phi_\suptf)) < |\suptf|$, and the theorem follows.
\end{proof}
\subsection{Recovery via $\ell_1$-minimization}
\label{sec:l1}
The problem $(P_0)$ is combinatorial and solving it directly is
infeasible even for modest-sized signals. This is the reason why we
consider instead the convex relaxation \eqref{eq:(P_1)}.
\begin{theorem}
\label{th:qrupl1IF}
Suppose $f = \Phi \alpha$ is a random signal sampled as in
Section~\ref{sec:pmodel} and with parameters obeying
\begin{equation}
\label{eq:taul1}
{\hbox{\bf E}} |T| + {\hbox{\bf E}} |\Omega| \le
\frac{N}{(\beta+1)\log N} \cdot (1/8 + o(1)).
\end{equation}
Then with probability at least $1- O( (\log N) \cdot N^{-\beta})$, the
solutions of $(P_1)$ and $(P_0)$ are identical and equal to $\alpha$.
\end{theorem}
In addition to being computationally tractable, there are analytical
advantages which come with $(P_1)$, as our arguments will essentially
rely on a strong duality result \cite{BVConvex}. In fact, the next
section shows that $\alpha$ is a unique minimizer of $(P_1)$ if and only
if there exists a ``dual vector'' $\dv$ satisfying certain properties.
Here, the crucial part of the analysis relies on the fact that
``partial'' Fourier matrices ${F_{\Omega T}} := \Rf\FT\Rt^*$ have very
well-behaved eigenvalues, hence the connection with robust uncertainty
principles.
\subsubsection{$\ell_1$-duality}
\label{sec:duality}
For a vector of coefficients $\alpha\in\mathbb{C}^{2N}$ supported on
$\Gamma:=\Gamma_1\cup\Gamma_2$, define the ``sign'' vector $\operatorname{sgn}\alpha$ by
$(\operatorname{sgn}\alpha)(\idtf) := \alpha(\idtf)/|\alpha(\idtf)|$ for $\idtf\in\Gamma$
and $(\operatorname{sgn}\alpha)(\idtf) = 0$ otherwise. We say that $\dv\in\mathbb{C}^N$ is a
{\em dual vector} associated to $\alpha$ if $\dv$ obeys
\begin{eqnarray}
\label{eq:Psgn}
(\Phi^*\dv)(\idtf) = (\operatorname{sgn}\alpha)(\idtf) & \idtf\in\Gamma & \\
\label{eq:Plt1}
|(\Phi^*\dv)(\idtf)| < 1\quad\quad & ~~\idtf\in\Gamma^c. &
\end{eqnarray}
With this notion, we introduce a strong duality result which is
similar to that presented in \cite{CRT}, see also \cite{FuchsDual}.
\begin{lemma}
\label{duality}
Consider a vector $\alpha\in\mathbb{C}^{2N}$ with support $\Gamma =
\Gamma_1\cup\Gamma_2$ and put $f = \Phi\alpha$.
\begin{itemize}
\item Suppose that there exists a dual vector and that $\Phi_\Gamma$
has full rank. Then the minimizer $\alpha^\sharp$ to the problem
$(P_1)$ is unique and equal to $\alpha$.
\item Conversely, if $\alpha$ is the unique minimizer of $(P_1)$, then
there exists a dual vector.
\end{itemize}
\end{lemma}
\begin{proof}
The program dual to $(P_1)$ is
\begin{equation}
\label{eq:P_1dual}
(D1)\quad\quad
\max_{\dv} ~\Re\left(\dv^*f \right)\quad\quad \mathrm{subject~to}\quad
\|\Phi^*\dv\|_{\ell_\infty} \leq 1.
\end{equation}
It is a classical result in convex optimization that if $\tilde{\alpha}$
is a minimizer of $(P_1)$, then $\Re(\dv^*\Phi\tilde{\alpha}) \leq
\|\tilde{\alpha}\|_{\ell_1}$ for all feasible $\dv$. Since the primal
is a convex functional subject only to equality constraints, we will
have $\Re(\tilde{\dv}^*\Phi\tilde{\alpha}) = \|\tilde{\alpha}\|_{\ell_1}$
if and only if $\tilde{\dv}$ is a maximizer of $(D1)$
\mbox{\cite[Chap. 5]{BVConvex}}.
First, suppose that $\Phi\Rtf^*$ has full rank and that a dual vector
$\dv$ exists. Set $P = \Phi^*\dv$. Then
\begin{eqnarray*}
\Re \<\Phi\alpha,\dv\> & = & \Re\<\alpha,\Phi^*\dv\> \\
& = & \Re\sum_{\idtf=0}^{2N-1} \overline{P(\idtf)}\alpha(\idtf) \\
& = & \Re\sum_{\idtf\in\Gamma} \overline{\operatorname{sgn}\alpha(\idtf)}\alpha(\idtf) \\
& = & \|\alpha\|_{\ell_1}
\end{eqnarray*}
and $\alpha$ is a minimizer of $(P_1)$. Since $|P(\idtf)| < 1$ for
$\idtf\in\Gamma^c$, all minimizers of $(P_1)$ must be supported on
$\Gamma$. But $\Phi\Rtf^*$ has full rank, so $\alpha$ is the unique
minimizer.
For the converse, suppose that $\alpha$ is the unique minimizer of
$(P_1)$. Then there exists at least one $\dv$ such that with $P =
\Phi^*\dv$, $\|P\|_{\ell_\infty} \leq 1$ and $\dv^*f =
\|\alpha\|_{\ell_1}$. Then
\begin{eqnarray*}
\|\alpha\|_{\ell_1} & = & \Re\<\Phi\alpha,\dv\> \\
& = & \Re\<\alpha,\Phi^*\dv\> \\
& = & \Re\sum_{\idtf\in\Gamma}\overline{P(\idtf)}\alpha(\idtf).
\end{eqnarray*}
Since $|P(\idtf)|\leq 1$, equality above can only hold if
$P(\idtf)=\operatorname{sgn}\alpha(\idtf)$ for $\idtf\in\Gamma$.
We will argue geometrically that for one of these $\dv$, we have
$|P(\idtf)|<1$ for $\idtf\in\Gamma^c$. Let $V$ be the hyperplane
$\{d\in\mathbb{C}^{2N}:~\Phi d = f\}$, and let $B$ be the polytope
$B=\{d\in\mathbb{C}^{2N}:~\|d\|_{\ell_1}\leq\|\alpha\|_{\ell_1}\}$. Each of the
$\dv$ above corresponds to a hyperplane $H_\dv =
\{d:~\Re\<d,\Phi^*\dv\>=\|\alpha\|_{\ell_1}\}$ that contains $V$ (since
$\Re\<f,\dv\>=\|\alpha\|_{\ell_1}$) and which defines a halfspace
$\{d:~\Re\<d,\Phi^*\dv\> \leq 1\}$ that contains $B$ (and for each
such hyperplane, a $\dv$ exists that describes it as such). Since
$\alpha$ is the unique minimizer, for one of these ${\dv^\prime}$, the
hyperplane $H_{\dv^\prime}$ intersects $B$ only on the minimal facet
$\{d:~\operatorname{supp} d \subset\Gamma\}$, and we will have
$|P(\idtf)|<1,~\idtf\in\Gamma^c$.
\end{proof}
Thus to show that $(P_1)$ recovers a representation $\alpha$ from a
signal observation $\Phi \alpha$, it is enough to prove that a dual
vector with properties \eqref{eq:Psgn}--\eqref{eq:Plt1} exists.
As a sufficient condition for the equivalence of $(P_0)$ and $(P_1)$,
we construct the {\em minimum energy} dual vector
\[
\min \|P\|_2, \qquad \text{subject to} \qquad P\in\operatorname{Range}(\Phi^*)
\text{ and } P(\idtf) = \operatorname{sgn}(\alpha)(\idtf), \,\, \forall \idtf \in \suptf.
\]
This minimum energy vector is somehow ``small,'' and we hope that it
obeys the inequality constraints \eqref{eq:Plt1}. Note that $\|P\|_2^2 =
2 \|\dv\|_2^2$, and the problem is thus the same as finding the $\dv \in
\mathbb{C}^N$ with minimum norm obeying the constraint above; the solution
is classical and given by
\[
\dv = \Phi_\Gamma (\Phi^*_\Gamma \Phi_\Gamma)^{-1}\Rtf\operatorname{sgn}\alpha
\]
where again, $\Rtf$ is the restriction operator to $\suptf$.
Setting $P = \Phi^*\dv$, we need to
establish that
\begin{enumerate}
\item $\Phi^*_\Gamma \Phi_\Gamma$ is invertible (so that $\dv$
exists), and if so
\item $|P(\idtf)| < 1$ for $\idtf\in\suptf^c$.
\end{enumerate}
The next section shows that for $|\supt|+|\supf|\asymp N/\log N$, not
only is $\Phi^*_\Gamma \Phi_\Gamma$ invertible with high probability
but in addition, the eigenvalues of $(\Phi^*_\Gamma \Phi_\Gamma)^{-1}$
are all less than two, say. These size estimates will be very useful
to show that $P$ is small componentwise.
\subsubsection{Invertibility}
\label{sec:inverse}
\begin{lemma}
\label{th:inv}
Fix $\beta\geq 1$ and the parameters as in \eqref{eq:taul1}. Then the
matrix $\Phi^*_\Gamma \Phi_\Gamma$ is invertible and obeys
\[
\|(\Phi^*_\Gamma \Phi_\Gamma)^{-1}\| = 1 + o(1).
\]
with probability exceeding $1-O(\log N \cdot N^{-\beta})$.
\end{lemma}
\begin{proof}
We begin by recalling that with ${F_{\Omega T}}$ as before, $\Phi^*_\Gamma
\Phi_\Gamma$ is given by
\[
\Phi^*_\Gamma \Phi_\Gamma = I + \begin{pmatrix} 0 &
{F^*_{\Omega T}} \\ {F_{\Omega T}} & 0 \end{pmatrix}.
\]
Clearly, $\|(\Phi^*_\Gamma \Phi_\Gamma)^{-1}\| =
1/\lambda_{\mathrm{min}}(\Phi^*_\Gamma \Phi_\Gamma)$ and since
$\lambda_{\mathrm{min}}(\Phi^*_\Gamma \Phi_\Gamma) \ge 1 -
\sqrt{\|{F^*_{\Omega T}}{F_{\Omega T}}\|}$, we have
\begin{equation*}
\|(\Phi^*_\Gamma \Phi_\Gamma)^{-1}\| \le \frac{1}{1 - \sqrt{\|{F^*_{\Omega T}}{F_{\Omega T}}\|}}.
\end{equation*}
We then need to prove that $\|{F^*_{\Omega T}}{F_{\Omega T}}\| = o(1)$ with the required
probability. This follows from the conclusion of Lemma
\ref{lm:auxeigs} which \eqref{eq:taul1} allows us to specialize to the
value $1/q = 8\rho_0 \sqrt{(\beta + 1) \log N}$. Note that this gives
more than what is claimed since
\[
\|(\Phi^*_\Gamma \Phi_\Gamma)^{-1}\| \le 1 + \frac{1}{8\rho_0\,
\sqrt{(\beta + 1) \log N}} + O(1/\log N).
\]
\end{proof}
{\bf Remark.} Note that Lemma \ref{lm:auxeigs} assures us that it is
sufficient to take ${\hbox{\bf E}} |T| + {\hbox{\bf E}} |\Omega|$ of the order of
$N/\sqrt{\log N}$ (rather than of the order of $N/\log N$ as the
Theorem states) and still have invertibility with $\|(\Phi^*_\Gamma
\Phi_\Gamma)^{-1}\| \le 2$, say. The reason why we actually need the
stronger condition will become apparent in the next subsection.
\subsubsection{Proof of Theorem \ref{th:qrupl1IF}}
\label{sec:Pbound}
To prove our theorem, it remains to show that, with high probability,
$|P(\idtf)| < 1$ on $\suptf^c$.
\begin{lemma}
\label{th:Plt1}
Under the hypotheses of Theorem \ref{th:qrupl1IF}, for each
$\idtf\in\suptf^c$
\[
{\hbox{\bf P}}\left( |P(\idtf)| \geq 1 \right) ~\leq ~4N^{-(\beta+1)}.
\]
As a result,
\[
{\hbox{\bf P}}\left( \max_{\idtf\in\suptf^c} |P(\idtf)| \geq 1\right) ~\leq
~8N^{-\beta}.
\]
\end{lemma}
\begin{proof}
The image of the dual vector $P$ is given by
\begin{equation*}
P := \begin{pmatrix} P_1(\idt) \\ P_2(\idf) \end{pmatrix}
= \Phi^* \Phi_\Gamma \, (\Phi^*_\Gamma \Phi_\Gamma)^{-1}\Rtf\operatorname{sgn}\alpha,
\end{equation*}
where the matrix $\Phi^* \Phi_\Gamma$ may be expanded in the time and
frequency subdomains as
\begin{equation*}
\Phi^* \Phi_\Gamma = \begin{pmatrix} \Rt^* & \FT^*\Rf^* \\ \FT\Rt^* & \Rf^* \end{pmatrix}.
\end{equation*}
Consider first $P_1(\idt)$ for $\idt\in\supt^c$ and let
$V_\idt\in\mathbb{C}^{|\suptf|}$ be the conjugate transpose of the row of the
matrix $\begin{pmatrix} \Rt^* & \FT^*\Rf^* \end{pmatrix}$ corresponding to index $\idt$.
For $\idt\in\supt^c$, the row of $\Rt^*$ with index $\idt$ is zero,
and $V_\idt$ is then the $(|\supt| + |\supf|)$-dimensional vector
\[
V_\idt = \begin{pmatrix} 0 \\
\left\{ \frac{1}{\sqrt{N}}e^{-i\idf\idt},~\idf\in\supf\right\} \end{pmatrix}.
\]
These notations permit us to express $P_1(\idt)$ as the inner product
\begin{eqnarray*}
P_1(\idt) & = &
\<(\Phi^*_\Gamma \Phi_\Gamma)^{-1}\Rtf\operatorname{sgn}\alpha, V_\idt \> \\
& = & \<\Rtf\operatorname{sgn}\alpha, (\Phi^*_\Gamma \Phi_\Gamma)^{-1}V_\idt \> \\
& = & \sum_{\idtf\in\suptf}\overline{W(\idtf)}\operatorname{sgn}\alpha(\idtf)
\end{eqnarray*}
where $W = (\Phi^*_\Gamma \Phi_\Gamma)^{-1}V_\idt$. The signs of
$\alpha$ on $\suptf$ are statistically independent of $\suptf$ (and
hence of $W$) and, therefore, for a fixed support set $\suptf$,
$P_1(\idt)$ is a weighted sum of independent complex-valued random
variables
\[
P_1(\idt) = \sum_{\idtf\in\suptf} X_\idtf
\]
with ${\hbox{\bf E}} X_\idtf = 0$ and $|X_\idtf| \leq |W(\idtf)|$. Applying the
complex Hoeffding inequality (see the Appendix) gives a
bound on the conditional distribution of $P_1(\idt)$
\[
{\hbox{\bf P}}\left( |P_1(\idt)| \geq 1 \,\, | \,\, \suptf \right) ~\leq ~
4\exp\left(-\frac{1}{4\|W\|_2^2} \right).
\]
Thus, it suffices to develop a bound on the magnitude of the vector
$W$.
Controlling the norm $\|(\Phi^*_\Gamma\Phi_\Gamma)^{-1}\|$
is essential here, as
\begin{equation}
\|W\| \le \|(\Phi^*_\Gamma \Phi_\Gamma)^{-1}\| \cdot \|V_\idt\|.
\end{equation}
On the one hand, $\|V_\idt\| = \sqrt{|\supf|/N}$ and as we have seen,
size estimates about $|\supf|$ give $\|V_\idt\| \le
\sqrt{2(p_T+p_\Omega)}$ with the desired probability. On the other
hand, we have also seen that $\|(\Phi^*_\Gamma \Phi_\Gamma)^{-1}\| \le
1 + o(1)$--- also with the desired probability---and,
therefore,
\[
\|W\|^2 \le 2 \cdot \left(1 + o(1)\right) \cdot (p_T +
p_\Omega).
\]
This gives
\[
{\hbox{\bf P}}\left( |P_1(\idt)| \geq 1\right) \leq 4\exp\left(-\frac{1}{8 (p_T +
p_\Omega) (1 + o(1))}\right).
\]
Select $p_T + p_\Omega$ as in \eqref{eq:taul1}. Then
\begin{equation*}
{\hbox{\bf P}}\left( |P_1(\idt)| \geq 1\right)
\leq 4 \exp(-(\beta+1)\log N)
\leq 4 N^{-(\beta+1)}
\end{equation*}
and
\[
{\hbox{\bf P}}\left( \max_{\idt\in\supt^c}|P_1(\idt)| \geq 1\right) ~\leq ~4N^{-\beta}.
\]
As we alluded earlier, the bound about the size of each individual
$P_1(\idt)$ one would obtain assuming that ${\hbox{\bf E}} |T| + {\hbox{\bf E}} |\Omega|$ be only
of the order $N/\sqrt{\log N}$ would not allow taking the supremum via
the standard union bound. Our approach requires ${\hbox{\bf E}} |T| + {\hbox{\bf E}} |\Omega|$
to be of the order $N/\log N$.
By the symmetry of the Fourier transform, the same is true for
$P_2(\idf)$. This finishes the proof of Lemma~\ref{th:Plt1} and of
Theorem \ref{th:qrupl1IF}.
\end{proof}
\section{Robust UPs and Basis Pursuit}
\label{sec:BPgeneral}
The results of Sections~\ref{sec:qrup} and \ref{sec:bpIF} extend to
the general situation where the dictionary $\Phi$ is a union of two
orthonormal bases $\Mt,\Mf$. In this section, we present results for
pairs of orthogonal bases that parallel those for the time-frequency
dictionary presented in Sections~\ref{sec:qrup} and \ref{sec:bpIF}.
The bounds will depend critically on the degree of similarity of $\Mt$
and $\Mf$, which we measure using the mutual incoherence defined
in \eqref{eq:mich}, $\ich:=\ich(\Mt,\Mf)$. As we will see, our
generalization introduces additional ``$\log N$'' factors. It is our
conjecture that bounds that do not include these factors exist.
As before, the key result is the quantitative robust uncertainty
principle. We use the same probabilistic setup to sample the support
sets $\Gamma_1$, $\Gamma_2$ in the $\Mt$ and $\Mf$ domains
respectively. The statement below is the analogue of
Theorem~\ref{th:qrupIF}.
\begin{theorem}
\label{th:qrupgen}
Let $\Phi:=\begin{pmatrix} \Mt & \Mf \end{pmatrix}$ be a dictionary composed of a union of
two orthonormal bases with mutual incoherence $\ich$. Suppose the
sampling parameters obey
\begin{equation}
\label{eq:taugenup}
{\hbox{\bf E}} |\Gamma_1| + {\hbox{\bf E}} |\Gamma_2|
\leq \frac{C_1}{\ich^2\cdot((\beta+1)\log N)^{5/2}}
\end{equation}
for some positive constant $C_1 > 0$. Assume $\mu \le 1/\sqrt{2(\beta
+1)\log N}$. Then with probability at least $1-O(\log N \cdot
N^{-\beta})$, every signal $f$ with $\Mt f$ supported on $\Gamma_1$ has
most of its energy in the $\Mf$-domain outside of $\Gamma_2$:
\[
\|\Mf f \cdot 1_{\Gamma_2}\|^2 \leq \|f\|^2/2,
\]
and vice versa. As a result, for nearly all pairs $(\Mt,\Mf)$ with
sizes obeying \eqref{eq:taugenup}, it is impossible to find a signal
$f$ supported on $\Gamma_1$ in the $\Mt$-domain and $\Gamma_2$ in the
$\Mf$-domain.
\end{theorem}
We would like to re-emphasize the significant difference between these
results and \eqref{eq:gendiscreteup}. Namely, \eqref{eq:taugenup}
effectively squares the size of the joint support since, ignoring
log-like factors, the factor $1/\mu$ is replaced by $1/\mu^2$. For
example, in the case where the two bases are maximally incoherent,
i.e. $\mu = 1/\sqrt{N}$, our condition says that it is nearly
impossible to concentrate a function in both domains simultaneously
unless (again, up to logarithmic factors)
\[
|\Gamma_1| + |\Gamma_2| \sim N,
\]
which needs to be compared with \eqref{eq:gendiscreteup}
\[
|\Gamma_1| + |\Gamma_2| \geq 2 \sqrt{N}.
\]
For mutual incoherences scaling like a power-law $\mu \sim
N^{-\gamma}$, our condition essentially reads $|\Gamma_1| + |\Gamma_2|
\sim N^{2\gamma}$ compared to $|\Gamma_1| + |\Gamma_2| \sim N^{\gamma}$.
The proof of Theorem~\ref{th:qrupgen} directly parallels that of
Theorem~\ref{th:qrupIF}, with $A:= R_{\supaf}\Mf^*\MtR_{\supat}^*$ playing the
role of the partial Fourier transform from $T$ to $\Omega$. Our
argument calls for bounds on the eigenvalues of the random matrix $A^*
A$ which we write as the sum of two terms; a diagonal and an
off-diagonal term
\[
A^* A = D + {\cal H}_1.
\]
We use large deviation theory to control the norm of $D$ while bounds
on the size of ${\cal H}_1$ are obtained by using moment estimates.
This calculation involves estimates about the expected value of the
Frobenius norm of large powers of $A^* A$ and is very delicate. We do
not reproduce all these arguments here (this is the scope of a whole
separate article) and simply state a result which is proved in
\cite{SparsityIncoherence}
\begin{equation}
\label{eq:A*A}
{\hbox{\bf P}}(\|A^* A\| \ge 1/2) \le C \cdot \log N \cdot N^{-\beta}
\end{equation}
for ${\hbox{\bf E}} |\Gamma_1| + {\hbox{\bf E}} |\Gamma_2|$ obeying \eqref{eq:taugenup} (here
$C$ is some universal positive constant). Now for \eqref{eq:A*A} to
hold, we also need that the incoherence be not too large and obeys
$\mu \le 1/\sqrt{2(\beta +1)\log N}$, which is the additional condition
stated in the hypothesis. The idea that $\mu$ cannot be too large is
somewhat natural as otherwise for $\mu = 1$, say, the two bases would
share at least one element and we would have $\|A^*A\| = 1$ as soon as
$\Gamma_1$ and $\Gamma_2$ would contain a common element. As we have
seen in section 3, the size estimate \eqref{eq:A*A} would then
establish the theorem.
The generalized $\ell_0$-uniqueness result
follows directly from Theorem~\ref{th:qrupgen}:
\begin{theorem}
\label{th:qrupl0gen}
Let $f = \Phi \alpha$ be an observed signal sampled as in
Section~\ref{sec:pmodel}, and with parameters obeying
\[
{\hbox{\bf E}} |\Gamma_1| + {\hbox{\bf E}} |\Gamma_2| \le \frac{C_2}{\ich^2\cdot((\beta+1)\log
N)^{5/2}}.
\]
Assume $\mu \le 1/\sqrt{2(\beta +1)\log N}$. Then with probability
$1-O(\log N \cdot N^{-\beta})$, the solution to $(P_0)$ is unique and
equal to $\alpha$.
\end{theorem}
The only change to the proof presented in Section~\ref{sec:l0uniq} is
in the analogue to Lemma~\ref{lm:dimnull}:
\begin{lemma}
\label{lm:dimnullgen}
Let $\Gamma_1,\Gamma_2$ be fixed subsets of $\{0,\ldots,N-1\}$, let
$\Gamma=\Gamma_1\cup\Gamma_2$, and let $Q_\Gamma$ be the $N\times
|\Gamma|$ matrix
\[
Q_\Gamma = \begin{pmatrix} \MtR_{\supat}^* & \MfR_{\supaf}^* \end{pmatrix}.
\]
If $|\Gamma| < 2/\ich^2$, then
\[
\operatorname{dim}(\operatorname{Null}(Q_\Gamma)) < \frac{|\Gamma|}{2}.
\]
\end{lemma}
The proof of Lemma~\ref{lm:dimnullgen} has exactly the same structure
as the proof of Lemma~\ref{lm:dimnull}. The only modification comes in
calculating the trace of $G^*G$; here each term can be bounded by
$\ich^2$, and we have $\operatorname{Tr}(G^*G) \leq 2(|\Gamma_1|\cdot
|\Gamma_2|)\ich^2$. Lemma~\ref{lm:dimnullgen} follows.
The conditions for the equivalence of $(P_0)$ and $(P_1)$ can also be generalized.
\begin{theorem}
\label{th:qrupl1gen}
Let $f = \Phi\alpha$ be a random signal generated as in
Section~\ref{sec:pmodel} with
\[
{\hbox{\bf E}} |\Gamma_1| + {\hbox{\bf E}} |\Gamma_2| \le \frac{C_3}{\ich^2\cdot
((\beta+1)\log N)^{5/2}}.
\]
Assume $\mu \le 1/\sqrt{2(\beta +1)\log N}$. Then with probability
$1-O(\log N \cdot N^{-\beta})$, the solutions of $(P_0)$ and $(P_1)$
are identical and equal to $\alpha$.
\end{theorem}
The proof of Theorem~\ref{th:qrupl1gen} is again almost exactly the
same as that we have already seen. Using Theorem~\ref{th:qrupgen},
the eigenvalues of $(\Phi^*_\Gamma \Phi_\Gamma)^{-1}$ are controlled,
allowing us to construct a dual vector meeting the conditions
\eqref{eq:Psgn} and \eqref{eq:Plt1} of Section~\ref{sec:duality}.
Note that the $(\log N)^{5/2}$ term in the denominator means that
${\hbox{\bf P}}(|P(\idtf)| \geq 1),~\idtf\in\Gamma^c$ goes to zero at a much faster
speed than a negative power of $N$; it decays as $\exp(- \rho (\log
N)^5)$ for some positive constant $\rho > 0$.
\section{Numerical Experiments}
\label{sec:numerical}
From a practical standpoint, the ability of $(P_1)$ to recover sparse
decompositions is nothing short of amazing. To illustrate this fact,
we consider a $256$ point signal composed of $60$ spikes and $60$
sinusoids; $|\supt|+|\supf|\approx N/2$, see Figure 1. Solving
$(P_1)$ recovers the original decomposition {\em exactly}.
We then empirically validate the previous numerical result by
repeating the experiment for various signals and sample sizes, see
Figure~\ref{fig:reccurves}. These experiments were designed as
follows:
\begin{enumerate}
\item set $N_\suptf$ as a percentage of the signal length $N$;
\item select a support set $\suptf = \supt\cup\supf$ of size
$|\suptf|=N_\suptf$ uniformly at random;
\item sample a vector $\alpha$ on $\suptf$ with independent and
identically distributed
Gaussian entries\footnote{The results presented here do not seem to depend
on the actual distribution used to sample the coefficients.};
\item make $f=\Phi\alpha$;
\item solve $(P_1)$ and obtain $\hat{\alpha}$;
\item compare $\alpha$ to $\hat{\alpha}$;
\item repeat $100$ times for each $N_\suptf$;
\item repeat for signal lengths $N=256,512,1024$.
\end{enumerate}
Figure~\ref{fig:reccurves}(a) shows that we are numerically able to
recover ``sparse'' superpositions of spikes and sinusoids when
$|\supt|+|\supf|$ is close to $N/2$, at least for this range of sample sizes
$N$ (we use quotation marks since decompositions of this order can
hardly be considered sparse). Figure~\ref{fig:reccurves}(b) plots the
success rate of the sufficient condition for the recovery of the
sparsest $\alpha$ developed in Section~\ref{sec:duality} (i.e. the
minimum energy signal is a dual vector). Numerically, the sufficient
condition holds when $|\supt|+|\supf|$ is close to $N/5$.
The time-frequency dictionary is special in that it is maximally
incoherent ($\ich = 1$). But as suggested in \cite{DonohoHuo},
incoherence between two bases is the rule, rather than the exception.
To illustrate this, the above experiment was repeated for $N=256$ with
a dictionary that is a union of the spike basis and of an orthobasis
sampled uniformly at random (think about orthogonalizing $N$ vectors
sampled independently and uniformly on the unit-sphere of $\mathbb{C}^N$). As
shown in Figure~\ref{fig:reccurvesrandom}, the results are very close
to those obtained with time-frequency dictionaries; we recover
``sparse'' decompositions of size about $|\Gamma_1| + |\Gamma_2| \leq 0.4\cdot
N$.
\newcommand{Figures}{Figures}
\begin{figure}
\centerline{
\begin{tabular}{cccccc}
$\alpha$ & $f = \Phi\alpha$ & & spike component & & sinusoidal component \\
\includegraphics[width=1.5in]{Figures/recovery_example_p1} &
\includegraphics[width=1.5in]{Figures/recovery_example_p2} &
\raisebox{0.6in}{{\bf =}} &
\includegraphics[width=1.5in]{Figures/recovery_example_p3} &
\raisebox{0.6in}{{\bf +}} &
\includegraphics[width=1.5in]{Figures/recovery_example_p4} \\
(a) & (b) & & (c) & & (d)
\end{tabular}
}
\caption{\small\sl Recovery of a ``sparse'' decomposition.
(a) Magnitudes of a randomly generated coefficient vector $\alpha$ with
$120$ nonzero components. The spike components are on the left
(indices 1--256) and the sinusoids are on the right (indices
257--512). The spike magnitudes are made small compared to the
magnitudes of the sinusoids for effect; we cannot locate the spikes
by inspection from the observed signal $f$, whose real part is shown
in (b). Solving $(P_1)$ separates $f$ into its spike (c) and
sinusoidal components (d) (the real parts are plotted). }
\label{fig:recex}
\end{figure}
\begin{figure}
\centerline{
\begin{tabular}{ccc}
$\ell_1$-recovery & & sufficient condition \\
\raisebox{0.8in}{\rotatebox{90}{\% success}}
\includegraphics[width=3in]{Figures/recovery_curves} & \hspace{5mm} &
\raisebox{0.8in}{\rotatebox{90}{\% success}}
\includegraphics[width=3in]{Figures/sufficient_curves} \\
$(|\supt|+|\supf|)/N$ & & $(|\supt|+|\supf|)/N$
\end{tabular}
}
\caption{\small\sl $\ell_1$-recovery for the time-frequency dictionary.
(a) Success rate of $(P_1)$ in recovering the sparsest decomposition
versus the number of nonzero terms. (b) Success rate of the
sufficient condition (the minimum energy signal is a dual vector).
}
\label{fig:reccurves}
\end{figure}
\begin{figure}
\centerline{
\begin{tabular}{ccc}
$\ell_1$ recovery & & sufficient condition \\
\raisebox{0.8in}{\rotatebox{90}{\% success}}
\includegraphics[width=3in]{Figures/random_recovery_curves} &
\hspace{5mm} &
\raisebox{0.8in}{\rotatebox{90}{\% success}}
\includegraphics[width=3in]{Figures/random_sufficient_curves} \\
$(|\supt|+|\supf|)/N$ & & $(|\supt|+|\supf|)/N$
\end{tabular}
}
\caption{\small\sl $\ell_1$-recovery for the spike-random dictionary.
(a) Success rate of $(P_1)$ in recovering the sparsest decomposition
versus the number of nonzero terms. (b) Success rate of the
sufficient condition. }
\label{fig:reccurvesrandom}
\end{figure}
\section{Discussion}
\label{sec:discussion}
In this paper, we have demonstrated that except for a negligible
fraction of pairs $(T, \Omega)$, the behavior of the discrete
uncertainty relation is very different from what worst case
scenarios---which have been the focus of the literature thus
far---suggest. We introduced probability models and a robust
uncertainty principle showing that for nearly all pairs $(T,
\Omega)$, it is actually impossible to concentrate a discrete signal
on $T$ and $\Omega$ simultaneously unless the size of the joint
support $|T| + |\Omega|$ be at least of the order of $N/\sqrt{\log
N}$. We derived significant consequences of this new uncertainty
principle, showing how one can recover sparse decompositions by
solving simple convex programs.
Our sampling models were selected in perhaps the most natural way,
giving to each time point and to each frequency point the same chance
of being sampled, independently of the others. Now there is little
doubt that conclusions similar to those derived in this paper would
hold for other probability models. In fact, our analysis develops a
machinery amenable to other setups. The centerpiece is the study of
the singular values of partial Fourier transforms. For other sampling
models such as those biased toward low or high frequencies for
example, one would need to develop analogues of Lemma
\ref{lm:auxeigs}. Our machinery would then nearly automatically
transform these new estimates into corresponding claims.
In conclusion, we would like to mention areas for possible improvement
and refinement. First, although we have made an effort to obtain
explicit constants in all our statements (with the exception of
Section~\ref{sec:BPgeneral}), there is little doubt that a much more
sophisticated analysis would yield better estimates for the singular
values of partial Fourier transforms, and thus provide better
constants. Another important question we shall leave for future
research, is whether the $1/\sqrt{\log N}$ factor in the QRUP (Theorem
\ref{th:qrupIF}) and the $1/\log N$ for the exact
$\ell_1$-reconstruction (Theorem \ref{th:qrupl1IF}) are necessary.
Finally, we already argued that one really needs to randomly sample
the support to derive our results but we wonder whether one needs to
assume that the signs of the coefficients $\alpha$ (in $f = \Phi
\alpha$) need to be randomized as well. Or would it be possible to
show analogs of Theorem \ref{th:qrupl1IF} ($\ell_1$ recovers the
sparsest decomposition) for all $\alpha$, provided that the support of
$\alpha$ may not be too large (and randomly selected)? Recent work
\cite{OptimalRecovery,LPdecode} suggests that this might be
possible---at the expense of additional logarithmic factors.
\section{Appendix: Concentration-of-Measure Inequalities}
The Hoeffding inequality is a well-known large deviation bound for
sums of independent random variables. For a proof and interesting
discussion, see \cite{Lugosi-Notes}.
\begin{lemma}
\label{lm:hoeffding}
(Hoeffding inequality) Let $X_0,\ldots,X_{N-1}$ be independent
real-valued random variables such that ${\hbox{\bf E}} X_j = 0$ and $|X_j| \leq
a_j$ for some positive real numbers $a_j$. For $\epsilon > 0$
\[
{\hbox{\bf P}}\left(\left| \sum_{j=0}^{N-1} X_j \right| \geq \epsilon\right) \leq
2\exp\left(-\frac{\epsilon^2}{2\|\mathbf{a}\|^2_2}\right)
\]
where $\|\mathbf{a}\|^2_2 = \sum_j a^2_j$.
\end{lemma}
\begin{lemma}
\label{lm:choeffding}
(complex Hoeffding inequality) Let $X_0,\ldots,X_{N-1}$ be independent
complex-valued random variables such that ${\hbox{\bf E}} X_j = 0$ and $|X_j| \leq
a_j$. Then for $\epsilon > 0$
\[
{\hbox{\bf P}}\left(\left|\sum_{j=0}^{N-1} X_j \right|\geq \epsilon\right) \leq
4\exp\left(-\frac{\epsilon^2}{4\|\mathbf{a}\|^2_2}\right).
\]
\end{lemma}
\begin{proof}
Separate the $X_j$ into their real and imaginary parts; $X^{\sf r}_j =
\Re X_j,~X^{\sf i}_j = \Im X_j$. Clearly, $|X^{\sf r}_j| \leq a_j$ and
$|X^{\sf i}_j| \leq a_j$. The result follows immediately from
Lemma~\ref{lm:hoeffding} and the fact that
\[
{\hbox{\bf P}}\left(\left|\sum_{j=0}^{N-1}X_j \right|\geq\epsilon\right) \leq
{\hbox{\bf P}}\left(\left|\sum_{j=0}^{N-1}X_j^{\sf r} \right|\geq\epsilon/\sqrt{2}\right) +
{\hbox{\bf P}}\left(\left|\sum_{j=0}^{N-1}X_j^{\sf i} \right|\geq\epsilon/\sqrt{2}\right).
\]
\end{proof}
|
1,314,259,995,030 | arxiv | \section*{Key Words}
Anomalous, diffusion, lipid bilayers, membranes, urea.
\section*{Introduction}
Diffusion is a vital process that underpins many cellular functions, including protein organisation \cite{Sheets1997}, signalling \cite{Choquet2003a, Kholodenko2006}, and cell survival \cite{Cheema2011}. In living systems diffusion rarely follows the Brownian motion predicted by a simple random walk model but instead exhibits `anomalous' subdiffusion, whereby the rate of diffusion is dependent on the timescale of observation \cite{Saxton1994}. Anomalous subdiffusion has been observed in 3D in the cytosol \cite{Regner2013} and in 2D in plasma membranes \cite{Hofling2013, Fujiwara2002a, Golan2017a}. The underlying mechanism for anomalous subdiffusion in membranes is thought to involve molecular crowding \cite{Kusumi2005a}, with contributions from slower-moving obstacles \cite{Saxton1987, Berry2014}, pinning sites, and compartmentalisation \cite{Kusumi2005a, Fujiwara2002a, Murase2004}; reviewed comprehensively elsewhere \cite{Saxton2012}. The notion that the cell membrane is a homogenous entity in which lipids and proteins are free to diffuse unhindered, as per the `fluid mosaic model' \cite{Singer1972}, has in recent years been re-evaluated to accommodate increased levels of complexity \cite{Kusumi2005a}.
Anomalous diffusion can be modelled by a power law:
\begin{equation} \label{eq:1}
\big \langle \Delta r^2 \big \rangle = 4 \Gamma \Delta t^\alpha,
\end{equation}
\noindent where the conventional diffusion coefficient $D$ is replaced by an anomalous transport coefficient $\Gamma$, whose dimensions change for different degrees of anomalous behaviour. The anomalous coefficient $\alpha$ defines whether the diffusion is normal ($\alpha = 1$), sub-diffusive ($\alpha < 1$) or super-diffusive ($\alpha > 1$).
The units of $\Gamma$ vary with the degree of anomalous behaviour, which presents a challenge of interpretation. However, by de-dimensionalising the observation time \cite{Saxton1994} with a `jump time' $\tau$,
\begin{equation} \label{eq:2}
\big \langle \Delta r^2 \big \rangle = 4D\Delta t \Big( \frac{\Delta t}{\tau} \Big)^{\alpha-1},
\end{equation}
\noindent the length-scale $\lambda$ associated with the 2D anomalous behaviour can be defined ($\lambda = \sqrt{4D\tau}$).
Artificial bilayers have been critical in furthering our understanding of anomalous diffusion \cite{Schutz1997, Ratto2003, Horton2010, Spillane2014a, Wu2016, Rose2015}. In supported lipid bilayers (SLBs), phase separation \cite{Ratto2003}, protein binding \cite{Horton2010}, and defect formation \cite{Coker2017} have been used to generate anomalous diffusion. Simulations have also played a vital role\cite{Saxton1994, Saxton1989, Saxton2001, Stachura2014, Mardoukhi2015, Koldso2016, Jeon2016, Bakalis2015, Javanainen2013}, in particular those linking the role of mobile and immobile obstacles within the bilayer to the phenomenon \cite{Saxton1987, Berry2014}. Simulations have also provided the means to better interpret single particle tracking (SPT) data \cite{Kepten2015}, as well as methods for discriminating between distinct classes of anomalous diffusion \cite{Metzler2014}.
In order to elucidate the specific molecular mechanisms giving rise to anomalous subdiffusion {\itshape in vivo}, there is a need for experimental models which are able to exhibit readily tuneable anomalous subdiffusion of a biologically relevant magnitude \cite{Saxton2012}. Recently we used SPT to sample anomalous behaviour over four orders of magnitude of time by forming SLBs containing varying mole fractions of lipids functionalised with polyethylene glycol (PEG), thereby controlling nanoscale obstacle formation \cite{Coker2017}. Here, we make use of urea as a chaotropic agent, with reported ability to alter the physical properties of lipid bilayers \cite{Nowacka2012, Costa-Balogh2006, Yu2001, Yeagle1986a}. Urea is present at high concentrations in the tissues of deep-sea elasmobranchs (sharks, skates, rays) \cite{Smith1929} and is also part of the Natural Moisturising Factor in skin \cite{Rawlings2004}, where it is thought to offer cell membranes protection from osmotic shock due to highly saline or dehydrating conditions by stabilising the lamellar liquid phase. Here we use single-molecule total internal reflection fluorescence (smTIRF) and perform SPT to evaluate urea as a means to induce anomalous diffusion in pre-formed SLBs.
\section*{Materials and Methods}
\subsection*{Materials}
1,2-dicapryl-{\itshape sn}-glycero-3-phosphocholine (DCPC) was purchased from Avanti Polar Lipids (Alabaster, AL). Texas Red 1,2-dihexadecanoyl-{\itshape sn}-glycero-3-phosphoethanolamine triethylammonium salt (TR-DHPE) and 1,2-dipalmitoyl-{\itshape sn}-glycero-3-phosphoethanolamine-N-[methoxy(polyethylene glycol) - 5000] ammonium salt (PEG(5K)-DPPE) was purchased from Lipoid (Ludwigshafen, Germany). Unless stated, all other chemicals were purchased from Sigma-Aldrich. All aqueous solutions were prepared using doubly deionized 18.2 M$\Omega$ cm MilliQ water.
\subsection*{Supported Lipid Bilayers}
SLBs were prepared on glass coverslips by fusion of small unilamellar vesicles (SUVs) \cite{Brian1984} made from 1.77 mM DCPC doped with 1.0 mol\% PEG(5K)-DPPE and $3 \times 10^{-6}$ mol\% TR-DHPE. The addition of PEG-functionalised DPPE (below the mol\% required to induce anomalous diffusion \cite{Coker2017}) helps improve bilayer fluidity by raising the bilayer, thereby reducing interactions between the lipids in the lower leaflet and underlying glass \cite{Albertorio2005}. Texas Red-labelled DHPE was also included in order to assess the diffusive properties of the bilayer using smTIRF.
Lipid mixtures were first dried with nitrogen and placed under vacuum overnight. The dried lipids were hydrated with water and vortexed before tip sonication (Vibracell VCX130PB with CV188 tip, Sonics \& Materials, Newtown, CA) for 15 minutes at 25\% amplitude. The resulting clear vesicle suspension was centrifuged (3 minutes; 14000 $\times$ \emph{g}) before the supernatant was retained and any titanium residue (from the sonicator probe) was discarded. SUV preparations were stored at 4$^{\circ}$C for up to 48 hours.
Glass coverslips were rigorously cleaned using stepwise bath sonication with DECON-90, MilliQ water, and propan-2-ol for 20 minutes each. Immediately before use, the glass was dried under nitrogen and cleaned with oxygen-plasma treatment for 3 minutes (Diener Electronic, Femto). A well was created on each coverslip using vacuum grease (Dow Corning). The coverslip was heated to 37$^{\circ}$C before 50 $\upmu$L of SUV stock were diluted 1:1 in buffer (250 mM NaCl, 10 mM EDTA, 20 mM Tris pH 7.0) and added to the chamber immediately. DCPC SLBs were produced by fusion of the SUVs onto the glass coverslip. The vesicles were incubated for 30 minutes before the membranes were washed thoroughly with degassed MilliQ water followed by buffer.
Urea was added (or removed) by buffer exchange via pipetting; all but 50 $\upmu$L of fluid above the SLB was replaced with 200 $\upmu$l of the new buffer (containing 0.2, 0.5, or 1M urea), a minimum of 5 times. Bilayers were imaged 15 seconds after buffer exchange.
\subsection*{Total Internal Reflection Fluorescence Microscopy}
532 nm continuous-wave laser light was focussed at the back aperture of an objective lens (60$\times$ TIRF oil-immersion NA 1.49, Nikon, $\sim$1.4 kW cm$^{-2}$) such that total internal reflection occurred at the coverslip/sample interface. The excited TR-DHPE fluorescence was transmitted through 545 nm dichroic and 550 nm longpass filters before being imaged with an electron-multiplying CCD camera (Andor iXon). The inverted microscope objective was heated to maintain 37$^{\circ}$C at the sample throughout imaging; above the transition temperature for this lipid to ensure the bilayer was in the liquid phase. Bilayers were imaged at an exposure time of 20 ms for 5000 frames.
\subsection*{Single Particle Tracking}
SPT was performed using TrackMate \cite{Tinevez2017a}, a plugin for ImageJ \cite{Schneider2012a}. The space-time co-ordinates of the output tracks were used to calculate mean-squared displacements calculated for different observation times using custom-written procedures in MATLAB (MathWorks) as described previously \cite{Coker2017}.
\section*{Results}
Diffusion of TR-DHPE in the DCPC SLB was fast (6 $\upmu$m$^{2}$ s$^{-1}$) and normal ($\alpha$ = 1.01 $\pm$ 0.01) in the absence of urea (Fig. 1A\&B). In the presence of 1M urea, the diffusion became slower and more anomalous over time (Fig. 1C). $\alpha$ decreased roughly linearly to 0.38 and the transport coefficient ($\Gamma$) showed an approximately exponential decrease to 0.02 $\upmu$m$^{2}$ s$^{-\alpha}$ (Fig.~1D) over a 10 minute period. Although $\Gamma$ values cannot be directly compared (because they depend on $\alpha$, which is also changing), a linear change in $\alpha$ would be expected to cause an overall exponential change in $\Gamma$, as we report.
\begin{figure}[H]
\centering
\includegraphics[keepaspectratio, width=1\textwidth]{Fig1.pdf}
\caption{\textbf{Time dependence of anomalous behaviour induced by 1 M urea} (A) Spot locations of tracked TR-DHPE in the absence of urea (left) and after the addition of 1M urea at four time points. Urea was removed by buffer exchange at 200-300 s. Image size: 3 $\times$ 3 $\upmu$m (B) Anomalous sub-diffusion increases over time from 15 seconds (turquoise) to 10 minutes (dark blue). (C) Linear decrease of $\alpha$ over time, at a rate of 9.7 $\times$ 10$^{-4}$ s$^{-1}$. (D) Exponential decrease of $\Gamma$ over time, {\itshape t$_{1/2}$} = 69 s. Error bars throughout represent standard errors from a minimum of 250 tracks.}
\label{fig:X}
\end{figure}
Increasing the urea concentration of the buffer surrounding the SLB incrementally from 0 to 1 M, with a fixed short incubation time (15 s), resulted in increasingly slower diffusion (Fig. 2A). The behaviour is largely normal at this short interval, with only a modest decrease of $\alpha$ (to 0.94) at the highest concentration tested (Fig.~2B). An exponential decrease in $\Gamma$ with increasing urea concentration was observed (Fig. 2C). From the linear relationship between log$_{10}$($\Gamma$/$D$) and $\alpha$ (Fig. 2D) the characteristic length-scale ($\lambda$) associated with the system was calculated to be 45.1 nm, with a jump time ($\tau$) of 86.1 $\upmu$s.
\begin{figure}[H]
\centering
\includegraphics[keepaspectratio, width=0.7\textwidth]{Fig2.pdf}
\caption{\textbf{Effect of urea concentration on lipid diffusion in an SLB} (A) Diffusion of lipids becomes slower as urea concentration of the surrounding buffer is increased from 0 (black) to 1M (red). (B) Decrease of $\alpha$ with increasing urea concentration. (C) Exponential decrease of $\Gamma$ with increasing urea concentration. (D) Plot of log$_{10}$ ($\Gamma$/D) vs. $\alpha$ with linear fit. Blue: Data from 1M urea timecourse (see Fig. 1); Orange: Data from urea titration (This figure). Error bars represent standard errors.}
\label{fig:Y}
\end{figure}
\section*{Discussion}
We observe that urea causes diffusion in DCPC SLBs to become irreversibly slower and more anomalous in a time and concentration-dependent manner. Given our previous experiments reporting defect-mediated anomalous diffusion using PEG-doping of SLBs \cite{Coker2017}, it is appealing to suggest that a similar mechanism must operate for urea. For this case, urea would associate with the bilayer, where its chaotropic nature would act to induce the removal of bilayer patches from the glass coverslip surface, producing defects visible as excluded areas of the surface corresponding to those observed in Fig 1A. However, there is little evidence that urea acts directly to solubilise or otherwise permeabilise lipid bilayers \cite{Costa-Balogh2006}, and this hypothesis would rely on urea acting at the glass-lipid interface.
An alternative explanation for our results would be the action of urea to alter lipid phase behaviour, inducing phase coexistence\cite{Nowacka2012}. Unfortunately, the evidence supports a mode of action whereby urea stabilizes the liquid disordered phase \cite{Nowacka2012, Costa-Balogh2006}, suppressing phase separation rather than encouraging it. In our experiments, we observe a decrease in the area fraction of mobile lipids, which is the opposite trend.
A final hypothesis would be the action of urea not on the bilayer, but on the PEG-DHPE. A chaotropic effect on the PEG might act to increase the area fraction occupied by the PEG, which would then again drive the formation of defects in the membrane \cite{Coker2017}.
The effect that urea has on diffusion is not only irreversible, but appears to progress even once urea is removed from the bulk solution. The half-life for this process at 1M urea was short (69 s) and was finished after approximately 500 s. We speculate that either our (1000-fold dilution) washing procedure must be ineffective, or there is a more long-lived, direct, interaction between urea and the bilayer. Given the low partition coefficient for urea in lipid bilayers \cite{Diamond1974} and the evidence from studies of multilamellar phases that it remains primarily in the aqueous layers between bilayers \cite{Costa-Balogh2006}, it is difficult to rationalize this as a possible mechanism.
Further work is needed to distinguish between these different possible mechanisms either by viewing the defects directly (e.g. by atomic force microscopy) or by restoring the defects by addition of fresh SUVs.
\section*{Conclusion}
We have presented preliminary findings demonstrating a novel approach to controlling anomalous subdiffusion in SLBs on a scale relevant to biological systems \cite{Murase2004, Schutz1997} by incorporating urea into the aqueous medium surrounding a supported lipid bilayer. Although this work involved the use of DCPC, it would be interesting to extend the method to other, more biologically-relevant lipid compositions. As a complementary method to the inclusion of PEG-lipids, we see potential for this approach for producing a simple membrane model with defined anomaleity.
\section*{Authors' Contributions}
EEW performed the experiments, HLEC and MRC performed the analysis, MIW secured the funding; all authors wrote and reviewed the manuscript.
\section*{Competing Interests}
The authors declare no competing interests.
\section*{Funding}
We thank the European Research Council for providing funding for this work (ERC-2012-StG-106913, CoSMiC).
\section*{References}
\printbibliography[heading=none]
\clearpage
\end{document}
|
1,314,259,995,031 | arxiv | \section{Introduction}
The CMS detector has a large acceptance and hermetic coverage. The various
subdetectors are: a silicon tracker with pixels and strips ($|\eta|<2.4$),
electromagnetic ($|\eta|<3$) and hadronic ($|\eta|<5$) calorimeters, muon
chambers ($|\eta|<2.4$). The acceptance is further extended with forward
detectors ($|\eta|<6.8$). CMS detects leptons and hadrons, both
charged and neutral ones. In the following, capabilities in soft, hard and
forward physics are described. For a very recent extensive review see
Ref.~1.
\section{Soft physics}
The minimum bias trigger will be based on the requirement of a symmetric
number of hits in both forward calorimeters ($3<|\eta|<5$, see
Fig.~\ref{fig:hftrig}). For Pb-Pb collisions the centrality
trigger will be provided by correlating barrel and forward energies. The
charged particle multiplicity can be measured event-by-event using hits in
the innermost pixel layer with about 2\% accuracy and systematics below 10\%.
\begin{figure}[h]
\begin{minipage}[c]{0.45\textwidth}
\includegraphics[width=\textwidth,angle=-90]{OnSel_Eff_ETowers_rf.eps}
\end{minipage}
\hspace{0.02\textwidth}
\begin{minipage}[c]{0.45\textwidth}
\includegraphics[width=\textwidth,angle=-90]{dNchdeta_EandEtcuts_rf.eps}
\end{minipage}
\caption{Left: Estimated loss of low multiplicity events due to triggering
requirements on nTowers for cuts on $E$ in minimum bias p-p collisions.
Right: Pseudo-rapidity distribution of charged hadrons in central Pb-Pb
collisions at 5.5~TeV from the Hydjet generator. Particle selection to mimic
the level-1 trigger is applied for total $\langle E \rangle$ and transverse
$\langle E_T \rangle$ energy.}
\label{fig:hftrig}
\end{figure}
CMS can study soft physics better than previously thought. Using a
modified pixel hit triplet finding algorithm, charged particles down to very low $p_T$
can be reconstructed (Fig.~\ref{fig:lowpt}-left). Particle identification
using energy loss in silicon is possible if $p < 1$--$2$~GeV/$c$, benefiting
from analogue readout. Acceptances and efficiencies are at 80--90\%, the $p_T$
resolution is about 6\%. At the same time low fake track rate is achieved
thanks to the geometrical shape of the hit cluster: below
10\% even in central Pb-Pb for $p_T>0.4$ GeV/$c$. This enables the study of identified particle
spectra (down to $p_T$ of $0.1$--$0.3$~GeV/$c$) and yields, multiplicity
distributions and correlations. Weakly decaying resonances are
accessible if the found tracks are combined and selected via decay topology:
strange neutral particles ($\mathrm{K^0_S}$, Fig.~\ref{fig:lowpt}-center,
$\Lambda$, $\overline{\Lambda}$), multi-strange baryons ($\Xi^-$,
$\Omega^-$). Also open charm ($\mathrm{D^0}$, $\mathrm{D^{*+}}$) and open beauty
($\mathrm{B} \rightarrow \mathrm{J/\psi} + \mathrm{K}$) can be studied.
In Pb-Pb collisions azimuthal correlations give information on the
viscosity and parton density of the produced matter. The event plane can be
reconstructed using calorimetry. The estimated event plane resolution is
about 0.37~rad if $b =$ 9~fm. The second moment $v_2$ can be measured with
about 70\% accuracy. The results will improve by adding tracker information
and using forward detectors, such as the zero degree calorimeter.
\begin{figure}[!h]
\begin{center}
\includegraphics[width=0.37\textwidth]{geometricalAcceptancePt.eps}
\includegraphics[width=0.37\textwidth]{mass_ka_moriond.eps}
\includegraphics[width=0.24\textwidth]{cSigMBvsHLTRatesInt_v9_col_20061221.eps}
\end{center}
\caption{Left: Acceptance of the track reconstruction algorithm as a
function of $p_T$, for tracks in the range $|\eta|<1$. Values are given
separately for pions (circles), kaons (triangles) and (anti)protons
(squares). Center: Invariant mass distribution of reconstructed
$\mathrm{K^0_S}\rightarrow \pi^+\pi^-$ in single minimum bias p-p collisions.
The mass distribution of the background is indicated with a black dashed
histogram. Right: Minimum bias and high level trigger $\mathrm{J/\psi}$,
$\Upsilon$, and jet trigger rates for design luminosity in central Pb-Pb
collisions.}
\label{fig:lowpt}
\end{figure}
\section{Hard physics}
Interesting events are selected first by the level-1 trigger. It is a fast
hardware trigger, decisions are made within about 3~$\mu$s after the
collision. It mostly uses signals from the muon chambers and calorimeters.
After that step the event rate is still high, the efficient observation of
rare hard probes requires a high level trigger (HLT). The trigger uses about
ten thousand CPUs working with the full event information including data from
the silicon tracker. A detailed study has been done with running offline
algorithms by parametrising their performance. Trigger tables are produced
considering various channels and luminosity scenarios
(Fig.~\ref{fig:lowpt}-right).
\begin{figure}
\begin{center}
\includegraphics[width=0.49\textwidth]{1month_pb_lm_as_rv1_jpsi_barrel.eps}
\includegraphics[width=0.49\textwidth]{1month_pb_lm_as_rv1_upsi_barrel}
\end{center}
\caption{(color online) Invariant mass spectra of opposite-sign and like-sign muon pairs
with $\mathrm{d}N_{ch}/\mathrm{d}\eta|_{\eta=0}$ = 2500, in the
$\mathrm{J/\psi}$ (left) and $\Upsilon$ (right) mass regions.}
\label{fig:qqbar}
\end{figure}
Charmonium and bottomonium resonances can report on the thermodynamical state
of the medium via their melting. It is an open question whether they are
regenerated or suppressed at LHC energy. They can be reconstructed in the
dimuon decay channel with help of precise tracking. Acceptances are at 25\%
($\Upsilon$) and 1.2\% ($\mathrm{J/\psi}$) with 80\% efficiency and 90\%
purity. The mass resolution is 86~MeV/$c^2$ at the $\Upsilon$ mass and
35~MeV/$c^2$ at the $\mathrm{J/\psi}$ mass, in the full acceptance,
and even better in the barrel
(Fig.~\ref{fig:qqbar}). This is the best resolution achieved at the LHC. With
help of the HLT, 50 times more $\mathrm{J/\psi}$ and 10 times more $\Upsilon$ will be
collected.
\begin{figure}[h]
\begin{center}
\includegraphics[width=0.45\textwidth]{trigJetsall.eps}
\hspace{0.05\textwidth}
\includegraphics[width=0.43\textwidth]{trigRaa.eps}
\end{center}
\caption{Left: Expected inclusive jet $E_T$ distributions in 10 centrality
bins. Right: Expected statistical reach for the nuclear modification factor
for inclusive charged hadrons. For both figures, central Pb-Pb collisions at
5.5~TeV have been generated by Hydjet, with integrated luminosity of
0.5~$\mathrm{nb^{-1}}$.
}
\label{fig:hltjet}
\end{figure}
Finding jets on top of a high background is a challenge in Pb-Pb collisions.
Jets are reconstructed using a pile-up subtraction algorithm. It consists of
an
iterative jet cone finder and an event-by-event background subtraction. For
100~GeV jets the directional resolutions are $\sigma_\eta \approx$ 2.8\%,
$\sigma_\phi \approx$ 3.2\%, while the energy resolution is $\sigma_{E_T}
\approx$ 16\%. Thanks to the HLT, the reach of the jet $E_T$ measurement can
be
extended to about 0.5~TeV (Fig.~\ref{fig:hltjet}-left). The data sets,
triggered with 50, 75 and 100 GeV, are merged with a simple scaling
procedure.
Parton energy loss in the hot and dense medium created in Pb-Pb collisions
can be studied by measuring the nuclear modification factors $R_{AA}$ and
$R_{CP}$. High $p_T$ charged particles can be tracked with about 75\%
algorithmic efficiency, few percent fake track rate for $p_T>1$ GeV/$c$ and
excellent momentum resolution. Using the HLT, the $p_T$ reach of the measurement is
extended from 90 to 300~GeV/$c$ (Fig.~\ref{fig:hltjet}-right).
\section{Forward physics}
The study of diffractive photoproduction of vector mesons in ultraperipheral
Pb-Pb collisions can constrain the gluon density at small $x$
(Fig.~\ref{fig:upc}-left). The decay channels $\rho\rightarrow\pi^+\pi^-$ and
$\Upsilon\rightarrow\mathrm{e}^+\mathrm{e}^-$ or $\mu^+\mu^-$ have been
studied, tagged with forward neutron detection in the zero degree
calorimeter. The combined acceptance and efficiency of the method is around 20\%
and it gives a good mass resolution in both channels
(Fig.~\ref{fig:upc}-centre and right).
\begin{figure}[h]
\begin{center}
\begin{minipage}[c]{0.29\textwidth}
\includegraphics[width=\textwidth]{DIS_nuclear_x_Q2_map}
\end{minipage}
\hspace{0.02\textwidth}
\begin{minipage}[c]{0.33\textwidth}
\includegraphics[width=\textwidth]{dNdminv_signalbckgd_nice_ee}
\end{minipage}
\begin{minipage}[c]{0.33\textwidth}
\includegraphics[width=\textwidth]{dNdminv_signalbckgd_nice_mumu}
\end{minipage}
\end{center}
\caption{Left: The approximate $(x,Q^2)$ range covered by photoproduction in
ultraperipheral Pb-Pb collisions at the LHC is indicated. Centre and right: Invariant
mass $\mathrm{e^+e^-}$ and $\mu^+\mu^-$ distributions for photoproduced
$\Upsilon$ and dilepton continuum, as expected in ultraperipheral Pb-Pb
collisions at 5.5~TeV, for integrated luminosity of 0.5~$\mathrm{nb^{-1}}$.}
\label{fig:upc}
\end{figure}
\section{Summary}
The CMS detector combines capabilities for global event characterization and
for physics with specific probes. It performs equally well in soft, hard and
in forward physics, often supported by high level triggering.
\section*{Acknowledgment}
The author wishes to thank the Hungarian Scientific Research Fund (T
048898).
\section*{References}
|
1,314,259,995,032 | arxiv | \section*{Introduction} Co-citation, ``the frequency with which two documents from the earlier literature are cited together in the later literature'', was first described in
1973~\cite{marshakova-shaikevich_system_1973,small_co_citation_1973}. As noted by \cite{small_co_citation_1973},
co-citation patterns differ from bibliographic coupling patterns~\cite{kessler_bibliographic_1963} but align with patterns of direct citation, and
frequently co-cited publications must have high individual citations.
Co-citation has been the subject of further study and characterization, for example, comparisons to bibliographic coupling and direct
citation~\cite{boyack_cocitation_2010}, the study of invisible colleges~\cite{gmur_co-citation_2003,noma_co-citation_1984},
construction of networks by co-citation~\cite{small_clustering_1985,small_clustering_1985-1}, evaluation of clusters in combination with
textual analysis~\cite{braam_mapping_1991}, textual similarity at the article and other levels~\cite{colavizza_closer_2018},
and the fractal nature of publications aggregated by co-citations~\cite{vanraan_fractal_1990}.
Co-citations provide details of the relationship between key (highly cited) ideas, and changes in co-citation patterns over time may provide insight into
the mechanism with which new schools of thought develop. Implicit in the definition of co-citation is novel combinations of existing ideas, but
only some frequently co-cited article pairs reflect surprising combinations. For example, two publications presenting the leading methods for the same
computational problem may be highly co-cited, but this does not reflect a novel combination of ideas. Similarly, two publications describing methods that often constitute
part of the same workflow may be highly co-cited, but these co-citations are also not surprising.
On the other hand, for two articles in different fields, frequent co-citation is generally unexpected.
Novel, atypical, or otherwise unusual combinations of co-cited articles have been explored at the
journal-level~\cite{wang_2017,bradley_co-citations_2019,boyack_vs_uzzi_2014,uzzi_atypical_2013}. However,
journal-level classifications have limited resolution relative to article-level studies, which may better represent the actual structure and aggregations of the scientific
literature~\cite{shu_comparing_2019,article_boyack_topic,waltman_new_2012,article_stasa,article_gomez_journal}. Accordingly, we sought to discover measurable
characteristics of frequently co-cited publications from an article-level perspective.
To study frequently co-cited articles, we have developed a novel graph-theoretic approach that reflects the citation
neighborhood of a given pair of articles. In seeking to determine the degree to which a co-cited pair of papers represented a surprising combination,
we wished to avoid journal-based field classifications, which present challenges.
Instead, we attempted to use citation history to produce an estimate of the
probability that a given pair of publications $(x, y)$ would be co-cited. Since we focus on the activity before they are first co-cited, the
``probability'' of co-citation is zero, by definition, since there are no co-citations yet. Hence, we approximated co-citation probabilities: we treat an article that cites one member
of a co-cited pair and also cites at least one article that cites the other member as a proxy for co-citation. Specifically, given a pair of publications $x,y$, we construct a directed bipartite graph
whose vertex set contains all publications that cite either $x$ or $y$ previous to their first co-citation. We then compute $\theta$, a normalized count of such
proxies, and use it to predict the probability of co-citation between $x$ and $y$. This approach enables an evaluation that is specific to the given pair of articles,
and does so without substantial computational cost, while avoiding definitions of disciplines derived
from journals or having to measure disciplinary distances.
To support our analysis, we constructed a dataset of articles from Scopus~\cite{scopus_ref} that were published in the eleven year period, 1985-1995, and
extracted the cited references in these articles. Recognizing that frequently co-cited publications must derive from highly-cited publications~\cite{small_co_citation_1973},
we identified those reference pairs (33.6 million pairs) for each article in the dataset that are drawn from the top 1\% most cited articles in Scopus and measured their frequency
of co-citation.
To investigate which statistical distributions might best describe the co-citation frequencies in these 33.6 million co-cited pairs, we reviewed prior work on distributions of citation frequency~\cite{radicchi_statistical_2008,eom_2011,price_1965,price_general_1976,newman_structure_2003,wang_quantifying_2013,stringer_statistical_2008,stringer_statistical_2010,redner_statistical_2005}.
This research has fit the frequency distribution of citation strength sometimes to a power law distribution and other times to a lognormal distribution.
A graph of the analogous co-citation data suggests that power law or lognormal distributions are candidates for describing co-citation strength as well and so we, accordingly,
investigated that conjecture. Interestingly, \cite{Mitzenmacher_2003} notes the debate between the appropriateness of power law versus lognormal distributions is
not confined to bibliometrics, but has been at issue in many disciplines and contexts.
To study how the best-fit distributional function and parameters for co-citation might vary with $\theta$, we stratified co-citation frequency data. We also measured whether a direct link exists between
two members of a co-cited pair (i.e., whether one member of a pair cites the other) and how this property is related to co-citation frequencies.
We find that the distribution of co-citation frequencies varies with $\theta$ and that a
power law distribution fits co-citation frequencies more often when $\theta$ is small, whereas a lognormal distribution fits more often for large $\theta$.
A pertinent aspect of co-citation is the rate at which frequencies accumulate. While citation dynamics of individual publications have been fairly well studied
by others, for example, \cite{wallace_2009,eom_2011}, the dynamics of co-cited articles are less well studied. Our interest was the special case analogous to the Sleeping Beauty
phenomenon~\cite{vanraansleeping2004,ke_defining_2015}, which may reflect delayed recognition of scientific discovery and the causes attributed to
it~\cite{merton_1963,garfield_1970,garfield_1980,cole_1970,barber_1961,glanzel_myth_2004}. Thus, we also identified co-cited pairs that
featured a period of dormancy before accumulating co-citations.
\section*{Materials \& Methods} \emph{Data} Citation counts were computed for all Scopus articles (88,639,980 records) updated through December 2019, as implemented in the
ERNIE project~\cite{GithubERNIE2019}. Records with corrupted or missing publication years or classified as `dummy' by the vendor were then removed,
resulting in a dataset of 76,572,284 publications. Hazen percentiles of citation counts, grouped by year of publication, were calculated for these
data~\cite{bornmann_use_2013}. The top 1\% of highly cited publications from each year were combined into a set of highly cited publications consisting of 768,993 publications.
Publications of type `article', each containing at least five cited references and published in the 11 year period from 1985-1995, were subset from Scopus to form a dataset
of 3,394,799 publications and 51,801,106 references (8,397,935 unique). For each of these publications, all possible reference pairs were generated and then restricted to
those pairs where both members were in the set of highly cited publications (above).
For example, the data for 1985 consisted of 223,485 articles after processing as described above. Computing all reference pairs (that were also members of the highly cited
publication set of 768,993) from these 223,485 articles gave rise to 2,600,101 reference pairs (Table~\ref{tab:tab1}) that ranged in co-citation frequency from 1 to 874 within the 1985
dataset; from 1 to 11,949 across the 11 year period 1985-1995; and from 1 to 35,755 across all of Scopus. Collectively, the publications in our 1985-1995 dataset generated 33,641,395
unique co-citation pairs, for which we computed co-citation frequencies across all of Scopus.
\begin{figure}[ht]
\centering
\includegraphics[width=0.7\textwidth]{R1cc2_presentation_fig.pdf}
\caption{\bf The workflow we used to generate a dataset of 33,641,395 co-cited publications from references cited by articles in Scopus published in the years 1985-1995.}
\label{fig:schematic}
\end{figure}
\begin{table}[!ht]
\caption{{\bf Summary of Analyzed Data} Publication of type article that had at least five cited references indexed in Scopus were selected from the eleven years, 1985-1995. All possible reference pairs
were generated for the cited references of these articles and then restricted to those pairs where both members were in the set of 768,993 highly cited publications. The column Co-cited Pairs shows
the number of pairs in each year after the restriction was applied.}
\label{tab:tab1}
\centering
\begin{tabular}{lllc}
\hline
Year & Articles & References & Co-cited Pairs\\
\hline
1985 & 223,485 & 1,796,502 & 2,600,101 \\
1986 & 238,096 & 1,920,225 & 2,840,557 \\
1987 & 250,575 & 2,037,654 & 3,180,261 \\
1988 & 269,219 & 2,182,571 & 3,406,902 \\
1989 & 285,873 & 2,303,481 & 3,793,986 \\
1990 & 305,010 & 2,490,909 & 4,546,915 \\
1991 & 325,782 & 2,662,005 & 5,039,334 \\
1992 & 343,239 & 2,846,607 & 5,622,164 \\
1993 & 360,916 & 3,006,374 & 6,121,147 \\
1994 & 387,062 & 3,228,240 & 7,022,499 \\
1995 & 405,503 & 3,432,228 & 7,626,684 \\
\hline
\end{tabular}
\end{table}
\emph{Derivation of $\theta$ }
We now show how we define our prior on the probability of $x$ and $y$ being co-cited, based on the citation graph restricted to publications that cite either $x$ or $y$ (but not both) up to the year of their
first co-citation.
Recall that we defined a proxy co-citation of $x$ and $y$ to be
an article that cites one member of the co-cited pair $(x,y)$ and also cites at least one article that cites the other member.
The idea behind this definition is that
we consider papers that cite $x$ as proxies for $x$, and papers that cite $y$ as proxies for $y$. Thus, if a paper $a$ cites both $x$ and $y'$ (where $y'$ is a proxy for $y$), then it is a proxy for a
co-citation of $x$ and $y$. Similarly, if a paper $b$ cites both $y$ and $x'$ (where $x'$ is a proxy for $x$), it is also a proxy for a co-citation of $x$ and $y $.
This motivates the graph-theoretic formulation, which we now formally present.
We fix the pair $x, y$ and we define $N(x)$ to be the set of all publications that cite $x$ (but do not also cite $y$), and are published no later than the year of the first co-citation of $x$ and $y$.
We similarly define $N(y)$. We define a directed bipartite graph with vertex set $N(x) \cup N(y)$. Note that if $x$ cites $y$ then $x \in N(y)$, and similarly for the case where $y$ cites $x$.
Note also that, since we have restricted $N(x)$ and $N(y)$, we have $N(x) \cap N(y) = \emptyset$. We now describe how the directed edge set $E(x,y)$ is constructed.
For any pair of articles $a,b$ where $a \in N(x)$ and $b \in N(y)$, if $a$ cites $b$ then we include the directed edge $a \rightarrow b$ in $E(x,y)$.
Similarly, we include edge $b \rightarrow a$ if $b$ cites $a$. Finally, if a pair of articles both cite each other, then the graph has parallel edges.
By construction, this graph is {\em bipartite}, which means that all the edges go between the two sets $N(x)$ and $N(y)$ (i.e., no edges exist between two vertices in $N(x)$, nor between two vertices
in $N(y)$).
Note that by the definition, every edge in $E(x,y)$ arises because of a proxy co-citation, so that the number of proxy co-citations is the number of directed edges in $E(x,y)$.
Consider the situation where a publication $a$ cites $x$ (so that $a \in N(x)$) and also cites $b_1, b_2, b_3$ in $N(y)$: this defines three directed edges from $a$ to nodes of $N(y)$.
We count this as three proxy co-citations, not as one proxy co-citation. Similarly, if we have a publication $b$ that cites $y$ and also cites $a_1, a_2, a_3, a_4$ in $N(x)$,
then there are four directed edges that go from $b$ to nodes in $N(x)$ and we will count each of those directed edges as a different proxy co-citation.
Accordingly, letting $|X|$ denote the cardinality of a set $X$, we note $|E(x,y)|$, i.e., the number of directed edges that go between $N(x)$ and
$N(y)$, is the number of proxy co-citations between $x$ and $y$.
If no parallel edges are permitted, the maximum number of possible proxy co-citations is $|N(x)| \times |N(y)|$.
Under the assumption that both $N(x)$ and $N(y)$ each have at least one article, we
define $\theta(x,y)$, our prior on the probability of $x$ and $y$ being co-cited, as follows:
$$\theta(x,y)= \frac{|E(x,y)|}{ |N(x)| \times |N(y)|} . $$
Note that if parallel edges do not occur in the graph, then $\theta(x,y) \leq 1$, but that otherwise the value can be greater than $1$.
Note also that $\theta(x,y)=0$ if $E(x,y) = \emptyset$ (i.e., if there are no proxy co-citations) and
that $\theta(x,y)=1$ if every possible proxy co-citation occurs.
To efficiently calculate $\theta$, we used the following pipeline. We copied Scopus citation data from a relational schema in PostgreSQL into
a citation graph in the Neo4j 3.5 graph database using an automated Extract Transform Load (ETL) pipeline that combined
Postgres CSV export and the Neo4j Bulk Import tool. The graph vertex set is all publications, each with a publication year attribute, and
the edge set is all citations between the publications. A Cypher index was created on the publication year. We developed Cypher queries
to calculate $\theta$ and tuned performance by splitting input publication pairs into small batches and processing them in parallel, using parallelization
in Bash and GNU Parallel. Batch size, the number of parallel job slots, and other parameters were tuned for performance, with best results achieved on
batch sizes varying from 20 to 100 pairs. The results of $\theta$ calculations were cross-checked using SQL calculations. In the small number of cases where
$\theta$ computed to $> 1$ (above) it was set to 1 for the purpose of this study.
\emph{Statistical Calculations} We denote the observed co-citation frequency data by the multi-set
\begin{displaymath}
X^o = \left\lbrace x^o_1, \ldots , x^o_N \right\rbrace ,
\end{displaymath}
where $N$ is the total number of pairs of articles and $x^o_i$ is the observed frequency of the $i^{th}$ pair of papers being co-cited.
Note that this is in general a multi-set, as different pairs of articles can have the same co-citation frequency.
Let $n(x)$ be the number of times that $x$ appears in $X^o$ (equivalently, $n(x)$ is the number of pairs of articles that
are co-cited $x$ times), and let $N(x) = \sum_{y=x}^{\infty}{n \left( y \right)}$ denote the total number of pairs of articles that are co-cited at least $x$ times.
Then
\begin{equation} \label{eqObsFreqDist}
f^o \left( x \left| x \geq \underline{x} \right. \right) = \frac{ n(x)}{N(\underline{x})} \text{ for } x \in \left\lbrack \underline{x} , \infty \right) ,
\end{equation}
where $\underline{x}$ is a parameter we use to analyze the distribution's right tail starting at varying frequencies. We describe in this subsection (i) the statistical computations for fitting lognormal and power law distributions to right tails of the observed co-citation frequency distributions as defined by (\ref{eqObsFreqDist}) for various $\underline{x}$ and (ii) how we assessed the quality of those fits. Further, we performed such analyses for various slices of the data, stratifying by $\theta$ and other parameters, as is described in the Results section.
We used a discrete version of a lognormal distribution to represent integer co-citation frequencies, $f \left( \cdot \right)$, following ~\cite{stringer_statistical_2008} and ~\cite{stringer_statistical_2010}, while appropriately normalizing for our conditional assessment of the right tail commencing at $\underline{x}$:
\begin{eqnarray}
f_{LN} \left( x \left| \mu, \sigma, \underline{x} \right. \right) &=& \frac{\tilde{f} \left( x \left| \mu, \sigma \right. \right) }{\sum_{n = \underline{x}}^\infty{\tilde{f} \left( n \left| \mu, \sigma \right. \right)}} \text{ for } x \geq \underline{x} \label{eqLognormDist} \\
\tilde{f} \left( x \left| \mu, \sigma \right. \right)&=& \int_{ x - 0.5 }^{ x + 0.5 } \frac{dq}{q \sqrt{2 \pi \sigma^2}} \exp \left( -\frac{\left( \ln{q} - \mu \right)^2}{2 \sigma^2} \right) , \nonumber
\end{eqnarray}
where $\mu$ and $\sigma$ are the mean and standard deviation, respectively, of the underlying normal distribution. These probabilities can be computed with the cumulative normal distribution,
\begin{displaymath}
\tilde{f} \left( x \left| \mu, \sigma \right. \right) = \Phi \left( \frac{\ln{\left( x + 0.5 \right)} - \mu}{\sigma} \right)
- \Phi \left( \frac{\ln{\left( x - 0.5 \right)} - \mu}{\sigma} \right) ,
using the well-known error function.
We fit distributions to the co-citation frequency data for various extremities of the right tail, as parameterized by $\underline{x}$, using a maximum (log) likelihood estimator (MLE). We solved for the best-fit distributional parameters for the lognormal distribution, $\mu$ and $\sigma$, by modifying a multi-dimensional interval search algorithm from \cite{press_statistical_2007} and following ~\cite{stringer_statistical_2010}. A compiled version of this code using the C++ header file, \texttt{amoeba.h}, is available on our Github site~\cite{GithubERNIE2019}.
We fit a discrete power law distribution to the data for various values of $\underline{x}$, which was normalized for our conditional observations of the right tail:
\begin{equation}
f_{PL} \left( x \left| \alpha , \underline{x} \right. \right) = \frac{x^{-\alpha}}{\zeta\left( \alpha , \underline{x} \right)} \text{ for } x \geq \underline{x},
\end{equation}
where the Hurwitz zeta function,
\begin{displaymath}
\zeta \left( \alpha , \underline{x} \right) = \sum_{n=0}^{\infty} {\frac{1}{\left( n + \underline{x} \right)^{\alpha}}} ,
\end{displaymath}
is a generalization of the Riemann zeta function, $\zeta \left( \alpha , 1 \right)$, as is needed for analysis of the right tail.
We solved first-order conditions for the (log) MLE to find the best-fit distributional exponent $\alpha$,
\begin{equation} \label{eqFirstOrderPL}
\frac{\zeta^\prime \left( \alpha , \underline{x} \right)}{\zeta \left( \alpha , \underline{x} \right)} = - \frac{1}{N \left( \underline{x} \right)} \sum_{x \in X^o \left( \underline{x} \right)} \ln{x} ,
\end{equation}
as described in \cite{clauset_power-law_2009} and ~\cite{goldstein_problems_2004}, where $X^o \left( \underline{x} \right) = \left\lbrace x \in X^o : x \geq \underline{x} \right\rbrace$, are the observed co-citations with frequencies at least as great as $\underline{x}$ and $N \left( \underline{x} \right)$ is the number of such co-citations.
We solved (\ref{eqFirstOrderPL}) to find $\alpha$ using a bisection algorithm.
We used the ${\chi}^2$ goodness of fit (${\chi}^2$) and the Kolmogorov-Smirnov (K-S) tests to assess the null hypothesis that the distribution of the observed co-citation frequencies and the best-fit lognormal distribution are the same, and similarly for the best-fit power law distribution. We also computed the Kullback-Leibler Divergence (K-L) between the observed data and the best-fit distributions.
Both the ${\chi}^2$ and K-S tests employed the null hypothesis that the observed co-citation frequencies, $n{\left( x \right)}$ for $ x \in \left\lbrack \underline{x} , \infty \right)$, were sampled from the best-fit lognormal or power law distributions, which we denote by $f_d \left( \cdot \left| \underline{x} \right. \right)$ for $d \in \left\lbrace LN, PL \right\rbrace$, while suppressing the parameters specific to each of the distributions.
The usual ${\chi}^2$ statistic was computed by, first, grouping each of the observed co-citation frequencies into $k$ bins, denoted by $b_i$ for $i \in \left\lbrace 1, \ldots , k \right\rbrace$, and then computing
\begin{displaymath}
\chi^2 = \sum_{i=1}^{k} {\frac{\left( O_i - E_i \right)^2}{E_i}},
\end{displaymath}
where $O_i$ is the observed number of co-citations having frequencies associated with the $i$-th bin,
\begin{displaymath}
O_i = \sum_{x \in b_i}{n{\left( x \right)}} ,
\end{displaymath}
and $E_i$ is the expected number of observations for frequencies in bin $i$, if the null hypothesis was true, in a sample with size equal to the number of observed data points, $N{\left( \underline{x} \right)}$:
\begin{displaymath}
E_i = \sum_{x \in b_i} f_d \left( x \left| \underline{x} \right. \right) N{\left( \underline{x} \right)} .
\end{displaymath}
If the null hypothesis was true, then we would expect $O_i$ and $E_i$ to be approximately equal, with deviations owing to variability due to sampling.
Constructing the bins $b_i$ requires only that $E_i \geq 5$ for every $i = 1, \ldots , k$. Test outcomes are sometimes sensitive to the minimum $E_i$ permitted, which we will denote by $\underline{E}$, and so we tested with multiple thresholds, including 10, 20, 50, and 70. Furthermore, statistical tests are stochastic: these multiple tests permitted a reduction in the probability of erroneously rejecting or accepting the null hypothesis based on a single test. The distribution of observed co-citation frequencies was skewed right with a long tail, so that aggregating bins to satisfy $E_i \geq \underline{E}$ was most critical in the right tail. This motivated a bin construction algorithm that aggregated frequencies in reverse order, starting with the extreme right tail. Algorithm \ref{alg:bins} requires a set of the unique observed co-citation frequencies, $\hat{X}^o$, which includes the elements of the multiset $X^o$ without repetition. While Algorithm \ref{alg:bins} does not guarantee in general that all bins satisfy $E_i \geq \underline{E}$, that criterion was satisfied for the observed data.
\begin{algorithm}
\caption{Frequency Bin Construction
\label{alg:bins}}
\begin{algorithmic}[1]
\State $i \leftarrow 1$
\State $b_1 = \left\lbrace \right\rbrace$
\While {$\left| \hat{X}^o \right| > 0$}
\State $b_i \leftarrow b_i \cup \left\lbrace \max{\left( \hat{X}^o \right)} \right\rbrace$
\State $\hat{X}^o \leftarrow \hat{X}^o \setminus \max{\left( \hat{X}^o \right)}$
\If {$E_i \geq \underline{E}$}
\State $i \leftarrow i + 1$
\State $b_i \leftarrow \left\lbrace \right\rbrace$
\EndIf
\EndWhile
\end{algorithmic}
\end{algorithm}
We implemented a K-S test using simulation to generate a sampling distribution to account for the discrete frequency observations \cite{internetks_bootstrap}. We denote the cumulative
distribution of observed co-citation frequencies by $F^o{\left( x \left| \underline{x} \right. \right)} = \sum_{i=\underline{x}}^{x} {f^o{\left( i \left| \underline{x} \right. \right)}}$, and the best-fit cumulative
distribution by $F_d{\left( x \left| \underline{x} \right. \right)} = \sum_{i=\underline{x}}^{x} {f_d{\left( i \left| \underline{x} \right. \right)}}$. The K-S test involves testing the maximum absolute difference
between the observed and theorized cumulative distributions,
\begin{displaymath}
D_n = \max_x{\left| F^o{\left( x \left| \underline{x} \right. \right)} - F_d{\left( x \left| \underline{x} \right. \right)} \right|},
\end{displaymath}
where $n$ is the number of observations giving rise to $F^o{\left( x \left| \underline{x} \right. \right)}$, against the distribution of such differences between samples from the theorized distribution with the same number of observations, $n$,
\begin{displaymath}
\tilde{D}_n = \max_x{\left| \tilde{F}_{d,1}{\left( x \left| \underline{x} \right. \right)} - \tilde{F}_{d,2}{\left( x \left| \underline{x} \right. \right)} \right|} ,
\end{displaymath}
where $\tilde{F}_{d,j}{\left( x \left| \underline{x} \right. \right)}$ is the empirical distribution of sample $j$ of size $n$ (notation suppressed) drawn from $F_d{\left( x \left| \underline{x} \right. \right)}$. We generated 100 such random variables $\tilde{D}_n$ for each test. We reject the null hypothesis if $D_n$ is larger than substantially all of the $\tilde{D}_n$, say all but 5\%, for equivalence with a $p$-value of 0.05. The number of $\tilde{D}_n$ samples drawn yields a $p$-value with a resolution of 1\%.
We computed the K-L Divergence two ways due to its asymmetry:
\begin{eqnarray*}
D_{K-L}{\left( f^o \parallel f_d \right)} &=& \sum_{x=\underline{x}}^\infty {f^o{\left( x \left| \underline{x} \right. \right)} \ln{\frac{f^o{\left( x \left| \underline{x} \right. \right)}}{f_d{\left( x \left| \underline{x} \right. \right)}}}} \\
D_{K-L}{\left( f_d \parallel f^o \right)} &=& \sum_{x=\underline{x}}^\infty {f_d{\left( x \left| \underline{x} \right. \right)} \ln{\frac{f_d{\left( x \left| \underline{x} \right. \right)}}{f^o{\left( x \left| \underline{x} \right. \right)}}}} .
\end{eqnarray*}
Separate from the tests above, we tested whether the distribution of co-citation frequencies was independent of $\theta$ using a $\chi^2$ test, using the null hypothesis that the co-citation frequency distribution was
independent of $\theta$. We initially created a contingency table on $\theta$ and co-citation frequency using these bins for $\theta$, $\left\lbrace \left\lbrack 0.0 , 0.2 \right) , \left\lbrack 0.2 , 0.4 \right) , \left\lbrack 0.4 , 0.6 \right) ,
\left\lbrack 0.6 , 0.8 \right) , \left\lbrack 0.8 , 1.0 \right) \right\rbrace$, and logarithmic bins for
frequency to accommodate the skewed distributions:
\begin{displaymath}
\left\lbrace \left[ 10 , 100 \right) , \left[ {100} , {1000} \right) , \left[ {1000} , {10000} \right) , \left[ {10000} , {100000} \right] \right\rbrace.
\end{displaymath}
We, subsequently, aggregated these bins to have an expected number of co-citations in each bin equal to or greater than 5 to account for a decreasing number of observations as $\theta$ and frequency increased by having just two
intervals for frequency: $\left\lbrace \left[ 10 , 100 \right) , \left[ {100} , {100000} \right] \right\rbrace$.
\emph{Kinetics of Co-citation}
We extended prior work on delayed recognition and the Sleeping Beauty phenomenon~\cite{ke_defining_2015,vanraansleeping2004,li_distinguishing_2016,glanzel_myth_2004} towards co-citation.
We have modified the beauty coefficient (B) of ~\cite{ke_defining_2015} to address co-citations by:
(i) counting citations to a pair of publications (co-citations) rather than citations to individual papers,
(ii) setting $t_0$ (age zero) to the first year in which a pair of publications could be co-cited (i.e., the publication year of the more recently published member of a co-cited pair), and
(iii) setting $C_0$ to the number of co-citations occurring in year $t_0$. Rather than calculate awakening time as in \cite{ke_defining_2015}, we opted to measure the simpler length of time between $t_0$ and the first year in which a co-citation was recorded; we label this measurement as the timelag $t_l$, so that $t_l=0$ if a co-citation was recorded in $t_0$.
\section*{Results and Discussion} \iffalse
USE THIS CONSTRUCT FOR BLOCK COMMENTS
\fi
Our base dataset, described in Table \ref{tab:tab1}, consists of the 33,641,395 co-cited reference pairs (33.6 million pairs) and their co-citation frequencies, gathered from Scopus during the 11-year period from 1985-1995 (Materials and Methods). A striking distribution of co-citation frequencies with a long right tail is observed with a minimum co-citation of 1, a median of 2, and a maximum co-citation frequency of 51,567 (Figure~\ref{fig:basedata}). Approximately 33.3 of 33.6 million pairs (99\% of observations) have co-citation frequencies ranging from 1--67 and the remaining 1\% have co-citation frequencies ranging from 68--51,567. Since the focus of our study was co-citations of frequently cited publications, we further restricted this dataset to those pairs with a co-citation frequency of at least 10, which resulted in a smaller dataset of 4,119,324 co-cited pairs (4.1 million pairs) with minimum co-citation frequency of 10, median of 18, and a maximum co-citation frequency of 51,567. In order to focus on co-citations derived from highly cited publications, $\theta$ was calculated for all pairs with a co-citation frequency of at least 10. We also note whether one article in a co-citation pair cites the other (connectedness).
Influenced by the use of linked co-citations for clustering~\cite{small_clustering_1985}, we also examined the extent to which members of a co-cited pair were also found in other co-cited pairs. We found that 205,543 articles contributed to 4.12 million co-cited pairs. The highest frequency observed in our dataset, 51,567 co-citations, was for a pair of articles from the field of physical chemistry: Becke (1993)~\cite{becke_densityfunctional_1993} and Lee, Yang, and Parr (1988)~\cite{lee_development_1988}. The members of this pair are not connected and are found in a total of 1,504 co-cited pairs with frequencies ranging from 10 to 51,567. The second highest frequency, 28,407 co-citations, was for another pair of articles from the field of
biochemistry: \cite{laemmli_cleavage_1970,bradford_rapid_1976}. Members of this pair are not connected and are found in 41,909 co-cited pairs, 24,558 for the Laemmli gel electrophoresis article and 17,352 for the Bradford protein estimation article. In terms of this second pair, both articles describe methods heavily used in biochemistry and molecular biology, an area with strong referencing activity, so this result is not entirely surprising.
Having developed $\theta(x,y)$ as a prediction of the probability that articles $x$ and $y$ would be co-cited, we first tested whether the distribution of co-citation frequencies was independent of $\theta$ (Materials and Methods). The null hypothesis that the co-citation frequency distribution was independent of $\theta$ was rejected with a very small $p$-value: the statistical software indicated a $p$-value with no significant non-zero digits. We next investigated what distribution functions might fit the frequencies of co-citation as $\theta$ varied.
Based on the long tails of citation frequencies, prior research has assessed the fit of lognormal and power law distributions \cite{stringer_statistical_2008,radicchi_statistical_2008,stringer_statistical_2010}.
We noted long right tails in co-citation frequencies, which, similarly, motivated us to assess the fit of lognormal and power law distributions to co-citation data. Further, we stratified the data according to (i) the minimum frequency for the right tail $\underline{x}$, (ii) $\theta$, and (iii) whether the two members of each co-citation pair were connected. Figure \ref{fig:mondrian} shows which distribution, if either, fits the data in each slice, based on tests of statistical significance. Note that there were no circumstances where both distributions fit: if one fit, then the other did not.
Statistical tests were not possible for some slices due to an insufficient number of data points. This was the case for certain combinations of large $\underline{x}$, large $\theta$, and co-citations that were not connected. The number of data points obviously decreases as $\underline{x}$ increases, and we found the decrease in the number of data points to be more precipitous when $\theta$ was large and co-citations were unconnected due to the lighter right tails for these parameter combinations. The graph in the right panel of Figure \ref{fig:Figs_4_5}, which has a logarithmic $y$-axis, shows that the number of data points per $\theta$ interval analyzed decreases most often by more than an order of magnitude from one interval to the next as $\theta$ increases.
Most pairs of publications that are co-cited at least ten times, therefore, have small values of $\theta$.
Figure \ref{fig:mondrian} indicates when the null hypothesis of a best-fit lognormal or power law fitting the observed data can not be rejected. We computed two types of statistics for evaluating the null hypothesis (${\chi}^2$ and K-S) and, moreover, we computed the ${\chi}^2$ statistic for four binning strategies. Figure \ref{fig:mondrian} indicates a distributional fit, specifically, if either the K-S $p$-value is greater than 0.05 or if two or more of the ${\chi}^2$ statistics are greater than 0.05.
While we computed the K-L Divergence (see supplementary material), we did not use these computations for formal statements of distributional fit because they are neither a norm nor do they determine statistical significance.
These K-L computations did, however, support the findings based on formal tests of statistical significance.
Power law distributions fit most often when co-citations are connected (Fig.~\ref{fig:mondrian}), when more extreme right tails are considered, and when co-citations have small values of $\theta$. Lognormal distributions fit, conversely, in some circumstances, when a greater portion of the right tail is considered. These observations support the existence of heavy tails for $\theta$ small, even if a lognormal distribution fits the observed data more broadly. This observation is consistent with our observations of the most frequent co-citations having small $\theta$ values, as shown in the scatter plot in the left panel of Figure \ref{fig:Figs_4_5}.
Mitzenmacher \cite{Mitzenmacher_2003} shows a close relationship between the power law and lognormal distributions vis-\`a-vis subtle variations in generative mechanisms that determine whether the resulting distribution is power law or lognormal.
The stratified layers in Figure \ref{fig:mondrian} where a lognormal distribution fits for some portion of the right tail and, in the same instance, a power law describes the more extreme tail,
may, therefore, be due to a generative mechanism whose parameters are close to those for a power law distribution as well as those for a lognormal distribution.
\begin{table}[!ht]
\caption{{\bf Exponents of best-fit power law distributions} These observations are for power law exponents where comparison across intervals of $\theta$ were possible, and where statistical tests indicated that a power law was a good fit to the data. The articles of the co-citations were connected for all data shown.}
\label{tab:exponents}
\centering
\begin{tabular}{ccc}
\hline
Right-tail cutoff ($\underline{x}$) & $\theta$ & Power law exponent ($\alpha$) \\
\hline
200 & $\left[ 0.0 , 0.2 \right)$ & 3.26 \\
200 & $\left[ 0.2 , 0.4 \right)$ & 3.37 \\ \hline
250 & $\left[ 0.0 , 0.2 \right)$ & 3.27 \\
250 & $\left[ 0.2 , 0.4 \right)$ & 3.37 \\ \hline
300 & $\left[ 0.0 , 0.2 \right)$ & 3.22 \\
300 & $\left[ 0.2 , 0.4 \right)$ & 3.35 \\
\hline
\end{tabular}
\end{table}
Table \ref{tab:exponents} shows the exponents of the best-fit power law distributions when statistical tests indicated that a power law was a good fit and where comparisons were possible among the intervals of $\theta$: these were possible for
$\theta$ intervals of $\left[ 0.0 , 0.2 \right)$ and $\left[ 0.2 , 0.4 \right)$, for connected co-citations, and right tails commencing at $\underline{x} \in \left\lbrace 200,250,300 \right\rbrace$. The power law exponent $\alpha$ in these comparisons was less for $\theta \in \left[ 0.0 , 0.2 \right)$ than for $\theta \in \left[ 0.2 , 0.4 \right)$, indicating heavier tails for $\theta$ small and, therefore, a greater chance of extreme co-citation frequency.
Figure \ref{fig:perc} shows a log-log plot of the number of co-citations ($y$-axis) exhibiting the counts on the $x$-axis, for $\theta$ in the interval $\left[0.0, 0.2 \right)$ (note that both axes employ log scaling). The pattern for points below the 99th percentile clearly indicate that the number of co-citations referenced at a given frequency decreases greatly as the frequency increases. Also, the broadening of the scatter where fewer co-citations are cited more frequently is indicative of a long right tail, as has been observed in other research where lognormal or power law distributions have been fit to data, as in \cite{montebruno_tale_2019}.
Perline~\cite{perline_2005} warns against fitting a power law function to truncated data. Informally, a portion of the entire data set can appear linear on a log-log plot, while the entire data set would not. He cites instances where researchers have mistakenly characterized an entire data set as following a power law due to an analysis of only a portion of the data, when a lognormal distribution might provide a better fit to the entire data set. Indeed, the scatter plot in Figure \ref{fig:perc} is not linear and so, as Figure \ref{fig:mondrian} shows, a power law does not fit the entire data set. This is what Perline calls a weak power law where a power law distribution function fits the tail, but not the entire distribution. Our concern, however, is not with characterizing the distributional function for the entire data set, but with characterizing the features of high frequency co-citations, which by definition means we
were concerned with the right tail of the distribution. Moreover, the results avoid confusion between lognormal and power law distribution functions because we have shown not only that a power law provides a statistically significant fit, but also that a lognormal distribution function does not fit.
Our analysis found particularly heavy tails that were well fit by power law distributions for small $\theta$, in the intervals $\left[ 0.0, 0.2 \right)$ and $\left[ 0.2, 0.4 \right)$, and for co-citations whose constituents are connected, as shown in Fig.~\ref{fig:mondrian}.
The closely related Matthew Effect~\cite{merton_1968}, cumulative advantage~\cite{price_general_1976}, and the preferential attachment class of models~\cite{barabasi_albert_2002} provide a possible explanation for citation frequencies following a power law distribution for some sufficiently extreme portion of the right tail. For greater values of $\theta$, insufficient data in the right tails precludes a definitive assessment in this regard, although one might argue that the lack of observations in the tails is counter to the existence of a power law relationship. It is also noteworthy that the exponents we found for co-citations (Table \ref{tab:exponents}) are close in value to those reported for citations by \cite{price_general_1976} and \cite{radicchi_statistical_2008}.
\emph{Delayed Co-citations} The delayed onset of citations to a well cited publication, also referred to as `Delayed Recognition' and `Sleeping Beauty', has been studied by Garfield, van Raan, and others~\cite{garfield_1970,vanraan_2019,ke_defining_2015,vanraansleeping2004,li_distinguishing_2016,glanzel_myth_2004,bornmann_identifying_2018}.
We sought to extend this concept to frequently co-cited articles. As an initial step, we calculated two parameters (Materials and Methods): (1) the beauty coefficient~\cite{ke_defining_2015} modified for
co-cited articles and (2) timelag $t_l$, the length of time between first possible year of co-citation and the first year in which a co-citation was recorded. We further focused our consideration of delayed co-citations
to the 95th percentile or greater of co-citation frequencies in our dataset of 4.1 million co-cited pairs. Within the bounds of this restriction, 24 co-cited pairs have a beauty coefficient of 1,000 or greater and all 24 are
in the 99th percentile of co-citation frequencies. Thus, very high beauty coefficients are associated with high co-citation frequencies.
We also examined the relationship of $t_l$ with co-citation frequencies (Fig.~\ref{fig:composite}) and observed that high $t_l$ values were associated with lower co-citation frequencies. These data appear to be consistent with a report from van Raan and Winnink~\cite{vanraan_2019}, who conclude that `probability of awakening after a period of deep sleep is becoming rapidly smaller for longer sleeping periods'. Further, when two articles are connected, they tend to have smaller $t_l$ values compared to pairs that are not connected in the same frequency range.
\clearpage
\section*{Figures}
\begin{figure}[h]
\centering
\includegraphics[width=0.7\textwidth]{R1percent_connected2.pdf}
\caption{The x-axis shows percentiles for all three plots \textbf{Left Side} \emph{Co-citation frequencies of highly cited publications from Scopus 1985-1995} Co-citation frequencies are plotted against their percentile values.
The upper and lower plots were both generated from 33,641,395 data points. The lower plot shows the same data with a logarithmic (ln) transformation of y-axis. The minimum co-citation frequency is 1, the median is 2, the third
quartile is 4, and the maximum is 51,567. Additionally, 15,140,356 pairs (45\%) have a co-citation frequency of 1. Frequencies of 12, 22, 67, and 209 correspond to quantile values of 0.9, 0.95, 0.99, and 0.999, respectively.
\textbf{Right Side} Direct citations between members of a co-cited pair (connectedness) increase with co-citation frequency. The proportion of connected pairs (a direct citation exists between the two members of a pair) within each
percentile is shown. Data are plotted for all pairs with a co-citation frequency of at least $10$
(4.1 million pairs).}
\label{fig:basedata}
\end{figure}
\clearpage
\begin{figure}[h]
\centering
\includegraphics[width=0.6\textwidth]{R1_fit_heatmap.pdf}
\caption{\textbf{Distributional fits to the observed co-citation frequencies} The graph shows where a lognormal or power law distribution demonstrated a statistically significant fit with the observed co-citation frequencies
stratified by $\theta$, extent of the right tail tested $\underline{x}$, and whether co-citations were connected. A power law fit more often for $\theta$ in the intervals $\left[ 0.0, 0.2 \right)$ and $\left[ 0.2, 0.4 \right)$ when
co-citation constituents were connected. When a lognormal distribution fit, it was for broader portions of the data set. Data were insufficient for testing as $\theta$ increased due to (i) fewer observations and (ii) less prominent right tails.}
\label{fig:mondrian}
\end{figure}
\clearpage
\begin{figure*}
\begin{tabular}{cc}
\includegraphics[width=2.65in]{R1replacement4a.jpg} &
\includegraphics[width=2.65in]{R1replacement4b.jpg} \\
(a) Co-citation Scopus frequency versus $\theta$ & (b) Number of co-cited pairs per $\theta$ interval
\end{tabular}
\caption{Co-citation dynamics relative to $\theta$. (a) Points represent the Scopus frequency vs. $\theta$ value for each co-cited pair. Darker regions indicate denser plots of the translucent points.
Co-cited pairs with the greater frequency are observed for pairs with smaller $\theta$. (b) The $y$-axis employs a log scale and shows the number of co-cited pairs per $\theta$ interval. The number of co-cited pairs
decreases, most often, by more than an order of magnitude per interval as $\theta$ increases. The dominance of co-cited pairs with smaller $\theta$ are also reflected by regions of greater density in panel (a).}
\label{fig:Figs_4_5}
\end{figure*}
\begin{figure}[h]
\includegraphics[width=0.5\textwidth]{R1_loglog}
\caption{\textbf{Log-log plot of the number of co-citations versus co-citation count for $\theta \in \left\lbrack 0.0, 0.2 \right)$} The $y$-axis shows the number of co-cited pairs observed having the citation counts plotted along
the $x$-axis. The tightly clustered plot below the 99th percentile demonstrates a clear pattern of decreasing number of co-cited pairs having an increasing number of citation counts. The scatter plot for the tail above
the 99th percentile broadens, indicating a long tail of relatively few co-cited pairs that were cited with extreme frequency.}
\label{fig:perc}
\end{figure}
\clearpage
\begin{figure}[h]
\includegraphics[width=0.8\textwidth]{R1lag3.jpg}
\caption{\textbf{Relationship between time lag ($t_l$) and co-citation frequency} Extended lag times are associated with lower co-citation frequencies. Connected pairs have lower $t_l$ values. Data are shown for 207,214 pairs
consisting of $\geq$ 95th percentile of co-citation frequencies for the 4.1 million row dataset. The observations are stratified by percentile group (vertical panels) and connectedness (upper and lower halves). Co-citation
frequency (y-axis) is plotted against $t_l$, the time between first possible co-citation and first co-citation.}
\label{fig:composite}
\end{figure}
\begin{figure}[h]
\includegraphics[width=0.7\textwidth]{R1composite_samples3.pdf}
\caption{\textbf{Co-citation frequencies of highly cited publications from Scopus 1985-1995} \emph{Upper panel} Publication 1: Instability of the interface of two gases accelerated by a shock wave
(1972) doi: 10.1007/BF01015969, first cited (1993), total citations (566). Publication 2: Taylor instability in shock acceleration of compressible fluids (1960) doi: 10.1002/cpa.3160130207, first cited (1973),
total citations (566), first co-cited (1993), total co-citations (541).
\newline
\newline
\emph{Lower Panel} Publication 1: Colorimetric assay of catalase (1972) doi: 10.1016/0003-2697(72)90132-7, first cited (1972), total citations (2683). Publication 2: Levels
of glutathione, glutathione reductase and glutathione S-transferase activities in rat lung and liver (1979) doi: 10.1016/0304-4165(79)90289-7, first cited (1979), total citations (2464), first co-cited (1979), total co-citations (470).}
\label{fig:composite2}
\end{figure}
\clearpage
\section*{Conclusions}
In this article, we report on our exploration of features that impact the frequency of co-citations. In particular, we wished to examine article pairs with high co-citation frequencies with respect to whether they originated from the same school(s) of thought or represented novel combinations of existing ideas. However, defining a discipline is challenging, and determining the discipline(s) relevant to specific publications remains a challenging problem.
Journal-level classifications of disciplines have known limitations and while article-level approaches offer some advantages, they are not free of their own limitations~\cite{article_stasa}.
Consequently, we designed $\theta$, a statistic that examines the citation neighborhood of a pair of articles $x$ and $y$ to estimate the probability that they would be co-cited. Our approach has advantages compared to alternate approaches: it avoids the challenges of journal-level analyses, it does not require a definition of ``discipline" (or ``disciplinary distance"), it does not require assignment of disciplines to articles, it is computationally feasible, and, most importantly, it enables an evaluation that is specific to a given pair of articles.
We note that when $x$ and $y$ are from the same sub-field, then $\theta$ may be very large, and conversely, when $x$ and $y$ are from very different fields, it might be reasonable to expect that $\theta$ will be small. Thus, in a
sense, $\theta$ may correlate with disciplinary similarity, with large values for $\theta$ reflecting conditions where the two publications are in the same (or very close) sub-disciplines, and small values for $\theta$ reflecting that the
disciplines for the two publications are very distantly related. We also comment that in this initial study, we have not considered second-degree information, that is publications that cite publications that cite an article of interest.
Our data indicate that the most frequent co-citations occur when co-citations have small values of $\theta$, as shown in Figure \ref{fig:Figs_4_5}. Our study considered the hypothesis that the
frequency distribution is independent of $\theta$, but our statistical tests rejected this hypothesis, and
showed instead that the frequency distribution is best characterized by a power law for small values of $\theta$ and connected publications, and in many other regions is best characterized by a lognormal distribution.
The observation that power laws are consistent with small values of $\theta$ and connected co-citations is consistent with the theory of preferential attachment for these parameter settings. To the extent that
preferential attachment is the mechanism giving rise to a power law, this suggests that preferential attachment is, at least, stronger for small $\theta$ values and connected co-citations than for other parameter combinations,
or that preferential attachment is not applicable to other parameter values.
Observing power laws, heavy tails, and pairs with extreme co-citation strength for small values of $\theta$ (i.e., pairs that have small {\em a priori} probabilities of being co-cited) may seem, on its face, paradoxical.
One possible explanation of the pairs in the extreme right tail with both small $\theta$ and large co-citation strength is that those pairs represent novel combinations of ideas that, when recognized within the research community, catalyze an
increased citation rate, consistent with preferential attachment coupled to time-dependent initial attractiveness~\cite{eom_2011} as an underlying generative mechanism. However, small values of $\theta$ do not guarantee
a high co-citation count: indeed, even for small values of $\theta$, co-citations with a power law predominantly have relatively low co-citation strength.
We also note the increasing proportion of connected pairs as the percentile for co-citation frequency
increases (Fig. \ref{fig:basedata}); this pair of parameters appears to be associated with a fertile environment where extremely high co-citation frequencies are possible.
This observation raises the question of whether small values of $\theta$ and connected co-citations are associated with preferential attachment and, if a causal relationship exists, then
how do $\theta$ and co-citation connection provide an environment supporting preferential attachment? A possibility is that one article in a co-cited pair citing the other makes the
potential significance of the combination of their ideas apparent to researchers. The clear pattern of the highest frequency co-cited pairs typically having low $\theta$ values suggests that these pairs
are highly cited and hence impactful because of the novelty in the ideas or fields that are combined
(as reflected in low $\theta$). However, other factors should be considered, such as the prominence of authors and prestige of a journal~\cite{garfield_1980} where the first co-citation appears.
We did not apply field-normalization techniques when assembling the parent pool of 768,993 highly cited articles consisting of the top 1\% of highly cited articles from each year in the Scopus bibliography.
Thus, the highly co-cited pairs we observe are biased towards high-referencing areas such as biomedicine and parts of the physical sciences~\cite{small_citation_1980}.
However, the dataset we analyzed has a lower bound of 10 on co-citation frequencies and includes pairs from fields other than those that are high referencing.
For example, the maximum $t_l$ we observed in the dataset of 4.1 million pairs was 149 years,
and is associated with a pair of articles independently published in 1840, establishing the eponymous
von Staudt--Clausen theorem \cite{clausen_1840,vonstaudt_1840}; this pair of articles was apparently co-cited 10 times since their publication.
A second pair of articles concerning electron theory of metals~\cite{drude_1900_1,drude_1900_2} was first co-cited in 1994 for a total of 109 times, with $t_l$ observed of 94 years. Both cases are drawn from mathematics and physics rather than the medical literature. They are also consistent with the suggestion that the probability of awakening is smaller after a period of deep sleep~\cite{vanraan_2019}.
As we have defined $t_l$, with its heavy penalty for early citation, we create additional sensitivity to coverage and data quality especially for pairs with low citation numbers. Indeed, for the von Staudt--Clausen pair, a manual search of other sources revealed an article~\cite{carlitz_cvs_1961} in which they are co-cited. Both these articles were originally published in German and it is possible that additional co-citations were not captured. Thus, big data approaches that serve to identify trends should be accompanied by more meticulous case studies, where possible. Other approaches for examining depth of sleep and awakening time should certainly be considered~\cite{vanraansleeping2004,ke_defining_2015}.
Lastly, using our approach to revisit invisible colleges~\cite{price_beaver_1966,crane_1972,small_clustering_1985} seems warranted, since it seems likely that the upper bound of a hundred members predicted
by \cite{price_beaver_1966}
is likely to have increased in a global scientific enterprise with electronic publishing and social media.
Finally, we view these results as a first step towards further investigation of co-citation behavior, and we introduce a new technique based on exploring first-degree neighbors of co-cited publications; we are hopeful that this graph-theoretic study will stimulate new approaches that will provide additional insights, and prove complementary to other article level approaches.
\section*{Acknowledgments} In addition to support through federal funding, the ERNIE project features a collaboration with Elsevier. We thank our colleagues from Elsevier for their support of the collaboration.
\section*{Competing Interests}
The authors have no competing interests. Scopus data used in this study was available to us through a collaborative agreement with Elsevier on the ERNIE project. Elsevier personnel played no role in conceptualization, experimental design, review of results, or conclusions presented. The content of this publication is solely the responsibility of the authors and does not necessarily represent the official views of the National Institutes of Health or Elsevier. Sitaram Devarakonda's present affiliation is Randstad USA. His contributions to this article were made while he was a full-time employee of NET ESolutions Corporation.
\section*{Author Contributions}
Conceptualization, GC, JB, SD, and TW; Methodology, AD, DK, GC, JB, SD, SL, and TW; Investigation, DL-H, GC, JB, and SD; Writing---Original Draft, GC, JB, and TW; Writing---Review and Editing, AD, DK, DL-H, GC, JB, SD, SL, and TW; Funding Acquisition, GC; Resources, DK and GC; Supervision, GC. Authors are listed in alphabetic order.
\bibliographystyle{acm}
|
1,314,259,995,033 | arxiv | \section{Introduction}
Skyrmions are topological solitons \cite{Manton:2004} of the texture
type, i.e.~they are maps from one-point compactified 3-space,
$X=\mathbb{R}^3\cup\{\infty\}\simeq S^3$ to a target space $N=S^3$
with a nonvanishing topological degree
$\pi_3(S^3)=\mathbb{Z}\ni B\neq 0$.
Usually the map is constructed using an SU(2) matrix $U$, where the
nonlinear sigma model constraint is $\det U=1$, which forces the 4
components to live on a 3-sphere of unit radius.
It is also possible to write the SU(2) matrix as an O(4) vector of
unit length.
In this paper, however, it proves convenient to write the SU(2) field
as two complex scalar fields, $\psi_{1,2}$,
living on the complexified 1-sphere ($|\psi_1|^2+|\psi_2|^2=1$).
The convenience is two-fold.
First of all, we would like to associate the zero lines of each
complex scalar field with (deformed) vortex rings.
Secondly, it proves convenient for our calculations as we will be
using the Hopf map, which is naturally written in terms of two complex
scalar fields.
First we prove a theorem which shows that under the Hopf map, the
map $\boldsymbol{\psi}:\mathbb{R}^3\cup\{\infty\}\simeq S^3\to S^3$ of degree $B$
will necessarily have Hopf charge $Q=B$.
This statement is known in the
literature \cite{Ward:2001vi,Manton:2004} and has been used several
times to generate initial conditions for
Hopfions \cite{Battye:1998pe,Battye:1998zn}
in a different model, called the Faddeev-Skyrme
model \cite{Faddeev:1996zj}, which maps $\mathbb{R}^3$ to $S^2$ and
thus naturally possesses a Hopf charge.
Nevertheless, we have not found the theorem written down in the
literature, and thus we shall give it here and supply a
proof.\footnote{Ref.~\cite{Meissner:1985nb} restricts ${\rm tr}\, U=0$, and
thus maps $\mathbb{R}^3\to S^2$ and not $\mathbb{R}^3\to S^3$;
therefore we do not consider the calculation of the Hopf charge there
as a general proof.
Similarly, ref.~\cite{Ward:2004gr} finds an interpolation between the
Skyrme model and the Faddeev-Skyrme model and states that the baryon
charge equals the Hopf charge when the model is restricted to the
Faddeev-Skyrme model, i.e.~$\mathbb{R}^3\to S^2$.
We do not make such restriction in this paper. }
The implication of the theorem is that 2 distinct regular points under
the projection of a Skyrme map to the 2-sphere have preimages in
3-space with linking number $Q=B$.
We further make the interpretation of two antipodal points on the
2-sphere being vortex zeros. So far all is done with rigor.
Finally, we conjecture that we can interpret the topological degree of
a Skyrmion map as the product of winding numbers of two vortex lines,
summing over clusters of wound vortices.
This paper is organized as follows.
In sec.~\ref{sec:maps}, after giving the maps, we present our theorem,
corollary and conjecture.
In sec.~\ref{sec:examples}, we illustrate the theorem and conjecture
with examples of a toroidal vortex and rational map Skyrmions.
Sec.~\ref{sec:discussion} is devoted to discussion and outlook.
\section{The maps}\label{sec:maps}
\subsection{Theorem and conjecture}
We begin with considering a map from $U: X\to N$ where
$X=\mathbb{R}^3\cup\,\{\infty\}\simeq S^3$ the one-point compactified
3-dimensional configuration space and $N=S^3$ is the target space,
which we take to be the 3-sphere in this paper.
Each space has an associated metric, that is $(X,g)$ and $(N,h)$.
The map $U$ is characterized by the third homotopy group,
$B\in\pi_3(S^3)=\mathbb{Z}$ with $B$ the topological degree, which is
usually called the baryon number.
Next, we will consider the Hopf map $H: S^3\to S^2$, which is due to
the Hopf fibration $S^1\hookrightarrow S^3\overset{H}{\to}S^2$.
The explicit form of the Hopf map is
\begin{eqnarray}
H^a(\boldsymbol{\psi},\ol{\boldsymbol{\psi}}) = \boldsymbol{\psi}^\dag \tau^a \boldsymbol{\psi}, \qquad
a=1,2,3, \label{eq:Hopfmap}
\end{eqnarray}
with $\boldsymbol{\psi}$ living on the complexified 1-sphere:
\begin{align}
\boldsymbol{\psi} &=
\begin{pmatrix}
\psi_1\\
\psi_2\\
\end{pmatrix}, \qquad
\psi_{1,2}\in\mathbb{C}, \label{eq:bpsi_def}\\
&\boldsymbol{\psi}^\dag\boldsymbol{\psi} = |\psi_1|^2 + |\psi_2|^2 = 1, \label{eq:S3constraint}
\end{align}
which is exactly a real 3-sphere
and $\tau^a$ are the Pauli SU(2) matrices.
The topological charge of the Hopf map is
\begin{eqnarray}
Q \in \pi_3(S^2),
\end{eqnarray}
but it is not the degree of the mapping as it is a mapping between
spaces of different dimensions.
The map $U:X\to N=S^3$ is given by
\begin{eqnarray}
U(\mathbf{x}) =
\begin{pmatrix}
\boldsymbol{\psi} & -\i\tau^2\ol{\boldsymbol{\psi}}
\end{pmatrix}
=
\begin{pmatrix}
\psi_1 & -\ol{\psi}_2\\
\psi_2 & \ol{\psi}_1
\end{pmatrix}, \label{eq:Udef}
\end{eqnarray}
which thus maps $\mathbb{R}^3\cup\{\infty\}=X\to S^3$, due to the
constraint \eqref{eq:S3constraint}.
The degree of the mapping $U$ from $X$ to $N$ can be calculated as the
pullback of the normalized volume form on $N$, $\Omega_N$
by $U$:
\begin{align}
B &= \int_X U^*\Omega_N \nonumber\\
&= -\frac{1}{24\pi^2}\int_X
{\rm tr}\,\left(U^\dag\partial_i U U^\dag\partial_j U U^\dag\partial_k U\right)
\d{x}^i\wedge\d{x}^j\wedge\d{x}^k\nonumber\\
&= \frac{1}{4\pi^2}\int_X
(\boldsymbol{\psi}^\dag\partial_i\boldsymbol{\psi})(\partial_j\boldsymbol{\psi}^\dag\partial_k\boldsymbol{\psi})\;
\d{x}^i\wedge\d{x}^j\wedge\d{x}^k. \label{eq:B}
\end{align}
Finally, we are interested in the map
$\boldsymbol{\phi}\equiv H\circ\, U: X\to S^2$, which is the composite map of $U$
and $H$.
This takes a field configuration on $X$, maps it with degree $B$ to
$N$ and then to $S^2$.
The Hopf charge (or Hopf index) of the above described map, $\boldsymbol{\phi}$,
is given by \cite{Gladikowski:1996mb,Faddeev:1996zj},
\begin{eqnarray}
Q = -\frac{1}{4\pi^2}\int_X A\wedge F,
\label{eq:Qcharge}
\end{eqnarray}
where the field-strength tensor in terms of the coordinates on $S^2$
is \cite{Gladikowski:1996mb,Faddeev:1996zj},
\begin{eqnarray}
F = \frac14\boldsymbol{\phi}\cdot\partial_i\boldsymbol{\phi}\times\partial_j\boldsymbol{\phi}\;
\d{x}^i\wedge\d{x}^j, \label{eq:FijS2}
\end{eqnarray}
and $A$ is a corresponding gauge field $F=\d{A}$.
However, it is not possible to write a \emph{local} expression for the
Chern--Simons action \eqref{eq:Qcharge} in terms of the coordinates,
$\boldsymbol{\phi}$, on $S^2$, because it vanishes identically, as is well known.
\begin{theorem}\label{thm:1}
A map $U:\mathbb{R}^3\cup\{\infty\}\to S^3$ with topological degree
$B$ under the Hopf map $H:S^3\to S^2$ has Hopf charge $Q=B$ and thus
distinct regular points on $S^2$ under the composite map
$H\circ U:\mathbb{R}^3\cup\{\infty\}\to S^2$ have preimages on
$\mathbb{R}^3\cup\{\infty\}$ that are linked $Q=B$ times.
\end{theorem}
\noindent\emph{Proof}:
We calculate the field-strength tensor \eqref{eq:FijS2} in terms of
the coordinates on $S^3$ via the Hopf map \eqref{eq:Hopfmap} as
\begin{align}
F &= \frac{1}{4}\epsilon^{a b c}
(\boldsymbol{\psi}^\dag\tau^a\boldsymbol{\psi})\partial_i(\boldsymbol{\psi}^\dag\tau^b\boldsymbol{\psi})\partial_j(\boldsymbol{\psi}^\dag\tau^c\boldsymbol{\psi})\;
\d{x}^i\wedge\d{x}^j\nonumber\\
&= \i\left(
\psi_2\ol{\psi}_2\partial_{i}\psi_1\partial_{j}\ol{\psi}_1
-\psi_2\ol{\psi}_1\partial_{i}\psi_1\partial_{j}\ol{\psi}_2
+\psi_1\ol{\psi}_1\partial_{i}\psi_2\partial_{j}\ol{\psi}_2
-\psi_1\ol{\psi}_2\partial_{i}\psi_2\partial_{j}\ol{\psi}_1
\right) \d{x}^i\wedge\d{x}^j \nonumber\\
&= -\i\partial_{i}\boldsymbol{\psi}^\dag\partial_{j}\boldsymbol{\psi}\;
\d{x}^i\wedge\d{x}^j, \label{eq:FijS3}
\end{align}
where we have used the constraint \eqref{eq:S3constraint}.
The above-calculated field-strength tensor can also readily be
obtained from the following gauge field
\begin{eqnarray}
A = -\frac{\i}{2}\left(\boldsymbol{\psi}^\dag\partial_i\boldsymbol{\psi}
- \partial_i\boldsymbol{\psi}^\dag\boldsymbol{\psi}\right) \d{x}^i. \label{eq:AiS3}
\end{eqnarray}
We can now explicitly evaluate the Hopf charge \eqref{eq:Qcharge} with
the field-strength tensor \eqref{eq:FijS3} and the corresponding gauge
field \eqref{eq:AiS3} and a simple calculation shows that it reduces
to
\begin{eqnarray}
Q = \frac{1}{4\pi^2}\int_X
(\boldsymbol{\psi}^\dag\partial_i\boldsymbol{\psi})(\partial_j\boldsymbol{\psi}^\dag\partial_k\boldsymbol{\psi})\;
\d{x}^i\wedge\d{x}^j\wedge\d{x}^k, \label{eq:Q}
\end{eqnarray}
which is exactly the same as the baryon charge \eqref{eq:B}.
Since the baryon number $B$ \eqref{eq:B} and the Hopf charge
$Q$ \eqref{eq:Q} are given by the same integral expressions, then
$B=Q$ follows.
The final step is to use the fact that preimages of two distinct
regular points on $S^2$ are linked $Q=B$ times under the Hopf
map \eqref{eq:Hopfmap} and hence theorem \ref{thm:1}
follows. \hfill $\square$
\bigskip
Now, if we pick any two regular (constant) points on $S^2$ as
\begin{eqnarray}
\boldsymbol{\phi}_1\in S^2, \qquad
\boldsymbol{\phi}_2\in S^2,
\end{eqnarray}
their preimages under the Hopf map composed with $U$, i.e.~$\boldsymbol{\phi}=H\circ U$, have
linking number $Q=B$.
Since this holds for any two regular points, it also holds for the
following case: Take the two points on $S^2$ to be
\begin{eqnarray}
\boldsymbol{\phi}_1 = H(\boldsymbol{\psi}_1,\ol{\boldsymbol{\psi}}_1)
= (0,0,-1)^{\rm T}, \qquad
\boldsymbol{\phi}_2 = H(\boldsymbol{\psi}_2,\ol{\boldsymbol{\psi}}_2)
= (0,0,1)^{\rm T}, \label{eq:s12}
\end{eqnarray}
with
\begin{eqnarray}
\boldsymbol{\psi}_1 =
\begin{pmatrix}
0\\
1
\end{pmatrix}, \qquad
\boldsymbol{\psi}_2 =
\begin{pmatrix}
1\\
0
\end{pmatrix}.
\label{eq:bpsi_points}
\end{eqnarray}
\emph{Any} two regular points will have linking number $Q=B$; however,
in order to interpret the preimages of the two points on $S^2$ as two
vortex lines, we further need to require orthogonality
\begin{eqnarray}
\boldsymbol{\psi}_1^\dag\boldsymbol{\psi}_2 = 0,
\end{eqnarray}
which obviously holds for the two points in
eq.~\eqref{eq:bpsi_points}.
Clearly it is possible that one or both of the points \eqref{eq:s12}
are not regular points.
Since the canonical mapping may not correspond to regular points under
the Hopf map, we propose to rotate the 2-sphere until two regular
points are found: $\boldsymbol{\phi}^M=M\boldsymbol{\phi}$ : $X\to S^2$ as
\begin{eqnarray}
\boldsymbol{\phi}^M = M H(\boldsymbol{\psi},\bar{\boldsymbol{\psi}}).
\end{eqnarray}
The most general rotation of the 2-sphere can be done with three Euler
angles and the following parametrization
\begin{align}
M_{\alpha\beta\gamma} &= M_z(\gamma)M_x(\beta)M_z(\alpha), \label{eq:Mabg}\\
M_z(\alpha) &=
\begin{pmatrix}
\cos\alpha & \sin\alpha & 0\\
-\sin\alpha & \cos\alpha & 0\\
0 & 0 & 1
\end{pmatrix},\qquad
M_x(\beta) =
\begin{pmatrix}
1 & 0 & 0\\
0 & \cos\beta & \sin\beta\\
0 & -\sin\beta & \cos\beta
\end{pmatrix}.
\end{align}
A particularly useful rotation brings the north and south poles to the
equator of the 2-sphere:
\begin{eqnarray}
M_{0\frac{\pi}{2}\gamma} =
\begin{pmatrix}
\cos\gamma & 0 & \sin\gamma\\
-\sin\gamma & 0 & \cos\gamma\\
0 & -1 & 0
\end{pmatrix},
\end{eqnarray}
which yields a 1-parameter family of rotations of the north and south
poles to the equator with angle $\gamma\in[0,2\pi)$:
\begin{eqnarray}
\boldsymbol{\phi}_{1,2}^{M_{0\frac{\pi}{2}\gamma}} =
M_{0\frac{\pi}{2}\gamma}\boldsymbol{\phi}_{1,2} = \mp
\begin{pmatrix}
\sin\gamma\\
\cos\gamma\\
0
\end{pmatrix},
\label{eq:rotated_bphi_gamma_family}
\end{eqnarray}
where the upper sign corresponds to $\boldsymbol{\phi}_1$ and the lower sign
$\boldsymbol{\phi}_2$.
Another useful rotation is
\begin{eqnarray}
M_{0\beta0} = M_x(\beta),
\end{eqnarray}
which yields a slightly different 1-parameter family of rotations
\begin{eqnarray}
\boldsymbol{\phi}_{1,2}^{M_{0\beta0}} = M_{0\beta0}\boldsymbol{\phi}_{1,2} = \mp
\begin{pmatrix}
0\\
\sin\beta\\
\cos\beta
\end{pmatrix},
\label{eq:rotated_bphi_beta_family}
\end{eqnarray}
where again the upper sign corresponds to $\boldsymbol{\phi}_1$ and the lower sign
$\boldsymbol{\phi}_2$.
If we now take the parametrization of $\boldsymbol{\psi}$
\begin{eqnarray}
\boldsymbol{\psi} =
\begin{pmatrix}
e^{\i\chi}\cos f\\
e^{\i\vartheta}\sin f
\end{pmatrix}, \label{eq:vortex_parm}
\end{eqnarray}
we may interpret the two points, $\boldsymbol{\psi}_1$ and $\boldsymbol{\psi}_2$, of
eq.~\eqref{eq:bpsi_points} as the vortex zeros of the fields $\psi_1$
and $\psi_2$, respectively, see eq.~\eqref{eq:bpsi_def}.
The composite map $\boldsymbol{\phi}:X\to S^2$ of eq.~\eqref{eq:vortex_parm} thus
reads
\begin{eqnarray}
\boldsymbol{\phi} =
\begin{pmatrix}
\sin 2f \cos(\vartheta - \chi)\\
\sin 2f \sin(\vartheta - \chi)\\
\cos 2f
\end{pmatrix},
\end{eqnarray}
from which it is clear that the two points $\boldsymbol{\phi}_{1,2}\in S^2$ of
eq.~\eqref{eq:s12} indeed are independent of $\vartheta$ and $\chi$ as
they correspond to $f=\frac\pi2$ and $f=0$, respectively.
These two vortex zeros are canonically mapped to the south and north
poles, respectively.
Using now the rotated map $\boldsymbol{\phi}^{M_{0\frac\pi2\gamma}}$ of
eq.~\eqref{eq:rotated_bphi_gamma_family}, the
vortex \eqref{eq:vortex_parm} is mapped to
\begin{eqnarray}
\boldsymbol{\phi}^{M_{0\frac\pi2\gamma}} =
\begin{pmatrix}
\sin\gamma\cos2f + \cos\gamma\sin2f\cos(\vartheta-\chi)\\
\cos\gamma\cos2f - \sin\gamma\sin2f\cos(\vartheta-\chi)\\
-\sin2f\sin(\vartheta-\chi)
\end{pmatrix},
\end{eqnarray}
which at $f=\frac\pi2,0$ equals
eq.~\eqref{eq:rotated_bphi_gamma_family} by construction.
\begin{corollary}\label{crl:1}
Two vortex lines (zeros), $\boldsymbol{\psi}_1$ and $\boldsymbol{\psi}_2$ of
$\boldsymbol{\psi}\in X=\mathbb{R}^3\cup\{\infty\}$
are mapped to two distinct points on $S^2$ under the Hopf map
$H\circ U:X\to S^2$ and hence their preimages in $X$ are linked $Q=B$
times due to theorem \ref{thm:1}.
\end{corollary}
We may take a map $U:X\to N$ with topological degree $B$, project it
onto $S^2$ with $H$ and select two regular points under the latter
mapping
\begin{eqnarray}
\boldsymbol{\phi}_{1,2}^{M_{0\frac\pi2\gamma}} = H\circ U,
\end{eqnarray}
where we have performed a rotation using
eq.~\eqref{eq:rotated_bphi_gamma_family} and chosen an appropriate
value for $\gamma$ such that the mapping is regular.
Now due to the Corollary \ref{crl:1}, we can follow the way back to
$X$ with the inverse mappings and interpret the two points as vortex
lines
\begin{eqnarray}
\mathbf{x}_{1,2}(\tau,\ell) = (H\circ U)^{-1}\left(\boldsymbol{\phi}_{1,2}^{M_{0\frac\pi2\gamma}}\right),
\end{eqnarray}
which yields two vortex lines with some parametrization $\tau$ and we
have included an index $\ell$ in case the preimages separate into several
clusters.
We are now ready to make the following conjecture.
\begin{conjecture}\label{cjt:1}
A map $U:\mathbb{R}^3\cup\{\infty\}=X\to S^3$ having degree
$B$ \eqref{eq:B} can be interpreted as two vortices in $\psi_1$ and
$\psi_2$ of $\boldsymbol{\psi}\in S^3$ which in each cluster topologically have
winding numbers $p_\ell$ and $q_\ell$, respectively. Then due to
Corollary \ref{crl:1}, the linking number $Q$ \eqref{eq:Q} is
$\sum_{\ell}p_{\ell}q_{\ell}$ and due to theorem \ref{thm:1},
$B=Q=\sum_{\ell}p_{\ell}q_{\ell}$.
\end{conjecture}
\subsection{The rational map}
We will now consider $U$ to be in a class of maps, where it is a
radial suspension in $\mathbb{R}^3$ and the tangent directions are
described by rational maps between Riemann spheres.
The rational map Ansatz is given by
\begin{eqnarray}
U = \exp\left(\i f(r) \mathbf{n}\cdot\boldsymbol{\tau}\right),
\label{eq:UR}
\end{eqnarray}
with
\begin{eqnarray}
\mathbf{n} =
\left(
\frac{R+\ol{R}}{1+|R|^2},
\frac{\i(\ol{R}-R)}{1+|R|^2},
\frac{1-|R|^2}{1+|R|^2}
\right),
\end{eqnarray}
where $R=R(z)$ is a holomorphic function of the Riemann sphere
coordinate $z=e^{\i\phi}\tan\tfrac{\theta}{2}$ and $(r,\theta,\phi)$ are
standard spherical coordinates in $\mathbb{R}^3$.
Using eq.~\eqref{eq:Udef}, we get
\begin{eqnarray}
\boldsymbol{\psi} = \frac{1}{1+|R|^2}
\begin{pmatrix}
e^{\i f} + |R|^2 e^{-\i f}\\
\i 2R\sin f
\end{pmatrix},
\end{eqnarray}
which we map to the 2-sphere using $H$ of eq.~\eqref{eq:Hopfmap},
yielding
\begin{eqnarray}
\boldsymbol{\phi} = \frac{1}{(1+|R|^2)^2}
\begin{pmatrix}
-2\Im(R)(1+|R|^2)\sin 2f + 4\Re(R)(1-|R|^2)\sin^2f\\
2\Re(R)(1+|R|^2)\sin 2f + 4\Im(R)(1-|R|^2)\sin^2f\\
4|R|^2\cos 2f + (1-|R|^2)^2
\end{pmatrix}. \label{eq:HopfR}
\end{eqnarray}
It is easy to check that the above $\boldsymbol{\phi}$ is a real-valued 3-vector
of unit norm, thus living on $S^2$.
One can also readily verify that $f=\frac\pi2,0$ correspond to
$\boldsymbol{\phi}_{1,2}$, respectively.
\section{Examples}\label{sec:examples}
\subsection{Toroidal vortex}
Let us consider a simple example inspired by
ref.~\cite{Gudnason:2016yix,Gudnason:2014gla,Gudnason:2014hsa,Gudnason:2014jga,Gudnason:2018oyx}
where a vortex ring is twisted $P$ times, yielding baryon number $P$:
\begin{eqnarray}
\boldsymbol{\psi} =
\begin{pmatrix}
\cos f(r) + \i\sin f(r)\cos\theta\\
\sin f(r)\sin\theta e^{\i P\phi}
\end{pmatrix}.
\end{eqnarray}
\begin{figure}[!htp]
\begin{center}
\includegraphics[width=0.5\linewidth]{T4B31n}
\caption{Vortex ring with $P=3$ twists. Figure taken from
ref.~\cite{Gudnason:2014jga}.}
\label{fig:tvtx}
\end{center}
\end{figure}
The energy functional that gives rise to toroidal vortices is given by
\cite{Gudnason:2016yix}
\begin{eqnarray}
E[\boldsymbol{\psi}] = \|\d\boldsymbol{\psi}\|_{L^2(X)}^2
+ \frac14\|\boldsymbol{\psi}^*\d{\mu}\|_{L^2(X)}^2
+ \int_X*V(\boldsymbol{\psi}), \qquad
V(\boldsymbol{\psi}) = \frac{m^2}{2}(1 - |\psi_1|^2), \label{eq:toroidal}
\end{eqnarray}
where $\mu$ is the Maurer-Cartan form on $N$ (SU(2)) and the second term
is the norm-squared of the pullback of the exterior derivative of the
Maurer-Cartan form on $N$ by $\boldsymbol{\psi}$.
$m$ is a positive constant which must be large enough $m>m_{\rm
crit}$.
Finally $*$ denotes the Hodge dual such that $*1$ gives the volume
form (and in this case on $X$).
The baryon charge density isosurface is shown in fig.~\ref{fig:tvtx},
which is taken from ref.~\cite{Gudnason:2014jga} where further details
can be found.
It is easy to check that the topological degree \eqref{eq:B} is given
by
\begin{align}
B &= -\frac{P}{\pi}\int_0^\pi \sin\theta\,\d{\theta}
\int_0^\infty \sin^2f(r) \partial_rf(r)\,\d{r} \nonumber\\
&= -\frac{P}{2\pi}\left[2f(r) - \sin 2f(r)\right]_{f(0)}^{f(\infty)} \nonumber\\
&= P,
\end{align}
where we have used the boundary conditions $f(0)=\pi$ and
$f(\infty)=0$.
Under the map \eqref{eq:Hopfmap} we have
\begin{eqnarray}
\boldsymbol{\phi} =
\begin{pmatrix}
\sin\theta\cos P\phi\sin 2f(r) + \sin 2\theta\sin P\phi\sin^2f(r)\\
\sin\theta\sin P\phi\sin 2f(r) - \sin 2\theta\cos P\phi\sin^2f(r)\\
\sin^2\theta\cos 2f(r) + \cos^2\theta
\end{pmatrix}.
\end{eqnarray}
An obvious choice would be to pick the two points $\boldsymbol{\phi}_{1,2}$ of
eq.~\eqref{eq:s12} on the 2-sphere, yielding
\begin{align}
\cos^2f = \cos^2\theta &= 0, \qquad (\boldsymbol{\phi}=\boldsymbol{\phi}_1,
\; \psi_1=0)\\
\sin^2 f\sin^2\theta &= 0, \qquad (\boldsymbol{\phi}=\boldsymbol{\phi}_2,\; \psi_2=0).
\end{align}
Let us start with the latter equation; $\sin f=0$ corresponds to the vacuum
at $r\to\infty$ and the origin where $f=\pi$. Hence in the interior of
$\mathbb{R}^3\setminus\{0\}$, $\sin f\neq 0$ and thus $\sin\theta=0$
corresponds to the $x^3$ axis.
We call it a ``vacuum vortex,'' specified by $\psi_2=0$.
On $X\simeq S^3$ this is topologically a circle ($S^1$) going from the
north pole of the 3-sphere (the vacuum) from $x^3=-\infty$ to the
origin $\mathbf{x}=\mathbf{0}$ which is the south pole of the 3-sphere, and
then towards $x^3\to+\infty$ back to the north pole.
The former equation has two conditions yielding $\theta=\frac\pi2$ and
$f(r)=\frac\pi2$, which is a circle in the $(x^1,x^2)$-plane,
representing a ring-shaped ``physical'' vortex specified by
$\psi_1=0$.
This obviously yields a linking number equal to 1.
Although this is a natural interpretation of where the two vortices
might be in this field configuration, $\boldsymbol{\phi}_2$ is not a regular point
of the mapping when $P>1$.
\begin{figure}[!htp]
\begin{center}
\mbox{\subfloat[$\beta=0$]{\includegraphics[height=0.4\linewidth]{tvtxP3_1_crop}}\ \
\subfloat[$\beta=\frac\pi6$]{\includegraphics[height=0.3\linewidth]{tvtxP3_2_crop}}\ \
\subfloat[$\beta=\frac\pi2$]{\includegraphics[height=0.28\linewidth]{tvtxP3_3_crop}}}
\caption{The toroidal vortex (black) with $P=3$ twists and its vacuum
vortex (red) at rotation angles $\beta=0,\frac\pi6,\frac\pi2$.
The angle $\beta=0$ corresponds to no rotation and shows the
degeneracy of the vacuum vortex which is due to the north pole on the
2-sphere not being a regular point under the Hopf map. }
\label{fig:tvtx_beta}
\end{center}
\end{figure}
In order to move away from the point where the $P$ vortices linking
the ``vacuum vortex'' are degenerate, we pick two points on the
2-sphere after a rotation by an angle $\beta$:
\begin{eqnarray}
\boldsymbol{\phi}_{1,2}^{M_{0\beta0}},
\end{eqnarray}
with $M_{\alpha\beta\gamma}$ given by eq.~\eqref{eq:Mabg} and
$\boldsymbol{\phi}_{1,2}$ given by eq.~\eqref{eq:s12}.
The expression is not particularly illuminating, so we will just plot
the preimages of the two points on the rotated 2-sphere in
fig.~\ref{fig:tvtx_beta}.
Plotting the preimage of one of the points on the 2-sphere amounts to
finding the solutions to the inverse map
\begin{eqnarray}
\mathbf{x} = \boldsymbol{\phi}^{-1}(\boldsymbol{\phi}_a),
\end{eqnarray}
with $\boldsymbol{\phi}_a$ a chosen point on $S^2\ni\boldsymbol{\phi}_a=\boldsymbol{\phi}(\mathbf{x})$.
In practice, the solution to this problem is a hairline and not easy
to see on a 3-dimensional graph, so we plot instead a surface that
corresponds to 1\% of the neighborhood around $\boldsymbol{\phi}_a$.
In particular, if we want to plot $\mathbf{x}=\boldsymbol{\phi}^{-1}((0,0,-1))$,
we instead plot the surface $\boldsymbol{\phi}^{-1}\big((a,b,-\sqrt{1-a^2-b^2})\big)$ with
$\sqrt{a^2+b^2}=0.01$.
Fig.~\ref{fig:tvtx_beta}(a) shows the unrotated degenerate case, where
the vacuum vortex with winding number 3 is coincident -- this thus
corresponds to a point on the 2-sphere which is not regular under the
Hopf map \eqref{eq:Hopfmap}.
In fig.~\ref{fig:tvtx_beta}(b) we have increased $\beta$ to
$\beta=\frac\pi6$ and we have moved away from the degeneracy of the
vacuum vortex. Now we can clearly see that the vortex ring, which is
the black circle depicted in fig.~\ref{fig:tvtx_beta}(a), is linked
3 times with the vacuum vortex (red).
Note that both preimages are themselves not knots, but indeed
unknots.
A comment is in order about the black line, i.e.~the vortex ring
itself in fig.~\ref{fig:tvtx_beta}. In fig.~\ref{fig:tvtx_beta}(a) the
preimage shows the center of the vortex and what one normally would
associate with the position of the vortex; unfortunately the antipodal
point on the 2-sphere under the Hopf map is not regular, as mentioned
above.
Once we rotate the two points, $\boldsymbol{\phi}_{1,2}$, keeping them antipodal
on the 2-sphere, we also move the vortex point itself and the preimage
runs $P$ times around the vortex center line on fixed level sets of
the vortex field.
At the $\beta=\frac\pi2$ rotation, we have rotated all the way to the
equator, which corresponds to $\gamma=0$ of
eq.~\eqref{eq:rotated_bphi_gamma_family}.
Here the vortex line and the vacuum vortex become identical, except
that one is rotated by $\pi/P=\pi/3$ with respect to the other.
This example thus confirms conjecture \ref{cjt:1} with the vortex ring
having $q=1$ and the vacuum vortex having $p=3$, yielding $B=Q=pq=3$.
Before moving on to the next example, let us make one more comment.
The energy \eqref{eq:toroidal} is an example where the potential term
is asymmetric in $\psi_1$ and $\psi_2$, so the physical vortex zeros
correspond to $\psi_1=0$ and the vacuum vortex zeros to $\psi_2=0$.
Instead, we could consider the potential term which is symmetric
in $\psi_1$ and $\psi_2$
\cite{Gudnason:2014gla,Gudnason:2014hsa,Gudnason:2014jga,Gudnason:2018oyx}
\begin{eqnarray}
V(\boldsymbol{\psi}) = \pm\frac{m^2}{8}\big[1 - (\boldsymbol{\psi}^\dag \sigma_3 \boldsymbol{\psi})^2\big]
= \pm \frac{m^2}{2} |\psi_1|^2 |\psi_2|^2. \label{eq:toroidal2}
\end{eqnarray}
For the positive sign, there are two vacua: $\boldsymbol{\psi}=(e^{\i\alpha},0)$
and $(0,e^{\i\alpha})$
\cite{Gudnason:2014gla,Gudnason:2014hsa,Gudnason:2014jga,Gudnason:2018oyx},
while for the negative sign the vacuum is: $S^1\times S^1$,
$|\psi_1|^2=|\psi_2|^2=1/2$.
In the former case, the situation is similar to that of the
potential \eqref{eq:toroidal} admitting physical vortex zeros and
vacuum vortex zeros, while in the latter case both zeros can be
physical vortices.
This potential is motivated by two-component Bose-Einstein condensates
(BEC) \cite{Kasamatsu}, and we called the model the BEC-Skyrme model,
see Appendix A of ref.~\cite{Gudnason:2014hsa} for a more precise
correspondence.
In fact, a Skyrmion in two-component BECs was constructed as
a link of two kinds of vortices
\cite{Ruostekoski:2001fc,Battye:2001ec,Nitta:2012hy}.
\subsection{Rational map Skyrmions}
We will now illustrate theorem \ref{thm:1} and conjecture \ref{cjt:1}
using the rational map approximation to Skyrmion
solutions \cite{Houghton:1997kg}.
The energy functional is now simply given by
\begin{eqnarray}
E[\boldsymbol{\psi}] = \|\d\boldsymbol{\psi}\|_{L^2(X)}^2
+ \frac14\|\boldsymbol{\psi}^*\d{\mu}\|_{L^2(X)}^2,
\label{eq:massless_Skyrme}
\end{eqnarray}
see the previous subsection for an explanation.
Inserting the rational map Ansatz \eqref{eq:UR} yields
\begin{eqnarray}
E[f,R] = \int_0^\infty\left(f^{'2} + 2B\sin^2f(f^{'2}+1)
+ \mathcal{I}[R]\frac{\sin^4f}{r^4}\right) r^2\; \d{r},
\end{eqnarray}
with
\begin{eqnarray}
\mathcal{I}[R] = \frac{1}{4\pi}\|R^*\Omega_N\|_{L^2(S^2)}^2,
\label{eq:calI}
\end{eqnarray}
where the only way the rational map enters the energy functional is
through this integral which is the norm-squared of the pullback of the
area form on $N$ by $R$, and $B$ is the degree of the rational map $R$.
We will thus utilize the map \eqref{eq:HopfR} with $R(z)$ being the
rational map of degree $B$.
Plotting the points \eqref{eq:s12} corresponds to a vortex (which is
the antivacuum of the Skyrmion) and the vacuum vortex (which contains the
vacuum).
In all cases, except the $B=1$ case, the vacuum vortex does not
correspond to a regular point under the map \eqref{eq:HopfR} and hence
the preimages degenerate, making it impossible to count the linking
number -- which indeed is in accord with theorem \ref{thm:1}.
For certain $B$, even the vortex (the antivacuum) does not correspond
to a regular point under the mapping.
Therefore, we turn to (two) antipodal points on the 2-sphere, which do
correspond to regular points under the map \eqref{eq:HopfR} by
rotating the 2-sphere using
eqs.~\eqref{eq:rotated_bphi_gamma_family}, \eqref{eq:rotated_bphi_beta_family}
and the
linking numbers thus exactly equal the baryon numbers of the
solitons.
In order to facilitate the visualization of the preimages of the
soliton solutions in terms of Skyrmion maps, it will prove helpful to
plot a fixed level set of the baryon charge density so as to get a
frame of reference for the preimages.
The baryon charge density is given by
\begin{eqnarray}
\mathcal{B} = *\boldsymbol{\psi}^*\Omega_N,
\end{eqnarray}
which is a 0-form (scalar quantity) and is calculated as the Hodge
dual on $X$ of the pullback of the normalized volume form on $N$ by
the map $\boldsymbol{\psi}$.
We are now ready to present the results of various preimages of the
points $\boldsymbol{\phi}_{1,2}^{M_{0\frac\pi2\gamma}}$ and
$\boldsymbol{\phi}_{1,2}^{M_{0\beta0}}$ for rational map Skyrmions with
$B=1,2,\ldots,8$.
We will display the degenerate points $\boldsymbol{\phi}_{1,2}$ just for reference.
\begin{figure}[!htp]
\begin{center}
\mbox{\subfloat[$\boldsymbol{\phi}_{1,2}$]{\includegraphics[scale=0.11]{B1_link4_crop}}
\subfloat[$\boldsymbol{\phi}_{1,2}^{M_{0\frac\pi2\frac\pi4}}$]{\includegraphics[scale=0.12]{B1_link3_crop}}}
\caption{Links for the $B=1$ Skyrmion.
(a) A link between the vortex ring (yellow) and the vacuum vortex
(magenta). (b) A link between the vortex
(yellow) and the vacuum vortex (blue) which is a closed loop.
The gray isosurface is the baryon charge density illustrating the
shape of the Skyrmion.
}
\label{fig:RMB1}
\end{center}
\end{figure}
The rational map Skyrmion with topological degree 1 is given by the
spherically symmetric rational map \cite{Houghton:1997kg}
\begin{eqnarray}
R_1(z) = z.
\end{eqnarray}
Fig.~\ref{fig:RMB1} shows the preimages of $\boldsymbol{\phi}_{1,2}$ and again
after a rotation using the rotation matrix $M_{0\frac\pi2\frac\pi4}$
of eq.~\eqref{eq:rotated_bphi_gamma_family} has been applied.
In this case, and only in this case, $\boldsymbol{\phi}_{1,2}$ are regular points
under the mapping \eqref{eq:HopfR}.
The vacuum vortex (magenta) in fig.~\ref{fig:RMB1}(a) goes from
$x^3=-\infty$ to $x^3=\infty$, which are identified by the one-point
compactification and hence it is a vortex ring, linking the other
vortex ring (yellow) exactly once, as expected.
In order to see what happens to the preimages once the rotation of the
2-sphere has been applied, we show
$\boldsymbol{\phi}_{1,2}^{M_{0\frac\pi2\frac\pi4}}$ in fig.~\ref{fig:RMB1}(b).
The two points are still antipodal on the 2-sphere in order to lend
the interpretation as ``vortices,'' but it is clear that the vortex
(yellow) is slightly shifted and the vacuum vortex (blue) is now
closing in the bulk of $\mathbb{R}^3$.
Topologically it is the same thing of course and since both points are
regular, they both give linking number $Q=B=1$ as theorem \ref{thm:1}
states and the interpretation as vortex links according to
conjecture \ref{cjt:1} is also clear.
\begin{figure}[!htp]
\begin{center}
\mbox{\subfloat[$\boldsymbol{\phi}_{1,2}$]{\includegraphics[scale=0.08]{B2_link4_crop}}
\subfloat[$\boldsymbol{\phi}_{1,2}^{M_{0\frac\pi60}}$]{\includegraphics[scale=0.14]{B2_link1_crop}}
\subfloat[$\boldsymbol{\phi}_{1,2}^{M_{0\frac\pi20}}$]{\includegraphics[scale=0.12]{B2_link2_crop}}}
\caption{Links for the $B=2$ Skyrmion.
(a) A link between the vortex ring (yellow) and the vacuum vortex
(magenta) which is degenerate. (b,c) Nondegenerate links between the
vortex and vacuum vortex, which are both closed loops.
The gray isosurface is the baryon charge density illustrating the
shape of the Skyrmion.
}
\label{fig:RMB2}
\end{center}
\end{figure}
The rational map Skyrmion with topological degree 2 is given by the
axially symmetric rational map \cite{Houghton:1997kg}
\begin{eqnarray}
R_2(z) = z^2.
\end{eqnarray}
Fig.~\ref{fig:RMB2} shows preimages of $\boldsymbol{\phi}_{1,2}$ and again after a
rotation by $\beta=\frac\pi6$ and $\beta=\frac\pi2$.
The vacuum vortex (magenta) in fig.~\ref{fig:RMB2}(a) is degenerate
and this is because the point on the 2-sphere is not a regular point
under the mapping \eqref{eq:HopfR}, as mentioned already.
Rotating the points, keeping them mutually antipodal, the preimages of
fig.~\ref{fig:RMB2}(b) are perfectly linked twice and in
fig.~\ref{fig:RMB2}(c) the vacuum vortex becomes identical with the
vortex, albeit with a $\pi/2$ rotation with respect to the latter.
This example confirms conjecture \ref{cjt:1} with the vortex ring
having $q=1$ and the vacuum vortex having $p=2$, yielding $B=Q=pq=2$.
\begin{figure}[!htp]
\begin{center}
\mbox{\subfloat[$\boldsymbol{\phi}_{1,2}$]{\includegraphics[scale=0.085]{B3_link4_crop}}
\subfloat[$\boldsymbol{\phi}_{1,2}^{M_{0\frac\pi60}}$]{\includegraphics[scale=0.13]{B3_link1_crop}}}
\mbox{\subfloat[$\boldsymbol{\phi}_{1,2}^{M_{0\frac\pi2\frac\pi4}}$]{\includegraphics[scale=0.13]{B3_link3_crop}}}
\caption{Links for the $B=3$ Skyrmion.
(a) A link between the vortex ring (yellow) and the vacuum vortex
(magenta) which is degenerate. (b,c) Nondegenerate links between the
vortex and vacuum vortex, which are both closed loops.
The gray isosurface is the baryon charge density illustrating the
shape of the Skyrmion.
}
\label{fig:RMB3}
\end{center}
\end{figure}
The next soliton is the rational map Skyrmion of topological degree
3. The rational map is given by \cite{Houghton:1997kg}
\begin{eqnarray}
R_3(z) = \frac{\i\sqrt{3}z^2 - 1}{z(z^2 - \i\sqrt{3})},
\end{eqnarray}
and possesses tetrahedral symmetry.
Fig.~\ref{fig:RMB3} shows preimages of $\boldsymbol{\phi}_{1,2}$ as well as two
rotations by $\beta=\frac\pi6$ and by $\gamma=\frac\pi4$.
The vacuum vortex (magenta) in fig.~\ref{fig:RMB3}(a) is still
degenerate as mentioned above.
There is now evidence for the vacuum vortex of rational map Skyrmions
to be $B$ intersecting (infinite) lines coming from and returning to
$\partial\mathbb{R}^3$.
After a suitable rotation as shown in fig.~\ref{fig:RMB3}(b,c) the
linking number of two antipodal points on the 2-sphere is now equal to
three, as promised.
This example confirms conjecture \ref{cjt:1} with the vortex ring
having $q=1$ and the vacuum vortex having $p=3$, yielding $B=Q=pq=3$.
\begin{figure}[!htp]
\begin{center}
\mbox{\subfloat[$\boldsymbol{\phi}_{1,2}$]{\includegraphics[scale=0.095]{B4_link4_crop}}
\subfloat[$\boldsymbol{\phi}_{1,2}^{M_{0\frac\pi60}}$]{\includegraphics[scale=0.14]{B4_link1_crop}}}
\mbox{\subfloat[$\boldsymbol{\phi}_{1,2}^{M_{0\frac{3\pi}{2}0}}$]{\includegraphics[scale=0.108]{B4_link6_crop}}}
\caption{Links for the $B=4$ Skyrmion.
(a) A link between the vortex ring (yellow) and the vacuum vortex
(magenta), which are both degenerate. (b,c) Nondegenerate links between the
vortices and vacuum vortices, which are both closed loops.
The gray isosurface is the baryon charge density illustrating the
shape of the Skyrmion.
}
\label{fig:RMB4}
\end{center}
\end{figure}
The $B=4$ Skyrmion has octahedral symmetry, which is the dual symmetry
of the cube, and the rational map with such symmetry
reads \cite{Houghton:1997kg}
\begin{eqnarray}
R_4(z) = \frac{z^4 + \i2\sqrt{3}z^2 + 1}{z^4 - \i2\sqrt{3}z^2 + 1}.
\end{eqnarray}
Fig.~\ref{fig:RMB4} shows preimages of $\boldsymbol{\phi}_{1,2}$ as well as two
rotations thereof, by $\beta=\frac\pi6$ and by
$\beta=\frac{3\pi}{2}$.
As for all $B>1$, the vacuum vortex (magenta) in fig.~\ref{fig:RMB4}
is degenerate, but this time also the vortex or antivacuum (yellow) is
degenerate with merging points of the curves at each face of the
cube.
Rotating the vortex points by $\beta=\frac\pi6$ and by
$\beta=\frac{3\pi}{2}$, see fig.~\ref{fig:RMB4}(b,c), yields regular
points on the 2-sphere under the mapping and the links are clear.
This time, however, the linking number is split into two disjoint
clusters of links and the total linking number is given by $q_1=1$,
$p_1=2$, $q_2=1$, $p_2=2$ and hence $B=Q=\sum_{\ell=1}^2p_\ell q_\ell=4$,
as promised.
This example confirms conjecture \ref{cjt:1} and this time with 2
clusters adding up to the total linking number.
An interesting note is that one can see the structure of the $B=4$
cubic Skyrmion being composed of two tori, with one of them flipped
with respect to the other.
\begin{figure}[!htp]
\begin{center}
\mbox{\subfloat[$\boldsymbol{\phi}_{1,2}$]{\includegraphics[scale=0.105]{B5_link4_crop}}
\subfloat[$\boldsymbol{\phi}_{1,2}^{M_{0\frac\pi20}}$]{\includegraphics[scale=0.12]{B5_link2_crop}}
\subfloat[$\boldsymbol{\phi}_{1,2}^{M_{0\frac\pi2\frac\pi4}}$]{\includegraphics[scale=0.105]{B5_link3_crop}}}
\caption{Links for the $B=5$ Skyrmion.
(a) A link between the vortex ring (yellow) and the vacuum vortex
(magenta) which is degenerate. (b,c) Nondegenerate links between the
vortices and vacuum vortices, which are both closed loops.
The gray isosurface is the baryon charge density illustrating the
shape of the Skyrmion.
}
\label{fig:RMB5}
\end{center}
\end{figure}
The $B=5$ Skyrmion has dihedral ($D_{2d}$) symmetry and the
corresponding rational map is \cite{Houghton:1997kg}
\begin{eqnarray}
R_5(z) = \frac{z\left(z^4 + b z^2 + a\right)}{a z^4 - b z^2 + 1},
\end{eqnarray}
which contains enhanced $D_4$ symmetry if $b=0$ and further
enhancement to octahedral ($O_h$) symmetry if $a=-5$, see
ref.~\cite{Houghton:1997kg}.
The choice of the parameters is now for the first $B$ not fixed by
choosing the highest symmetry, because there is a lower value of
$\mathcal{I}$ (eq.~\eqref{eq:calI})
for different values of $a,b$.
In particular, $a=3.07$ and $b=3.94$ minimizes
$\mathcal{I}$ \cite{Houghton:1997kg}.
Fig.~\ref{fig:RMB5} shows preimages of $\boldsymbol{\phi}_{1,2}$ as well as two
rotations thereof by $\beta=\frac\pi2$ and by $\gamma=\frac\pi4$.
Only the vacuum vortex (magenta) is degenerate in the canonical frame,
see fig.~\ref{fig:RMB5}(a).
The easiest linking number is found in fig.~\ref{fig:RMB5}(c), where
the vortex (yellow) is linked twice with a vacuum vortex (blue) (bottom of
the figure) and thrice with another vacuum vortex (blue) (top of the
figure).
This yields $q=1$, $p=5$, yielding $B=Q=pq=5$, as expected.
The reason for counting five windings for the vacuum vortex is that
there is (from the vortex point of view) no difference between a
doubly wound vacuum vortex and two separate singly wound vacuum
vortices linking the vortex.
Hence, from the vortex point of view, there is a winding-5 vacuum
vortex that has split into two clusters (which is irrelevant for the
counting).
Of course, we could have taken the opposite point of view, reversing
the roles of the two preimages.
This would lead to $q_1=2$, $p_1=1$, $q_2=3$, $p_2=1$ and now
$B=Q=\sum_{\ell=1}^2p_\ell q_\ell=5$.
Turning to the counting of the linking number in
fig.~\ref{fig:RMB5}(b), the situation is slightly complicated by the
fact that the two clusters are linked.
Taking the viewpoint of the red vortices, we have $q_1=1$, $p_1=2$,
$q_2=1$, $p_2=3$ and $B=Q=5$ as promised.
If we swap the roles of the two preimages, we of course get the same
answer.
This is the first nontrivial example in the class of rational map
Skyrmions and it still confirms conjecture \ref{cjt:1}.
\begin{figure}[!htp]
\begin{center}
\mbox{\subfloat[$\boldsymbol{\phi}_{1,2}$]{\includegraphics[scale=0.105]{B6_link4_crop}}
\subfloat[$\boldsymbol{\phi}_{1,2}^{M_{0\frac{3\pi}{2}0}}$]{\includegraphics[scale=0.145]{B6_link6_crop}}}
\caption{Links for the $B=6$ Skyrmion.
(a) Links between the vortex rings (yellow) and the vacuum vortex
(magenta) which is degenerate. (b) Nondegenerate links between the
vortex and vacuum vortex, which are both closed loops.
The gray isosurface is the baryon charge density illustrating the
shape of the Skyrmion.
}
\label{fig:RMB6}
\end{center}
\end{figure}
The $B=6$ Skyrmion has $D_{4d}$ dihedral symmetry, which is generated
by the rational map \cite{Houghton:1997kg}
\begin{eqnarray}
R_6(z) = \frac{z^4 + \i a}{z^2(\i a z^4 + 1)},
\end{eqnarray}
with $a\in\mathbb{R}$.
This is the first $B$ for which symmetry does not fix the parameters
of the rational map.
Minimization of $\mathcal{I}$ (eq.~\eqref{eq:calI}) yields
$a=0.16$ \cite{Houghton:1997kg}.
Fig.~\ref{fig:RMB6} shows preimages of $\boldsymbol{\phi}_{1,2}$ as well as of
a rotation of them by $\beta=\frac{3\pi}{2}$.
The vacuum vortex (magenta) in fig.~\ref{fig:RMB6}(a) is still
degenerate as promised, but after a swift $\beta$ rotation, the
mapping of the vortex points is regular.
After a bit of disentangling, it is clear that the vacuum vortex
(white) links the vortex (black) six times in fig.~\ref{fig:RMB6}(b),
corresponding to $q=1$, $p=6$ and $B=Q=pq=6$.
\begin{figure}[!htp]
\begin{center}
\mbox{\subfloat[$\boldsymbol{\phi}_{1,2}$]{\includegraphics[scale=0.093]{B7_link4_crop}}
\subfloat[$\boldsymbol{\phi}_{1,2}^{M_{0\frac\pi60}}$]{\includegraphics[scale=0.153]{B7_link1_crop}}
\subfloat[$\boldsymbol{\phi}_{1,2}^{M_{0\frac\pi20}}$]{\includegraphics[scale=0.108]{B7_link2_crop}}}
\caption{Links for the $B=7$ Skyrmion.
(a) A link between the vortex ring (yellow) and the vacuum vortex
(magenta), which are both degenerate. (b,c) Nondegenerate links between the
vortices and vacuum vortices, which are both closed loops.
The gray isosurface is the baryon charge density illustrating the
shape of the Skyrmion.
}
\label{fig:RMB7}
\end{center}
\end{figure}
The $B=7$ Skyrmion is the most symmetric of them all and possesses
icosahedral symmetry, which fixes the rational map
as \cite{Houghton:1997kg}
\begin{eqnarray}
R_7(z) = \frac{z^5 + 3}{z^2(3z^5 + 1)}.
\end{eqnarray}
Fig.~\ref{fig:RMB7} shows preimages of $\boldsymbol{\phi}_{1,2}$ as well as of
rotations thereof by $\beta=\frac\pi6$ and by $\beta=\frac\pi2$.
In the canonical frame, both vortices are degenerate.
After rotating by $\beta=\frac\pi6$ the mapping is regular and the
vortex (dark red) links three vacuum vortices (light red) two, three
and two times, respectively, yielding $q=1$, $p=7$, $B=Q=pq=7$.
The counting goes slightly differently if we continue the rotation of
the 2-sphere to $\beta=\frac\pi2$ where both vortices have turned into
3 rings.
If we take the point of view of the red vortices, the winding numbers
are $q_1=1$, $p_1=2$, $q_2=1$, $p_2=3$, $q_3=1$, $p_3=2$.
Notice, however, that the clusters themselves are linked and therefore
the number of vacuum vortices is not 7 but 3.
\begin{figure}[!htp]
\begin{center}
\mbox{\subfloat[$\boldsymbol{\phi}_{1,2}$]{\includegraphics[scale=0.095]{B8_link4_crop}}
\subfloat[$\boldsymbol{\phi}_{1,2}^{M_{0\frac\pi20}}$]{\includegraphics[scale=0.125]{B8_link2_crop}}}
\mbox{\subfloat[$\boldsymbol{\phi}_{1,2}^{M_{0\frac\pi2\frac\pi4}}$]{\includegraphics[scale=0.13]{B8_link3_crop}}}
\caption{Links for the $B=8$ Skyrmion.
(a) Links between the vortex rings (yellow) and the vacuum vortex
(magenta) which is degenerate. (b,c) Nondegenerate links between the
vortices and vacuum vortices, which are both closed loops.
The gray isosurface is the baryon charge density illustrating the
shape of the Skyrmion.
}
\label{fig:RMB8}
\end{center}
\end{figure}
The last Skyrmion here is the $B=8$ Skyrmion, which has $D_{6d}$
symmetry in the massless theory \eqref{eq:massless_Skyrme}, in
contradistinction from the solution of the massive theory which is
composed by two cubes \cite{Battye:2006na}.
The rational map for the fullerene-like Skyrmion with $D_{6d}$
symmetry has the corresponding rational map \cite{Houghton:1997kg}
\begin{eqnarray}
R_8(z) = \frac{z^6 - a}{z^2(a z^6 + 1)},
\end{eqnarray}
with $a\in\mathbb{R}$.
The minimization of $\mathcal{I}$ of eq.~\eqref{eq:calI} yields
$a=0.14$ \cite{Houghton:1997kg}.
Fig.~\ref{fig:RMB8} shows preimages of $\boldsymbol{\phi}_{1,2}$ as well as of
rotations thereof by $\beta=\frac\pi2$ and by $\gamma=\frac\pi4$.
As expected by now, the vacuum vortex in fig.~\ref{fig:RMB8}(a) is
degenerate.
In fig.~\ref{fig:RMB8}(b) the vacuum vortex (cyan) and the vortex
(red) are linked eight times and the counting is simply $q=1$, $p=8$,
and thus $B=Q=pq=8$.
Rotating by $\pi/4$ around the equator of the 2-sphere, yields
different preimages.
Now there are three vortices (yellow), see fig.~\ref{fig:RMB8}(c),
that link the vacuum vortex (blue) and they link the vacuum vortex
two, four and two times, respectively.
The counting now goes like $q_1=1$, $p_1=2$, $q_2=1$, $p_2=4$,
$q_3=1$, $p_3=2$ and thus we have $B=Q=\sum_{\ell=1}^3 p_\ell q_\ell=8$
again, as promised.
\section{Discussion and outlook}\label{sec:discussion}
In this paper, we have proved theorem \ref{thm:1} which states that
the degree of a Skyrme field is the same as the linking number of two
preimages of two distinct regular points on the 2-sphere of said field
under the Hopf map.
We further conjecture that the 2 linked lines may be interpreted as
vortices in the original O(4) field.
Note that such an interpretation is impossible in the Faddeev-Skyrme
model which is based on O(3) fields in $\mathbb{R}^3$, although they
do possess Hopf charge and knots.
We illustrated the conjecture and hence the theorem with two examples:
a toroidal vortex, which is simply an axially symmetric Skyrmion with
topological degree $P$ (energetically stabilized by a certain
potential, see ref.~\cite{Gudnason:2016yix}); and with the eight first
rational map Skyrmions of ref.~\cite{Houghton:1997kg}.
The toroidal vortex or the $P$-wound axially symmetric Skyrmion is in
fact the motivation for conjecture \ref{cjt:1} and naturally it works
well.
The rational map Skyrmions, on the other hand, are a nontrivial check
on the conjecture and so far it has passed the checks.
One should note that all the preimages that we studied in this paper
are themselves unknots, viz.~they are topologically equivalent to
circles.
So we have only checked the conjecture \ref{cjt:1} with various
numbers of linked unknots.
It is possible that the conjecture needs refinement in more
complicated situations where the preimage itself becomes a link or
a knot or even linked knots, which then by the nature of the game will
be linked with the other preimage.
Although $B=Q$ holds by theorem \ref{thm:1}, the conjecture may
receive corrections of the form, schematically
\begin{equation}
B = Q = \sum_{\rm unknots} p q
+ \sum_{\rm links} F_{\rm links}(p,q)
+ \sum_{\rm knots} F_{\rm knots}(p,q)
+ \sum_{\rm linked\ knots} F_{\rm linked\ knots}(p,q),
\end{equation}
where we have suppressed cluster indices.
We leave this for future studies.
Lord Kelvin imagined that atoms are described by
knots of vortices \cite{Thomson:1869}.
With theorem \ref{thm:1} we can say that nuclei are not knots, but
contain links of vortices via a certain projection.
\subsection*{Acknowledgments}
We would like to thank Michikazu Kobayashi for collaboration at the
early stage of this work and we thank Chris Halcrow, Steffen Krusch,
Martin Speight and Paul Sutcliffe for discussions and comments.
S.~B.~G. thanks the Outstanding Talent Program of Henan University for
partial support.
The work of S.~B.~G.~is supported by the National Natural Science
Foundation of China (Grant No.~11675223).
M.~N.~is supported by the Ministry of Education, Culture, Sports,
Science (MEXT)-Supported Program for the Strategic Research Foundation
at Private Universities ``Topological Science'' (Grant No.~S1511006) and
by a Grant-in-Aid for Scientific Research on Innovative Areas
``Topological Materials Science'' (KAKENHI Grant No.~15H05855) from
MEXT, Japan.
M.~N.~is also supported in part by the Japan Society for
the Promotion of Science (JSPS) Grant-in-Aid for Scientific Research
(KAKENHI Grant No.~16H03984 and No.~18H01217).
\section{Introduction}
\medskip \medskip
It is well known that information on the nucleon internal structure is
contained in the electromagnetic structure functions or in the one photon
exchange cross section, $\sigma _{1\gamma}$, for the deep inelastic
lepton scattering off nucleons. However, a determination of the one photon
cross section from the data, which is a goal of electroproduction
(i.\,e.\ electron-- and muon scattering)
experiments, demands excluding contributions from other
electroweak processes. These processes account for a large fraction
of the measured cross section, especially in the low $x$ and high $y$ region.
They cannot be discarded from the measured cross section
on the event--by--event basis; the measured differential cross section
can instead be multiplied by a correction factor calculated theoretically. This
is called a radiative correction procedure.
\medskip
The first
radiative correction scheme was created in the sixties by L.W. Mo
and Y.S. Tsai \cite{Tsai60,MTa,MTb} (MT scheme) in connection with the early
SLAC electron scattering experiments. Another approach, originally formulated
for the CERN muon scattering experiment planned by the BCDMS Collaboration,
was suggested by the
Dubna group in the seventies \cite{Dubna70} and upgraded
later \cite{Dubna80} (D scheme). In the analysis of the data from the
deep inelastic electroproduction experiments, both schemes
were extensively used (cf.\ e.\,g.~\cite{TS:PR}), the MT scheme often in the
exact (i.\,e.\ no `peaking approximation') and upgraded version.
However, the precision of the recent
experiments is so high, see \ e.\,g.\cite{NMCF2}, that also
the radiative correction
procedure has to be based on more precise theoretical calculations.
Therefore
understanding similarities and differences in the two radiative
correction schemes is of ultimate
importance for concluding about consistency of results coming from different
experiments.
\medskip
The goal of this paper is to compare analytically as well as numerically
the most upgraded versions of the two approaches. Our experience
with the radiative correction procedure in the deep inelastic experiments
carried out at CERN by the EMC, NMC and BCDMS
Collaborations
allows us to point out the problems encountered during the application of
this procedure in the data analysis.
The previous comparisons between the two considered methods were limited to
analysing the numerical results of the early versions of the schemes
\cite{TS} or only dealt with a subset of the radiative processes
\cite{konkurencja}.
\medskip
The paper is organized
as follows. In Section 2 the deep inelastic kinematics and cross sections are
defined. Sections 3 and 4, together with Appendices A and B contain
a description of the MT and D schemes respectively.
A complete set of formulae is given for each scheme
using the original notation. Only minor simplifications and changes
are introduced for the sake of clarity.
Useful relations between the respective notations are given
in the Appendix C. The schemes are then compared in Sections 5 (theoretical
ideas) and 6 (numerical results) and finally a summary is given in Section 7.
\medskip\medskip\medskip
\section{Basic Definitions and Kinematics of Deep Inelastic Scattering}
\medskip\medskip
As mentioned above, a goal of electroproduction experiments is to extract
the differential cross section in the one photon exchange
approximation (fig.~1a)
from the data. This cross section can be expressed in the following
way by the structure functions $F_1(x,Q^2)$ and $F_2(x,Q^2)$ of the target:
\begin{eqnarray}
{\D^2\sigma _{1\gamma}(x,Q^2)\over \D Q^2\D x}={4\pi \alpha^2\over
Q^4}\left[\left(1-y-
{Mxy\over 2E}\right) {F_2(x,Q^2)\over x}+\left (1-{2m^2 \over Q^2}\right )
y^2F_1(x,Q^2)\right]\,.
\label{basic}
\end{eqnarray}
In this equation $\alpha$ is the fine structure constant, $m$ is the electron
(muon) mass, $Q^2 = -q^2$ where $q^2$ is the square of the four--momentum
transfer between the incoming and outgoing lepton, $x = Q^2$/$(2M\nu)$
the Bjorken scaling variable, $M$ is taken
as the proton mass, $E$ and $\nu$ are the lepton's incident energy
and energy transfer in the proton rest frame
and $y=\nu/E$.
\medskip
The one photon exchange process described by the eq.~(\ref{basic}), is a part
of the lowest order (or Born) electroproduction cross section, $\sigma ^B$.
The other part of $\sigma ^B$ proceeds from the $Z^0$ boson
exchange. The two contributions cannot be separated
experimentally. However in the present fixed target experiments the involved
virtualities (i.\,e.\ the $Q^2$ values) are small compared to the $Z^0$ mass
squared and therefore eq.(\ref{basic}) is a good approximation of the Born
cross section. The $Z^0$ contribution will be discussed later in more detail.
\medskip
The differential cross section (1) can also be expressed in terms
of structure functions $F_2(x,Q^2)$ and $R(x,Q^2)$:
\begin{eqnarray}
{\D^2\sigma _{1\gamma}(x,Q^2)\over \D Q^2\D x}=
{4\pi \alpha^2\over Q^4}{F_2\over x}
\left[1-y-
{Mxy\over 2E} + \left (1-{2m^2\over Q^2}\right )
{y^2(1+4M^2x^2/Q^2)\over 2(1+R)}\right]\ ,
\label{basicp}
\end{eqnarray}
where $R$ is defined as:
\begin{equation}
R(x,Q^2)={\sigma_L\over\sigma_T}={(1+4M^2x^2/Q^2)F_2\over 2xF_1}-1;
\label{r}
\end{equation}
$\sigma_L$ and $\sigma_T$ denote the cross sections
for the longitudinally and transversally polarised virtual photon
respectively.
\medskip
As mentioned earlier the radiative events account for
a large
fraction of the measured cross section, $\sigma _{\rm meas}$.
These effects may lead to a wrong interpretation of the measured event
kinematics. For example
an elastic scattering from the target can be mistaken for a deep inelastic
event if it is accompanied by an energetic bremsstrahlung photon not
measured in the experiment.
The magnitude of the radiative effects in the measured cross
section will be characterized by the so called radiative correction
factor, $\eta (x,y)$, defined as follows:
\begin{equation}
\eta (x,y) = {\sigma _{1\gamma }\over \sigma_{\rm meas}}\,.
\label{eta}
\end{equation}
This factor is used in the data analysis \cite{NMCF2} and will also be employed
in comparing results of calculations between different radiative correction
schemes.
\medskip
Finally we have to stress that in most of the deep inelastic experiments
only the inclusive measurements are performed. This means that only
incident and scattered leptons are measured and kinematics of the reaction
is defined by the leptonic observables. However the kinematics of radiative
events
cannot be defined by leptonic variables only, e.\,g.\ a bremsstrahlung photon
emission by the lepton results in a {\it measured} $Q^2$ different from
the {\it actual} one. Therefore, when appropriate,
we shall make a clear distinction between the hadron-- and lepton--defined
variables.
\medskip\medskip\medskip
\section{Mo and Tsai Scheme}
\medskip\medskip
In the original MT scheme \cite{Tsai60,MTa,MTb} the following
processes were considered to contribute
to the measured electron--proton inelastic cross section\footnote
{The original MT scheme was formulated for the electron scattering;
therefore a large effort was put to quantify the effects of the energy loss
in the electron passage through the target. We shall neglect them in this
paper since in the examples of practical applications we shall deal with
muon scattering only.}: real photon
bremsstrahlung from an initial and final electron (fig.~1b),
vertex correction
(fig.~1c) and vacuum polarisation correction (fig.~1d). In the latter one only
the electron and muon loops were originally considered. The proton
structure was accounted for through the two unspecified structure functions.
The MT scheme formulation is thus model independent.
The mathematical formulation of the MT is non-covariant.
\medskip
Corrections depicted by the diagrams in fig.~1 can be divided into two
groups: emission of real photons (fig.~1b) with energy larger than $\Delta$
and those with energy smaller than $\Delta$ (fig.~1b) together with virtual
corrections (fig.~1c,d).
Contributions from the soft photon emission and from the vertex correction
{\it separately} are infrared divergent but the divergences cancel when
the contributions are considered jointly \cite{BD}.
The parameter $\Delta$ may have a meaning of
the energy resolution or in another words a maximal energy of the emitted
photon which is still not detectable in the experiment.
It is often called the `infrared cut--off' parameter.
This interpretation implies that $\Delta$ may not be too large.
Too small $\Delta$ must also be avoided since it
may cause numerical instabilities in the computation of the soft photon
contribution.
The numerical results of the calculations should not depend on $\Delta$.
\medskip
In the MT approach the measured cross section can be expressed as follows
(see Appendix A for the definitions of variables used in MT publications):
\begin{eqnarray}
{\D^2\sigma_{\rm meas}\over \D\nu \D\Omega} &=& \e^{-\delta_R(\Delta)} F(Q^2)
{\D^2\sigma_{1\gamma}\over \D\nu \D\Omega}
+ {\D^2\sigma_{\rm tails}\over \D\nu \D\Omega} ,
\label{mtmaster}
\end{eqnarray}
where
\begin{eqnarray}
\delta_R(\Delta) = {\alpha \over \pi}\left(\ln{E_s\over \Delta} + \ln{E_p\over
\Delta}\right)\left(\ln{Q^2\over m^2} - 1\right)
\label{mtdel}
\end{eqnarray}
is a residuum of the cancellation of the infrared divergent terms and takes
into account all soft photon emissions in the lowest order of $\alpha$.
It is a well known fact that the infrared divergencies cancel in each
order in
$\alpha$ \cite {BD} and therefore it is possible to sum up contributions from
all soft photon emissions \cite{YFS}.
The exponential factor, $\e^{-\delta_R(\Delta)}$, in eq.(\ref{mtmaster}) is
a result of this summation. The function $F(Q^2)$ contains all
$\Delta-$independent terms:
\begin{eqnarray}
F(Q^2) = 1 + \delta_{\rm vac}^e +
\delta_{\rm vac}^{\mu} + \delta_{\rm vtx} + \delta_s ,
\label{mtf}
\end{eqnarray}
where\footnote{
$\delta^{e,\mu}_{\rm vac}$ given here is the full formula. The approximation
${2\alpha/\pi}(-{5/ 9}
+ {1/3}\ln{Q^2/m_{e,\mu}^2})$ holds for $Q^2\gg m_{e,\mu}^2$
(see e.\,g.\ eqs~5 and A1 from ref.~\cite{Tsai60}). For extremely low $Q^2$
formula (8) converges to $Q^2/15m_{e,\mu}^2$.}
\begin{eqnarray}
\delta_{\rm vac}^{e,\mu } &=& \frac{2\alpha}{\pi}\left[\frac{-5}{9}+
\frac{4m_{e,\mu}^2}{3Q^2}+\frac{1}{3}\sqrt{1+\frac{4m_{e,\mu}^2}{Q^2}}
\cdot \left(1-\frac{2m_{e,\mu}^2}{Q^2}\right) \ln \left(
\frac{\sqrt{1+4m_{e,\mu}^2/Q^2}+1}{\sqrt{1+4m_{e,\mu}^2/Q^2}-1}
\right) \right] \\
\label{mtvac}
\delta_{\rm vtx}&=&{2\alpha\over \pi}\left(-1
+ {3\over 4}\ln{Q^2\over m^2}\right) \\
\label{mtvtx}
\delta_s&=&{\alpha\over \pi}\left[{1\over 6}\pi^2 - \Phi \left(\cos^2{\theta
\over 2}\right) + \Phi \left({E_p-E_s\over E_p}\right) +
\Phi \left({E_s-E_p\over E_s}\right)\right]
\label{mts}
\end{eqnarray}
\noindent
and $\Phi$ the Spence function\footnote{In the original MT formulation
\cite{MTb} the `$q^2$' symbol was used in eqs (\ref{mtdel}) - (\ref{mtvac}).
This variable, however, was then redefined to become
the {\it measured} four momentum transfer, $q^2 = (s-p)^2$, and thus coincides
with our definition of $-Q^2$. The latter was thus used here for clarity.}.
The second term in eq.(\ref{mtmaster}), $\sigma_{\rm tails}$, accounts for
contribution from processes where the real photons of energy larger than
$\Delta$ are emitted:
\begin{equation}
{\D^2\sigma_{\rm tails}\over \D\nu \D\Omega} =
{\D^2\sigma(\omega > \Delta)\over \D\nu
\D\Omega}
= \int^{M^{\rm max}_j}_{M_j=M}\D M_j~{\D^2\sigma_{j,r}\over \D\nu
\D\Omega}\,,\\
\label{mtails}
\end{equation}
where $M$ denotes a target mass, $M^{\rm max}_j=\sqrt{M^2-Q^2+2M(\nu-
\Delta)}$ and $\sigma_{j,r}$ is given by the formula (A.24) in \cite{MTb} and
quoted in Appendix A; $M_j$ is an effective mass of the hadronic
final state. The integration over $M_j$ in eq.(\ref{mtails}) means that all
the final hadronic states contribute to the cross section measured in
a kinematical point $(Q^2,\nu)$, fig.~2, so called `radiative tails':
elastic ($M_j = M$), resonance production ($M_j = M_{res}$) ... and
deep inelastic tails, i.\,e.\ tails from the continuum.
\medskip
Equation (\ref{mtmaster}) does not take into account double-- (multi--) photon
exchange
reactions (fig.~1e) nor any radiative correction from the hadron current
(fig.~1f,g).
These effects were estimated only for the {\it elastic} $e-p$ interaction
\cite{MTa}. Application of these calculations to the inelastic interaction
is incorrect. Thus these results were discarded in practical
applications
of the MT scheme. Evaluation of these effects in the inelastic $e-p$
interactions was not done in this scheme. To do this a model
of the proton internal structure is necessary. The best framework
presently is the quark parton model (QPM).
\medskip
In the upgraded version of the MT scheme, used in the analysis of the NMC
results \cite{n/plong}, the $\tau^+\tau^-$ and $q\bar q$ loops in the vacuum
polarisation
\cite{BD,jeger} and the virtual photon $- Z^0$ boson interference \cite{TS:PR}
were also included.
\medskip\medskip\medskip
\section{Dubna Scheme}
\medskip\medskip
In the Dubna scheme the calculations of the deep inelastic processes
are based on a mixed approach which uses both a model independent
and the quark parton model treatment of the radiative corrections.
The radiative corrections to the leptonic current (fig.~1b,c) are
calculated within the same model independent approach as
in the Mo and Tsai scheme except the way of treating the soft bremsstrahlung
photons. All other corrections are calculated using the
QPM approach.
\medskip
In the inclusive electroproduction experiments the radiative and non--radiative
events cannot be distinguished and therefore the
$\sigma_{\rm meas}$ should not depend on any parameter $\Delta$,
a property which {\it implicitly} holds in the Mo and Tsai scheme.
The D scheme is {\it explicitly} $\Delta$ independent as a result
of an integration over the whole bremsstrahlung photon phase space.
The relevant procedure is described in detail in \cite{teupitz}.
\medskip
Applying the QPM and the fact that quarks are point--like
objects permit in principle to calculate all QED processes
to all orders of $\alpha$. This means that
in addition to the diagrams in fig.~1.a-d
also the hadron current corrections (fig.~1.e-g) can be
evaluated. The diagrams of fig.~1.e-g thus become as in fig.~3a-c.
All these processes were taken into account as well as the
lowest order weak corrections, not shown here, which can also be calculated
in the framework of the quark-parton model.
The outline of the radiative corrections calculations in the D scheme
is given below; the details are given in refs \cite{teupitz,disep}.
\medskip
The measured cross section is now expressed as follows%
\footnote{Additional factor $\alpha /\pi$ in the third row of
eq.~(\ref{master})
is hidden in the definitions of the $R$ functions.}:
\begin{eqnarray}
{\D^2\sigma_{\rm meas}\over \D Q^2\D x}
&=& {\D^2\sigma ^B\over \D Q^2\D x}
\Biggl\{\e^{-\delta_R(x,Q^2)} +
\delta^{VR}(x,Q^2) \Biggr\} \nonumber \\
&+&{\D^2\sigma_{\rm in. tail}\over \D Q^2\D x}-
{\D^2\sigma^{IR}\over \D Q^2\D x} \nonumber \\
&+& {2\pi\alpha^2\over Q^4}
\sum_{B=\gamma,I,Z} \sum_{b=i,q}~ \sum_{Q,\bar Q}
c_b K(B,p) \left[ V(B,p)R^V_b(B)+pA(B,p)R^A_b(B) \right]
\nonumber \\
&+& {\D^2\sigma_{\rm el. tails} \over \D Q^2\D x}.
\label{master}
\end{eqnarray}
The first two rows of this formula represent the results of the model
independent calculations of the radiative corrections to the leptonic current.
The third row represents the quark parton model description of the
lepton--hadron interactions (fig.~3a) and of the radiative corrections to the
hadronic current (fig.~3b,c) as well as certain interference terms.
Finally the last term in eq.~(\ref{master}),
${\D^2\sigma_{\rm el. tails}/\D Q^2\D x}$, describes
the elastic and resonance radiative `tails'.
\medskip
The ${\D^2\sigma ^B/\D Q^2\D x}$ cross section in eq.~(\ref{master}) denotes
the
full Born cross section for the deep inelastic scattering, i.\,e.\ the cross
section containing both the one photon-- and one $Z^0$ boson exchange
contributions:
\begin{equation}
{\D^2\sigma^{B}\over \D Q^2\D x}
= {2\pi \alpha^2 y \over Sx} \sum_{i=1}^{3}
{\cal A}_{i}(x,Q^2) {1 \over Q^4} {\cal S}_{i}^{B}(y,Q^2) ,
\label{born}
\end{equation}
\noindent
where the functions ${\cal S}_{i}^{B}(y,Q^2)$ are:
\begin{eqnarray}
{\cal S}_{1}^{B}(y,Q^2) &=& Q^2-2m^2, \nonumber \\
{\cal S}_{2}^{B}(y,Q^2) &=& 2[(1-y)S^2-M^{2}Q^{2}], \nonumber \\
{\cal S}_{3}^{B}(y,Q^2) &=& 2Q^{2}S(2-y).
\label{rad}
\end{eqnarray}
\noindent
with $Q^2=Sxy$ and $S=2ME$. The functions ${\cal A}_{i}$ are given in
Appendix B.
\medskip
The $\delta _R$ in eq.~(\ref{master}) is responsible for those parts of the
{\it soft} and {\it hard collinear} photon emissions which could
be resummed to all orders using the covariant exponentiation procedure,
\cite{YFS,shumeiko}. It reads:
\begin{equation}
\delta_{R}=-{\alpha\over{\pi}}\left(\ln {Q^2 \over m^2}-1\right)
\ln{y^2(1-x)^2 \over (1-yx)(1-y(1-x))}.
\label{delinf}
\end{equation}
\noindent
The $\delta^{VR}$ correction factor in eq.(\ref{master}) is a remnant
of the exponentiation and of the subtraction procedure used to disentangle
the infrared divergent terms from the ${\D^2\sigma_{\rm in. tail}/
\D Q^2\D x}$ cross section \cite{teupitz}, see below. It thus contains
the vertex correction, fig.~1.c and is given by
\begin{equation}
\delta^{VR}=\delta_{\rm vtx}-{\alpha\over{2 \pi}}
\ln^2{(1-yx) \over (1-y(1-x))} +
\Phi\left[{(1-y) \over (1-yx)(1-y(1-x))} \right] - \Phi(1).
\label{delrem}
\end{equation}
\medskip
The ${\D^2\sigma_{\rm in. tail}/\D Q^2\D x}$ in eq.~(\ref{master}) describes
the
inelastic radiative tail for the {\it lepton current correction} only:
\begin{equation}
{\D^2\sigma_{\rm in. tail}\over \D Q^2\D x}
={2\alpha^3 y \over Sx}
\int \!\!\int \D Q^2_h \D M^2_h \sum_{i=1}^{3}
{\cal A}_{i}(x_h,Q_h^2) {1 \over Q^4_h}
{\cal S}_{i}(y,Q^2,y_h,Q^2_h).
\label{dtails}
\end{equation}
In this formula $M_h$ is the invariant mass of the final
hadronic system and the variables bearing a subscript
`$h$' refer to virtual photon--target
vertex in contrast to the variables measured in the inclusive
electroproduction experiment. For their definitions see Appendix B where also
the explicit expressions for the radiator functions ${\cal S}_{i}$ are given.
\medskip
It is a well known fact that the
${\D^2\sigma_{\rm in. tail}/ \D Q^2\D x}$ cross section
is infrared divergent. To regularize it a simple trick (`fixation
procedure') of adding and subtracting an extra term
\begin{equation}
{\D^2\sigma^{IR}\over \D Q^2\D x}
= {\D^2\sigma^{B}\over \D Q^2\D x}
\int\!\! \int \D Q^2_h \D M^2_h
{\cal F}^{IR}(y,Q^2,y_h,Q^2_h),
\label{dir}
\end{equation}
to ${\D^2\sigma_{\rm in.
tail}/ \D Q^2\D x}$ was employed \cite{YFS,teupitz}. In the added term
an integration over a full photon phase space was carried out,
resulting in the above given expressions for $\delta _R$ and $\delta ^{VR}$.
The subtracted term appears explicitly in the second row of eq.(\ref{master}),
so that the difference $\D^2\sigma_{\rm in. tail}/ \D Q^2\D x -
{\D^2\sigma^{IR}/ \D Q^2\D x}$ in eq.~(\ref{master}) is finite
over the full
kinematic domain of $Q^2_h$ and $M_h^2$. This method is a key point of the
D scheme, making it {\it explicitly} $\Delta$ independent.
The function ${\cal F}^{IR}$ is given in Appendix B.
\medskip
The third row in the eq.~(\ref{master}) represents the quark parton
model calculations and contains clearly visible vector ($\gamma$ and $Z^0$)
and axial (only $Z^0$) contributions.
Index $B$ runs over the photon exchange ($\gamma$), the $Z^0$ boson
exchange ($Z$) and their interference ($I$). Index $b$ stands for the
scattering with the single photon emission by the quark ($q$), fig.~3c, and its
interference with the photon emission by the lepton ($i$). The double photon
exchange (fig.~3a) as well as the vertex corrections on the quark line
(fig.~3b)
are also hidden there. Both scattering off quarks
($Q$) and antiquarks ($\bar Q$) were considered \footnote{In the original
formulation \cite{disep} the index $b$ assumes also values 0 and l which
correspond to the contributions from processes in fig.~1a,b,c.
Here they are included in the model independent parts of eq.~(\ref{master})}.
Coefficients $c_b$ are equal to $Q^2_Q$ or $Q_{\mu}Q_Q$ for the process of
bremsstrahlung photon emission from the quark or for the interference term
between the photon emission from the lepton and from the quark respectively;
$Q_\mu$ and $Q_Q$ are the charges of the lepton and of the quark given
in the electron charge units. The sign factor $p$ is defined as: $p=p_\mu p_Q$
where $p_{\mu ,Q}=\pm$ 1 for particle (antiparticle).
For a detailed form of the coupling strength factors
$K(B,p)$, modified vector, $V(B,p)$ and axial, $A(B,p)$ couplings
as well as of the functions $R_b^{V,A}(B)$ we refer the reader to ref.
\cite{disep}.
\medskip
The problem of the soft photons' emission from the quarks can be
uniquely
solved since the quarks are not observed and thus the quark states are all
summed up in the cross section. The infrared divergencies for initial--
and final state quarks as well as
the quark mass singularities for the final state quarks
cancel in each order in $\alpha$ \cite{BD,disep}. Mass singularities
associated with initial quark are included in the definition of the proton
structure function.
\medskip
The last row in eq.~(\ref{master}), ${\D^2\sigma_{\rm el. tails}/ \D Q^2\D x}$,
is in the D scheme treated essentially in the same way as in the MT
except that it is formulated in a covariant way.
Finally the vacuum polarisation, fig.~1d, was taken into
account via the `running' $\alpha(Q^2)$ which in the $Q^2 \gg m_f^2$
approximation ($m_f$ stands for the lepton and quark effective masses),
\cite{jeger} is defined as follows:
\begin{equation}
\alpha(Q^2)=\frac{\alpha}{1-{1\over 2}\sum_{f}c_f Q_f^2
\delta_{\rm vac}^f} ,
\label{alfa}
\end{equation}
where $c_{f}$ and $Q_{f}$ are the colour factor and the electric charge of
fermions $f~ (c_{f} = Q_{f} = 1$ for leptons); `$f$' runs over all leptons
and quarks.
\medskip
We shall close this section with the following remarks:
first, although not shown explicitly in eq.~(\ref{master}),
the weak loop correction contribution is also present in the D formulation.
It is calculated within the QPM framework; details are given in \cite{disep}.
Second, the ${\cal O}(\alpha^{2})$ corrections
($\alpha^4$ contributions to the cross section) not shown explicitly,
were also implemented. For the elastic radiative tail they were calculated
completely in the first paper of ref. \cite{Dubna80}, while for the inelastic
continuum they were implemented in an approximate way, described in
the second paper of ref. \cite{Dubna80}.
\medskip\medskip\medskip
\section{Comparison of the MT and D Schemes}
\medskip\medskip
Results of deep inelastic experiments were analysed using
either MT or D radiative correction schemes. Therefore it is of ultimate
importance to understand the differences and similarities between the two
approaches.
In this section we shall make a brief summary of theoretical ideas
in the two schemes; numerical comparison will be presented in the next section.
To facilitate the comparison, the relations between the variables used in
the MT and D formulae are given in Appendix C.
\medskip
The D scheme is formulated in a covariant way and its model dependent part
is based on the quark parton model. The covariant formulation means that
all formulae are expressed in terms of the Lorentz invariants and are thus
independent of the choice of the reference frame. This also means
that the D scheme is explicitly independent of the infrared cut--off
parameter $\Delta$, cf.\ eq.~(\ref{master}). The $\Delta-$
independence was obtained through a special mathematical procedure.
\medskip
The MT scheme is not covariantly formulated. It should also be
$\Delta-$independent which formally means that the
derivative over $\Delta$ of the right hand side of
eq.(\ref{mtmaster}) should be equal to zero. The calculations show that this
does not hold and that a certain dependence of the results on $\Delta$ should
be
observed. For not too small $\Delta$ this dependence is very weak.
For very small $\Delta$ (apart from numerical instabilities),
eq.(\ref{mtmaster}) depends much stronger on the value of $\Delta$ which in
this case should
be calculated separately for every kinematical point (i.e. for every $(x,y)$
value) in order to minimise the dependence.
\medskip
The elastic and resonance tails (i.\,e.\ the contribution of the reactions
with the elastic and resonance final states) are calculated in the same way
in both schemes. The inelastic tails (the contribution
of the reactions with the inelastic final states) originating from the
leptonic bremsstrahlung are treated differently but the differences are
purely mathematical.
The inelastic tail in the MT is calculated with the same formula
as the elastic one (eq.~(\ref{mtails})) where the inelasticity of the process
is taken into account
through integration over kinematically allowed final state masses.
The corresponding structure functions, $W^j_{1,2}$(see Appendix A),
are not specified;
in practical applications the electromagnetic structure functions are used.
The leptonic inelastic tail in D is also calculated with arbitrary structure
functions while tails originating from the hadronic bremsstrahlung
were calculated within the quark parton model framework.
\medskip
The usage of the parton model in the D
scheme allows one to calculate the hadron current corrections in the inelastic case
as well as a double photon exchange process; they are calculated
up to $\alpha^3$. The hadron current corrections cannot be calculated
in the MT approach and therefore such corrections were not taken into account.
The $q\bar q$ loop is also naturally present in the vacuum polarisation
process in the D scheme while in the MT it was only added later. The same
is true for the weak interactions contributions ($Z^0$ exchange and the
$\gamma -Z^0$ interference). The weak contributions are
however very small as compared to the present experimental resolution.
\medskip
Finally the $\alpha^4$ lepton current corrections were partially taken
into account in the D scheme but not in the MT.
\medskip\medskip\medskip
\section{Numerical Calculations in the MT and D schemes}
\medskip\medskip
A tremendous increase of the accuracy of deep inelastic electroproduction
experiments demands a similar increase of the accuracy of the radiative
corrections calculations. Therefore the early versions of these calculations
have constantly been improved. At the same time comparisons between
the two considered schemes were made, \cite{TS:PR,TS,konkurencja}.
In the analysis of the
recent very precise measurements by the NMC \cite{NMCF2,n/plong} both schemes
were used in their most upgraded versions and applied for nucleon and nuclear
targets.
The MT code, named FERRAD35,
apart from the processes contained in eqs~(\ref{mtmaster})--(\ref{mtails})
included also the tau lepton--
and quark loops and the photon$-Z^0$-boson
interference. The tails were treated in an exact
way in contrast to the peaking approximation \cite{MTa,MTb} applicable
for the electron deep inelastic scattering.
Detailed input information (structure functions, form factors,
nuclear structure models, etc) was introduced \cite{PhD}.
The D code, named TERAD86, was in principle used in the same version as
sketched by the formulae (\ref{master}) -- (\ref{alfa}) and employed in the
dedicated BCDMS experiment \cite{TS:PR}, except that the FERRAD35 input
information was supplied. In this section we shall compare the FERRAD35
and TERAD86 results on the radiative
correction factors, $\eta$, as well as on the elastic radiative tails.
Comparisons will be done in the kinematical region of the NMC positive
muon--proton deep inelastic scattering experiment, i.e. for 0.003$< x <$0.9
and 0.1$< y <$0.9 at the incident muon energy 280 GeV.
The input information in the calculations was as follows:
Gari and Kr\"umpelman proton form factor parametrisation, \cite{protff}
structure function $F_2$ as measured and parametrised by the NMC, \cite{NMCF2},
and finally for the $R(x,Q^2)$ the parametrisation of SLAC was taken for all
$x$ and $Q^2 > $0.35 GeV$^2$. For smaller $Q^2$ the value of $R$ was assumed
to be constant and equal to the value at $Q^2 =$ 0.35 GeV$^2$. The proton
resonances were neglected. For further details see refs \cite{n/plong} and
\cite{PhD}.
\medskip
The main numerical problem in the radiative correction programs is an
integration of the radiative tails. For example, the integrand
of eq.~(\ref{mtails}) changes by 27 orders of magnitude within the integration
interval. It is obvious that this function should be
integrated either by high accuracy routines (e.\,g.\ the CERNLIB GAUSS
routine demands an accuracy parameter $\varepsilon <$ $10^{-12}$) or by
dividing the integration interval into many small sections. It seems however
that the best method would be to change the integration variable from
$\cos\theta_k$ to $\log(-q^2)$ but also in this case
high accuracy of the integrating routines is needed.
This method was used in TERAD86 while in FERRAD35 dividing the
$\cos\theta_k$ integration interval into many sections was normally
employed (logarithmic
integration in FERRAD35 was also tried and gave the same results).
Due to the subtraction procedure in D the integrands in eqs~(\ref{dtails})
and (\ref{dir})
are fairly smooth and do not demand any extreme precision of integration.
\medskip
The dependence of the FERRAD35 results on the parameter $\Delta $ was carefully
studied. The results are presented in fig.~4. In the region of low $x$
and large $y$ where the radiative correction factor $\eta $ is largest,
the results are only weakly dependent on $\Delta $ for $\Delta >$200 MeV.
Results given below were obtained with $\Delta =$ 280 MeV.
\medskip
Comparison of the FERRAD35 and TERAD86 results is presented in figs~5--6.
Radiative corrections are very large, exceeding 50$\,\%$ at low $x$ and high
$y$, cf.\ fig.~5. In that region the agreement between the results of the two
schemes
is better than 2$\%$, cf.\ fig.~6 (closed symbols). However the $\tau\bar\tau$
and $q\bar q$ loop contributions in the vacuum polarisation process (fig.~1d),
absent in the original version of the MT scheme and included in FERRAD35, give
up to 2$\%$ contribution to the radiative correction factor in most of
the kinematic region (fig.~6, open symbols denote results of calculations
{\it without} those contributions).
Fluctuations visible in the high $x$ part of the curves in fig.~6 come from
numerical instabilities of FERRAD35 in that region.
\medskip\medskip\medskip
\section{Summary}
The two existing schemes of radiative correction
procedure, the Mo and Tsai and the Dubna ones are differently formulated
and are (partially) based on different physics approaches. Both were
extensively used in analysing the high energy experimental data. In this paper
we presented the two schemes in detail and compared them analytically
and numerically from the point of view of their effect on the results of
the deep inelastic positive muon scattering from a proton target at 280 GeV.
To this aim we used the
latest version of the D and the upgraded version of the MT programs.
The latter included the $\tau ^+\tau ^-$ and $q\bar q$
loops in the vacuum polarisation and the virtual photon$-Z^0$ boson
interference terms, all absent in the original formulation.
In contrast to the $\gamma-Z^0$ interference the quark loop contribution
turned out to be quite substantial, changing the total radiative correction
by about 2$\%$ in the measured region.
\medskip
The MT scheme contains the `infrared cut--off' parameter, $\Delta$.
The results should not depend on its value (provided it is not too large
and not too low) and indeed it is approximately so when $\Delta$ is
equal to about 0.1$\%$ of the beam energy value. The covariant
formulation of the D scheme excludes the existence of such a parameter.
\medskip
The overall radiative correction reaches 50$\%$ at low $x$ and high $y$.
Calculated from the two schemes the corrections agree to better than 2$\%$
in this region. Differences are thus insignificant over the most of the
phase space covered in the fixed target DIS experiments.
They are of the order of other systematic errors in the data analysis
\cite{NMCF2}.
\medskip
Neither of the two radiative correction schemes contains a contribution
from the multiphoton exchange process to the {\it elastic} radiative tail
which may be important for heavy nuclear targets. Results of quantitative
estimates of those processes, relevant for the heavy target data currently
analysed by the NMC, are discussed in a separate paper \cite{multig}.
\medskip\medskip\medskip
\noindent
{\Large {\bf {Acknowledgements}}}
\medskip\medskip\medskip
We thank our colleagues from the EMC and NMC for never--ending discussions
of radiative corrections and for the enjoyable research collaboration.
We are indebted to A. Akhundov for critical reading of the manuscript and
important comments. DB is very much obliged to L. Kalinovskaya and T. Riemann
for valuable discussions.
This research was supported in part by the Polish Committee for Scientific
Research, grant number 2 P302 069 04 and by Bundesministerium f\"ur
Forschung und Technologie.
\medskip\medskip\medskip
\section{Appendix A}
\medskip\medskip
Below we summarize variables used in
the MT formulation. The metric used is such that $ps =E_pE_s - \bar p\bar s$
and
four vector components are in the laboratory system.
Notation is explained in fig.~7 and the coordinate system is that of fig.~8.
\noindent
\begin{tabbing}
abc\=$\alpha$qwertyuiopasdfghjklaaaarttttttt\=fine structure constant \kill
\>$s=(E_s,\overline s)$ \>four momentum of the incident lepton
\\
\>$p=(E_p,\overline p)$ \>four momentum of the scattered lepton
\\
\>$\theta(\Omega)$ \>lepton scattering angle (solid angle), \\
\> \>$\cos\theta = \overline s\overline p/\mid
\overline s\mid \mid \overline p\mid$ \\
\>$\theta_k, \phi_k$ \>bremsstrahlung photon emission angles \\
\>$\theta_v, \phi_v$ \>virtual photon emission angles \\
\>$t = (M,\overline 0)$ \>four momentum of the target proton \\
\>$k = (\omega ,\overline k)$ \>four momentum of the bremsstrahlung
photon \\
\>$p_f = s + t - p - k$ \>four momentum of the final hadronic
system \\
\>$q^2 = (s-p-k)^2 = (p_f-t)^2$ \>four momentum transfer \\
\>$-Q^2 = (s - p)^2$ \>measured four momentum transfer \\
\end{tabbing}
\medskip
In the one photon exchange approximation and assuming one photon emission,
the radiative tail from the $j$'th mass level can be written as (formula
(A.24) in \cite {MTb}):
\begin{eqnarray*}
{\D^2\sigma_{j,r}\over \D\Omega \D E_p} &=&
{\alpha^3\over 2\pi}\left({E_p\over E_s}
\right)\int ^1_{-1} {2M\omega \D (\cos\theta_k)\over q^4(u_0 - \mid
\overline u\mid \cos\theta_k)} \\
&&\left(W^j_2(q^2)\left\{ {-am^2\over x^3}\left[2E_s(E_p+\omega )
+ {q^2\over 2}\right] - {a'm^2\over y^3}\left[2E_p(E_s - \omega ) +
{q^2\over 2}\right]\right.\right. \\
&&- 2 + 2\nu (x^{-1} - y^{-1})\left\{ m^2(s\cdot p - \omega ^2) +
(s\cdot p) \left[2E_sE_p - (s\cdot p) + \omega (E_s - E_p)\right]\right\} \\
&&+ x^{-1}\left[2\left(E_sE_p + E_s\omega + E_p^2\right) + {q^2\over 2}
- (s\cdot p) - m^2\right] \\
&&- y^{-1}\left.\left[2\left(E_sE_p - E_p\omega + E_s^2\right) + {q^2\over 2} -
(s\cdot p) - m^2\right]\right\} \\
&&+ W_1^j(q^2)\left[\left({a\over x^3} + {a'\over y^3}\right)
m^2(2m^2 + q^2) + 4 \right. \\
&&+ 4\nu \left.\left.\left(x^{-1} - y^{-1}\right)(s\cdot p)\left(s\cdot p -
2m^2\right) + \left(x^{-1} - y^{-1}\right)\left(2s\cdot p +
2m^2 - q^2\right)\rule{0cm}{4ex}\right]\right)
\end{eqnarray*}
\noindent
where
\begin{eqnarray*}
\omega &=& {1\over 2}\left(u^2 - M_j^2\right)/\left (u_0 - \mid\overline u\mid
\cos\theta_k\right) \\
u &=& s + t - p\: =\: p_f + k \\
u_0 &=& E_s + M - E_p \\
\mid\overline u\mid &=& \left(u_0^2 - u^2\right)^{1/2} \\
u^2 &=& 2m^2 + M^2 - 2(s\cdot p) + 2M\left(E_s - E_p\right) \\
q^2 &=& 2m^2 - 2(s\cdot p) -2\omega \left(E_s - E_p\right) + 2\omega \mid
\overline u\mid \cos\theta_k \\
a &=& \omega \left(E_p - \mid\overline p\mid \cos\theta_p \cos\theta_k\right)
\\
a'&=& \omega \left(E_s - \mid\overline s\mid \cos\theta_s \cos\theta_k\right)
\\
b &=& -\omega\mid\overline p\mid \sin\theta_p \sin\theta_k \\
\nu &=& (a' - a)^{-1} \\
\cos\theta_p &=& (\mid\overline s\mid \cos\theta - \mid\overline p\mid)/\mid
\overline u\mid \\
\cos\theta_s &=& (\mid\overline s\mid - \mid\overline p\mid \cos\theta )/ \mid
\overline u\mid \\
x &=& \left(a^2 - b^2\right)^{1/2} \\
y &=& \left(a'^2 - b^2\right)^{1/2} \\
\end{eqnarray*}
\medskip
\noindent
$W_1$, $W_2$ denote structure functions; in particular
$W_{1,2}^j (q^2)$ are the structure functions at four momentum
transfer $q^2$ and invariant mass of the hadronic final state $M_j$.
$W_{1,2}$ are connected with the functions $F_{1,2}$ of eq.~(\ref{basic})
in the following way: $F_2 = \nu W_2$ and $F_1 = MW_1$. Observe that the
meaning of the $\nu$, $x$ and $y$
variables used in the MT formulation is different from their generally accepted
meaning as the DIS variables.
\noindent
\medskip\medskip\medskip
\section {Appendix B}
\medskip \medskip
Below the exact expressions for certain functions in the D scheme will be
given.
The `generalized' structure functions ${\cal A}$$_{i}(x_{h},Q^{2}_{h})$,
in eq.~(\ref{born}) are:
\begin{eqnarray}
{\cal A}_{1}(x,Q^2)
&=& 2 {\cal F}_{1}^{NC}(x,Q^2)\;, \nonumber \\
{\cal A}_{2}(x,Q^2)
&=& \frac{1}{yS}
{\cal F}_{2}^{NC}(x,Q^2)\;, \nonumber \\
{\cal A}_{3}(x,Q^2)
&=& { 1 \over 2y S }
{\cal F}_{3}^{NC}(x,Q^2)\;,\nonumber
\end{eqnarray}
with
\begin{eqnarray}
{\cal F}_{1,2}^{NC}(x,Q^2)
&=& F_{1,2}(x,Q^2) + 2 |Q_{e}| \left( v_{e} + \lambda a_e \right)
\chi(Q^2) G_{1,2}(x,Q^2) \nonumber \\
& &+~\left( v_{e}^{2} + a_{e}^{2} + 2 \lambda v_e a_e \right)
\chi^2(Q^2) H_{1,2}(x,Q^2), \nonumber \\
{\cal F}_3^{\rm{NC}}(x,Q^2)
&=& -2\,{\rm sign}(Q_e)\left \{|Q_{e}|
\left( a_{e} + \lambda v_e \right) \chi(Q^2) G_{3}(x,Q^2)\right. \nonumber \\
& & \left.+\left[~2 v_{e} a_e + \lambda \left(v_e^2 + a_e^2 \right) \right]
\chi^2(Q^2) H_{3}(x,Q^2)\right \}, \nonumber
\label{f123}
\end{eqnarray}
\noindent
Here structure functions $F_i$, $G_i$ and $H_i$ describe the hadronic tensor
respectively for the $\gamma$, $\gamma-Z$ and $Z$ exchange,
$\lambda=\xi Q_e / |Q_e|$, $\xi$ is the lepton beam polarisation,
$v_{e}$ and $a_{e}$ are the vector and axial-vector
couplings of the lepton to the $Z$~boson:
\begin{eqnarray*}
v_{e}=1-4 |Q_{e}| \sin^{2}\theta_{W}, \hspace{1.cm} a_{e}=1, \nonumber
\end{eqnarray*}
$\theta_{W}$ is the weak mixing angle, $Q_{e}$ is the lepton charge,
$Q_{e}=-1$, and
\begin{eqnarray*}
\chi = \chi (Q^2) =
{G_\mu \over\sqrt{2}}~{M_{Z}^{2} \over{8\pi\alpha}}~{Q^2 \over
{Q^2+M_{Z}^{2}}}, \nonumber \\
\end{eqnarray*}
with the Fermi constant, $ G_{\mu}= 1.16639\cdot 10^{-5}\,{\rm GeV}^{-2}$.
\medskip
The `radiator' functions ${\cal S}_i$ and the function ${\cal F}^{IR}$ are:
\begin{eqnarray}
{\cal S}_{1}(y,Q^2,y_h,Q^2_h)
&=& \Biggl\{ \frac{1}{\sqrt{C_{2}}} \left[ \frac{Q^2_h-Q^2}{2}
+\frac{(Q^2+2m^{2})(Q^2_h-2m^{2})}{Q^2_h-Q^2} \right] \nonumber \\
& &- m^{2}(Q^2_h-2m^{2})\frac{B_{2}}{C_{2}^{3/2}} \Biggr\}
- \Biggl\{ S \leftrightarrow - X \Biggr\} +\frac{1}{\sqrt{A_2}}, \nonumber \\
{\cal S}_{2}(y,Q^2,y_h,Q^2_h)
&=& \Biggl\{\frac{1}{\sqrt{C_{2}}} [ M^{2}(Q^2_h + Q^2)-
XS_h] \nonumber \\
& & +
\frac{1}{(Q^2_h-Q^2)\sqrt{C_{2}}} \Biggl[ Q^2_h[S(S-S_h)+X(X+S_h)
- 2M^{2}(Q^2_h+2m^2) ] \nonumber \\
& & + 2m^{2} [(S-S_h)(X+S_h)+SX] \Biggr]
\nonumber \\
& &-2m^{2} \frac{B_{2}}{C_{2}^{3/2}} [S(S-S_h)-M^{2}Q^2_h ] \Biggr\}
- \Biggl\{ S \leftrightarrow -X \Biggr\}
-\frac{2M^{2}}{\sqrt{A_2}}, \nonumber \\
{\cal S}_{3}(y,Q^2,y_h,Q^2_h) &=& \Biggl\{\frac{1}{\sqrt{C_{2}}}
\Biggl[\frac{2 Q^2_h ( Q^2_h+2m^{2})(S+X)}{Q^2_h-Q^2}
- 2XQ^2_h - S_h(Q^2_h+Q^2)\Biggr] \nonumber \\
& & - 2m^{2} Q^2_h\frac{B_{2}}{C_{2}^{3/2}} (2S-S_h)\Biggr\}
+~\Biggl\{ S \leftrightarrow -X \Biggr\}, \nonumber \\
{\cal F}^{IR}(y,Q^2,y_h,Q^2_h) &=&
\frac{Q^2+2m^{2}}{Q^2-Q^2_h}
\Biggl( \frac{1}{\sqrt{C_{1}}}-\frac{1}{\sqrt{C_{2}}} \Biggr)\
-m^{2} \Biggl(\ \frac{B_{1}}{C_{1}^{3/2}}+
\frac{B_{2}}{C_{2}^{3/2}} \Biggr)\ ,
\nonumber
\end{eqnarray}
where
\begin{eqnarray}
A_2 &=& \lambda_l \:\equiv \:A_1, \nonumber \\
B_2 &=& 2 M^2 Q^2 ( Q^2- Q^2_h ) + X(S_lQ^2_h-S_hQ^2)
\nonumber \\
& & +~SQ^2(S_l-S_h) \:\equiv \:-B_1
(S \leftrightarrow - X), \nonumber \\
C_2 &=& [XQ^2_h-Q^2(S-S_h)]^2 + 4m^2
\left[(S_l-S_h)(S_lQ^2_h-S_hQ^2) \right. \nonumber \\
& &\left. -~M^2(Q^2_h - Q^2 )^2\right]
\:\equiv \:C_1 [S \leftrightarrow -X ],
\nonumber
\end{eqnarray}
with
\begin{eqnarray*}
X=S(1-y) &=& 2ME' \\
S_h &=& Sy_h \\
\end{eqnarray*}
and the hadron defined variables, $x_h, y_h$ are given by the following
equations
\begin{eqnarray*}
M_h^2&=&M^2+Sy_h(1-x_h) \\
Q^2_h&=&Sx_hy_h \\
\end{eqnarray*}
\medskip\medskip\medskip
\section {Appendix C}
\medskip\medskip
Below we list the relations between the variables used in the MT (Appendix A)
and in the D schemes (Appendix B):
\begin{eqnarray*}
\mbox{\rm Dubna scheme} & & \mbox{Mo and Tsai scheme}\\
E & & E_s \\
E' & & E_p \\
S_l=Sy & & 2M\nu \\
Q^2_h & & -q^2 \\
\lambda_l=S_l^2 + 4M^2Q^2 & & \left(2M\mid\overline u\mid\right)^2 \\
B_2\over{2\lambda_l}& & a \\
C_2\over{4\lambda_l}& &- x^2 \\
M_h & & M_j \\
{2M\over{\left(\lambda_l\right)^{1/2}}}\left(E - E' +
M\left({Q^2_h-Q^2}\over{Q^2_h-S_l}\right)\right)&& \cos\theta_k \\
{\D Q^2_h}\over{Q^4_h\lambda_l^{1/2}}&&{\D\left(\cos\theta_k\right)\omega}\over
{q^4\left(u_0 - \mid\overline u\mid \cos\theta_k\right)} \\
\end{eqnarray*}
\frenchspacing
\medskip\medskip\medskip
\section{Introduction}
Molecular hydrogen (H$_2$) is the most abundant molecule in the
universe and is closely linked to star formation via the star
formation surface density rate -- molecular gas surface density
relation \citep{Bigiel08}. Measuring rest-frame UV rotational
transitions from the Lyman and Werner bands in absorption against a
bright background continuum is one of the few ways to directly measure
H$_2$ \citep[see][for example]{Draine11}. This technique probes
diffuse gas with molecular fractions, \hbox{$f_{\textrm{H}_2}$}, of $\sim 10^{-6}$ to
$\sim 0.1$ -- denser molecular clouds are both dusty, and thus likely
to extinguish UV light from a background source, and compact, such
that there is a low probability of intersection with a sightline to a
background light source \citep[][]{Hirashita03, Zwaan06}. However, the
lower molecular fraction systems that are detected give valuable
insights into the environments and physical mechanisms necessary for
the formation of H$_2$. With this technique we can measure the
physical properties of cool, dense gas over a large fraction of the
age of the Universe, from the interstellar medium in the solar
neighbourhood to proto-galaxies a few Gyr after the big bang.
Since the initial detection towards the UV bright star $\xi$ Persei
\citep{Carruthers70}, a large sample of sightlines exhibiting H$_2$ in
absorption from the Milky Way and its halo has been assembled. These
observations have characterised H$_2$ in diffuse molecular gas in the
Milky Way plane \citep{Savage77}, the Magellanic clouds
\citep{Tumlinson02, Welty12}, high latitude sightlines out of the
Milky Way plane \citep{Gillmon06, Wakker06}, in intermediate and high
velocity clouds \citep[IVCs and HVCs,][]{Richter03b,Richter99}, and in
the Magellanic Stream \citep{Sembach01, Richter01b}. A physical
picture where H$_2$ formation occurs predominantly on the surface of
dust grains \citep{Shull82} in clouds with total densities of $n\sim
10-100$\,\mbox{cm$^{-3}$}\ illuminated by the local UV radiation has been
successful in reproducing both the observed H$_2$ rotational
population levels and molecular fractions in the Milky Way (for
example \citealt{Spitzer74}; \citeauthor{Jura75b}
\citeyear{Jura75a},b) and the Magellanic clouds \citep{Tumlinson02}.
H$_2$ has also been measured at redshifts $1.5 - 4.5$, corresponding
to lookback times of $\sim 9 - 12$\ Gyr, in damped
\mbox{Ly$\alpha$}\ (\hbox{$N_\textsc{H\,i}$}\ $>10^{20.3}$\,\mbox{cm$^{-2}$}, DLA) and sub-damped
\mbox{Ly$\alpha$}\ ($10^{19}$\,\mbox{cm$^{-2}$}\,$\lesssim$~\hbox{$N_\textsc{H\,i}$}~$\lesssim 10^{20.3}$\,\mbox{cm$^{-2}$},
sub-DLA) absorption systems seen towards bright background QSOs. In
this redshift range, absorption features from \hbox{{\rm H}{\sc \,i}}\ and sometimes H$_2$
are redshifted into the optical range, making them relatively easy to
detect with large ground-based telescopes. The first unambiguous
detection in a redshifted absorber was made by \citeauthor{Foltz88}
\citep[1988, see also][]{Levshakov85}, and since then at least $16$
further such systems have been discovered \citep[for example][]{Ge97,
Ge01, Levshakov02, Cui05, Ledoux06, Noterdaeme07}. Approximately
$10$ per cent of DLAs have a molecular fraction \hbox{$f_{\textrm{H}_2}$}\,$>10^{-4.5}$,
and these tend to be more metal rich and dustier \citep{Ledoux03}, and
have higher velocity widths \citep{Noterdaeme08} than DLAs without
detectable H$_2$. Several physical diagnostics are available to
measure the properties of the H$_2$ absorbing gas. Some H$_2$ systems
also show absorption from the CO molecule, revealing the presence of a
cold, dense core of gas with excitation temperatures consistent with
those expected from the cosmic microwave background
\citep[for example][]{Srianand08, Noterdaeme09}. The H$_2$ rotational level
populations and \hbox{{\rm C}{\sc \,i}}\ fine structure transitions can also be used to
measure particle densities. They are generally found to be similar to
those measured along local sightlines in the Milky Way ($\sim
10-100$\,\mbox{cm$^{-3}$}), but the ambient UV field, gas temperatures and gas
pressures tend to be higher \citep{Hirashita05, Srianand05}.
No studies currently exist of H$_2$ at lower redshifts, $z < 1.5$,
outside the Milky Way halo. Until recently, the low number of DLA and
sub-DLA systems known at low redshifts, together with the smaller
light gathering power of spaced-based UV telescopes compared to large
aperture ground-based optical telescopes, have made observing the
Lyman-Werner bands in this redshift range impractical. However, with
the availability of the far-UV sensitive Cosmic Origins Spectrograph
(COS) on the Hubble Space Telescope (HST), molecular absorption can
now be effectively detected for $0.1 \lesssim z \lesssim 0.8$.
In this paper we report the serendipitous detection of H$_2$ in a
sub-DLA at $z=0.56$, the first such system analysed at a redshift
below $1.5$ beyond the Milky Way halo. It has a high molecular
fraction given the total cloud neutral hydrogen column density, and we
show that the associated metal absorption features seen require the
presence of three phases: a cold $T\sim 100$\,K phase analogous to the
cold neutral medium observed in the Milky Way's interstellar medium
(ISM); a partially-ionised $T\sim 10^4$\,K phase, similar to the warm
neutral medium in the ISM; and a warmer, probably collisionally
ionised phase. Based on the cloud properties we argue the absorber is
likely caused by a tidally-stripped absorbing structure similar to the
Magellanic Stream embedded in a warm halo $\sim 10$\,kpc from a nearby
galaxy.
The layout of the paper is as follows. Section 2 describes the data
used; Section 3 describes how we identified lines and measured the
absorption line properties; and Section 4 describes the properties of
the H$_2$ absorption and the sub-DLA. We compare to theoretical models
and discuss our results in Section 5, and summarise the main results
of the paper in Section 6. When not explicitly shown logarithms are to
base 10, and we use a 7-year WMAP cosmology
\citep[$H_0=70.4$~\mbox{km~s$^{-1}$}$\,$Mpc$^{-1}$, $\Omega_\mathrm{M}=0.272$,
$\Omega_\Lambda=0.728$;][]{Komatsu11} where necessary. We use
transition wavelengths and oscillator strengths given by
\citet{Morton76, Morton03} and \citet{Verner94}, and H$_2$ transition
wavelengths from \citet{Bailly10}.
\section{Data}
Transitions from the sub-DLA are measured in absorption against the
continuum from the background QSO, Q~0107$-$0232, at $z_{\rm qso}=0.728$
(see Table~\ref{t_qso}). This was discovered by the Large Bright
Quasar survey \citep{Hewett95} and is one of a group of three bright
QSOs with small angular separations on the sky. Spectra of these QSOs
taken using the Faint Object Spectrograph (FOS) on the HST have been
used to measure correlations in neutral hydrogen absorption
\citep{Young01, Petry06} and in absorption with galaxy positions
\citep{Crighton10} across the three sightlines.
Here we present higher resolution far UV spectra of Q~0107$-$0232
taken with the Cosmic Origins Spectrograph on the HST, and an optical
spectrum taken with the High Resolution Echelle Spectrograph (HIRES)
on Keck I. In our analysis we also make use of $K$ band imaging of the
QSO and archival UV FOS spectra. The FOS spectra were originally
published by \citet{Young01}. We employ the combined spectrum used by
\citet{Crighton10}, covering a wavelength range of
$1572-2311\,$\AA\ at a typical signal to noise (S/N) of $31$ per
$4\,$\AA\ resolution full width at half maximum intensity (FWHM).
\begin{table}
\addtolength{\tabcolsep}{-1.5pt}
\begin{center}
\begin{tabular}{cccccc}
Name & R.A. (J2000) & Dec. (J2000) & $z_{\rm em}$ & $R$-mag \\
\hline
Q~0107$-$0232 & $01^\mathrm{h}10^\mathrm{m}14.43^\mathrm{s}$
& $-02\degr16\arcmin57.6\arcsec$ & $0.728$ & $18.4$ \\
\hline
\end{tabular}
\caption{\label{t_qso} Properties of the background QSO towards which
the sub-DLA is seen. Columns show the coordinates, emission redshift
  (measured from \hbox{{\rm Mg}{\sc \,ii}}\ $\lambda\lambda2796, 2803$ emission in the
HIRES spectrum) and $R$ band magnitude.}
\end{center}
\end{table}
\begin{table}
\addtolength{\tabcolsep}{-2.pt}
\begin{center}
\begin{tabular}{cccccc}
\hline
Dataset & Date obs. & Exp. Time (s) & Grating & $\lambda_{\rm c}$ (\AA) \\
\hline
LB5H12010 & 6 Nov 2010 & $13905.728$ & G160M & $1589$ \\
LB5H13010 & 18 Nov 2010 & $13905.472$ & G160M & $1589$ \\
LB5H11010 & 19 Nov 2010 & $13905.376$ & G160M & $1589$ \\
LB5H14010 & 24 Nov 2010 & $13905.536$ & G160M & $1623$ \\
LB5H15010 & 26 Nov 2010 & $13905.504$ & G160M & $1623$ \\
LB5H16010 & 7 Dec 2010 & $13905.472$ & G160M & $1623$ \\
\hline
\end{tabular}
\caption{\label{t_COS} Observations of Q~0107$-$0232 with
HST/COS. Columns show the HST archive dataset name, the date
observed, total exposure time, grating and central wavelength
setting used.}
\end{center}
\end{table}
\subsection{COS spectra reduction}
The COS spectra were obtained over a period from the 6th of November
to the 7th of December 2010, as part of the Cycle 17 proposal
11585. They represent a total exposure time of $23$ hours across $30$
orbits. Two central wavelength settings were taken with the G160M
grating, each using $4$ FP-POS positions to enable complete wavelength
coverage from $1380$ to $1850$~\AA. Details of the exposures are given
in Table~\ref{t_COS}.
We used the CALCOS pipeline\footnote{Version 2.13.6,
\url{http://www.stsci.edu/hst/cos/pipeline/}} to perform background
subtraction, wavelength calibration and extraction. The default
background extraction smoothing scale of $100$ pixels resulted in poor
background subtraction for our spectra, presumably because the
pipeline was optimised for brighter targets. We found that changing
BWIDTH in the XTRACTAB calibration table from the default value of 100
to $20$ significantly improved the background level such that the flux
in strongly saturated features broader than the COS instrument line
spread profile was consistent with zero.
Wavelength shifts are expected between visits and different wavelength
settings due to temperature differences and uncertainty in the
telescope pointing. The S/N in individual exposures is generally too
low ($\sim 2$ per pixel) to reliably measure the centres of absorption
features. Therefore we combined subsets of exposures grouping by
FP-POS position, by visit (corresponding to a single dataset name in
Table~\ref{t_COS}), by grating central wavelength and by FUV segment
to search for any shifts. Wavelength solutions were consistent across
different visits and FP-POS values, but there are significant
wavelength-dependent shifts between different central wavelength
settings. To correct these, we measured the centroid for common narrow
absorption features where two wavelength settings overlapped, and used
these centres to calculate a wavelength offset as a function of
position. We fitted these offsets with a linear dependence on
wavelength, and then corrected for them such that FUV segment A
$\lambda_c=1623$ matched the FUV segment A $\lambda_c=1589$ setting,
and FUV segment B $\lambda_c=1589$ matched the FUV segment B
$\lambda_c=1623$ setting. The largest shifts applied in this way were
$0.1$ \AA, corresponding to $20$~\mbox{km~s$^{-1}$}, but they could result in a
$\sim40$ \mbox{km~s$^{-1}$}\ internal shift between the shortest and longest
wavelengths of an exposure. These shifts are given in
Table~\ref{t_wac_shifts}.
The scores of H$_2$ absorption features distributed across the full
spectral range enable a further check of the internal consistency of
the wavelength solution. By measuring the centroid of these features
and comparing to a single-component model of H$_2$ absorption, we
discovered an additional wavelength-dependent shift (shown in
Table~\ref{t_H2_shifts}). The magnitude of this shift is smaller
($\sim 5$ \mbox{km~s$^{-1}$}) than that applied above, but still significant when
fitting an absorption system with transitions spread across a large
wavelength range. We removed this shift by subtracting a cubic spline
fitted to the offsets as a function of wavelength position from the
wavelength scale.
To match the zero points of the COS and HIRES wavelength scales, we
compared the \hbox{{\rm N}{\sc \,ii}}~$1084$ and \hbox{{\rm C}{\sc \,i}}~$945$ features from the $z=0.56$
system in the combined COS spectrum to their expected positions from
the redshifts of the \hbox{{\rm Fe}{\sc \,ii}}\ and \hbox{{\rm Mg}{\sc \,ii}}\ lines from the same system
measured in the HIRES spectrum. The wavelength zero point of the HIRES
spectrum is known to better than 1 \mbox{km~s$^{-1}$}\ relative to narrow Galactic
\hbox{{\rm Ca}{\sc \,ii}}\ absorption features seen in the spectrum. Both the \hbox{{\rm N}{\sc \,ii}}\ and
\hbox{{\rm C}{\sc \,i}}\ appear at redshifts expected from the HIRES \hbox{{\rm Fe}{\sc \,ii}}\ and
\hbox{{\rm Mg}{\sc \,ii}}\ redshifts, and \hbox{{\rm N}{\sc \,ii}}\ shows a similar component structure
(albeit at the lower COS resolution). We conclude that no correction
to the wavelength zero point of the combined COS spectrum is
necessary.
We also measured the redshift of Galactic absorption features in the
COS spectrum to confirm the zero point of the wavelength solution was
correct. These are all saturated and possibly contain multiple
components, so do not provide a stringent constraint on the zero
point. However, they show no evidence of a systematic offset.
After correcting each exposure for these wavelength shifts we made a
combined spectrum in the following way. First we rebinned each
exposure to a single wavelength scale with pixel width $0.0367$ \AA,
ensuring Nyquist sampling. We used nearest-neighbour binning to
preserve the spectra's noise properties, and checked that this did not
introduce any significant wavelength shifts. The $1\sigma$ uncertainty
on each pixel was estimated empirically as the standard error on the
mean of the contributing pixel fluxes. This is a slight overestimate
of the true uncertainty, as the exposure times were not all
identical. However, the uncertainties measured in this way are
consistent with the standard deviation of the flux in regions free
from absorption, and we believe this is a good estimate of the true
uncertainty.
Since the background level of the COS spectra is low, at small source
count rates the flux distribution may be better described by Poisson
rather than Gaussian statistics. However, in practice we find that for
regions of our spectra with the lowest number of counts -- the cores
of saturated profiles -- uncertainties in the background levels from
the many contributing exposures makes a Gaussian flux distribution a
good approximation.
Finally we estimated the unabsorbed continuum level of the combined
spectrum by fitting spline segments joining regions that appeared free
from absorption. The resulting combined spectrum has a S/N of $10$ per
$\sim 20$~\mbox{km~s$^{-1}$}\ resolution element at the continuum and covers a
wavelength range from $1380$ to $1850$~\AA.
\subsection{HIRES spectra reduction}
The HIRES observations were performed on the night of 4th of August
2011. Four 1800\,s exposures were taken using the red cross-disperser
and a 0.861\arcsec\ width slit. Two wavelength settings were used to
cover gaps in the detector. We used MAKEE to process each exposure,
which subtracts the bias level and the sky background, corrects for
the echelle blaze, generates a wavelength solution by identifying arc
lines to yield a mapping from pixel number to wavelength for each
echelle order, and extracts one-dimensional spectra for each echelle
order. We then used custom-written Python code to coadd the individual
orders for each exposure into a combined spectrum, and to infer the
unabsorbed continuum level by fitting spline segments to regions free
from absorption. The final combined spectrum has a S/N at
$5000$\AA\ of $33$ per $6.67$~\mbox{km~s$^{-1}$}\ resolution FWHM, and covers a
wavelength range $3890$ to $8330$~\AA.
\subsection{Imaging}
We acquired $K$ band imaging of a 7\arcmin\,$\times$\,7\arcmin\ field
around Q~0107$-$0232 using the High Acuity Wide field K-band Imager
(HAWK-I) on the Very Large Telescope (VLT) during program
383.A-0402. Five $180\,$s exposures were taken at four offset
positions on the 15th of September 2009. We used the HAWK-I pipeline
recipes to process each exposure to remove the bias level and correct
for sensitivity variations using a flat-field. An astrometric
solution was measured for each exposure using \textsc{Scamp}
\citep{Bertin06}; the exposures were then resampled to a common world
coordinate system and coadded with \textsc{Swarp} \citep{Bertin02}. We
determined the conversion between the measured counts and the
magnitude by comparison to 2MASS magnitudes for objects in the
field. The limiting magnitude reached is $\sim 23.5$ mag (AB) for a
$3\sigma$ detection of a point source.
\section{Analysis}
\subsection{Line identification}
Most of the transitions associated with the sub-DLA fall inside the
\mbox{Ly$\alpha$}\ forest of the background QSO, and many are blended with
absorption at different redshifts. We identified each absorption
feature in the COS and FOS spectra in the following way. We first
searched for Galactic absorption at the wavelengths of transitions
typically seen in the Galactic interstellar medium (ISM;
\hbox{{\rm Si}{\sc \,ii}}\ $\lambda1526$, \hbox{{\rm C}{\sc \,iv}}\ $\lambda\lambda1548,\,1550$,
\hbox{{\rm Fe}{\sc \,ii}}\ $\lambda1608$, \hbox{{\rm C}{\sc \,i}}\ $\lambda1657$, and
\hbox{{\rm Mg}{\sc \,i}}\ $\lambda2026$/\hbox{{\rm Zn}{\sc \,ii}}\ $\lambda2026$ were present\footnote{There
is also absorption at the expected position of \hbox{{\rm Zn}{\sc \,i}}\ $\lambda2139$,
redwards of the QSO \mbox{Ly$\alpha$}\ emission. However, since this line is only
observed in sightlines with \hbox{$N_\textsc{H\,i}$}$\gtrsim 10^{21}$~\mbox{cm$^{-2}$}\ in the Milky
Way ISM (Daniel Welty, private communication), we identify it as
\hbox{{\rm N}{\sc \,v}}\ $\lambda1238$ near the QSO redshift.}). Then we identified
systems by the presence of either \hbox{{\rm C}{\sc \,iv}}\ ($\lambda\lambda1548,\ 1550$),
\hbox{{\rm O}{\sc \,vi}}\ ($\lambda\lambda1032,\ 1038$), or \hbox{{\rm H}{\sc \,i}}\ \mbox{Ly$\alpha$}\ and \mbox{Ly$\beta$}, starting
at the emission redshift of the QSO and moving down in redshift to
$z=0$. Once these systems were identified, we searched for any
further associated metal transitions such as \hbox{{\rm Si}{\sc \,iv}}, \hbox{{\rm Si}{\sc \,iii}}, \hbox{{\rm Si}{\sc \,ii}},
\hbox{{\rm C}{\sc \,iii}}, \hbox{{\rm C}{\sc \,ii}}. We found it was necessary to iterate this process
several times, each time including line IDs from previous runs.
The $z=0.56$ sub-DLA was previously identified by \citet{Crighton10}
by its many associated strong metal transitions in the FOS spectrum.
Once we had made plausible identifications for lines at redshifts
other than the $z=0.56$ sub-DLA, we identified metals and molecular
absorption lines from the Lyman and Werner bands for this
system. Finally we assumed any remaining unidentified absorption
features were \mbox{Ly$\alpha$}. For this paper we focus on absorption features
associated with the $z=0.56$ system. Absorbers at different redshifts
are used only to identify blends with transitions from the sub-DLA.
\subsection{Kinematics and velocity structure of the sub-DLA}
H$_2$ is expected to be found in gas with temperatures less than $\sim
5000$~K -- at higher temperatures molecules are destroyed through
collisional excitation \citep{Shull82}. Therefore we expect the H$_2$
absorption features to be narrow, $<10$~\mbox{km~s$^{-1}$}, and the COS spectra will
not resolve the H$_2$-bearing components. H$_2$ components do not
necessarily coincide with the strongest \hbox{{\rm H}{\sc \,i}}\ or metal line positions
\citep[for example][]{Petitjean02,Noterdaeme10}. However, we use transitions
covered by the higher resolution HIRES spectrum to inform us about the
velocity structure of the absorbing gas, and apply this to H$_2$ and
other transitions only present in the UV spectra.
Figure \ref{f_HIRES} shows the transitions at $z=0.56$ detected in the
HIRES spectrum: \hbox{{\rm Mg}{\sc \,ii}}\ ($\lambda\lambda2796,\ 2803$),
\hbox{{\rm Mg}{\sc \,i}}\ ($\lambda2853$), \hbox{{\rm Ca}{\sc \,ii}}\ ($\lambda\lambda3934,\ 3969$), and
\hbox{{\rm Fe}{\sc \,ii}}\ ($\lambda\lambda2586,\ 2600$). We also measure upper limits on
\hbox{{\rm Al}{\sc \,i}}, \hbox{{\rm Fe}{\sc \,i}}, \hbox{{\rm Ca}{\sc \,i}}, \hbox{{\rm Na}{\sc \,i}}, \hbox{{\rm Ti}{\sc \,ii}}, and \hbox{{\rm Mn}{\sc \,ii}}. We fitted velocity
components and column densities to these transitions using
\textsc{vpfit}\footnote{\url{http://www.ast.cam.ac.uk/~rfc/vpfit.html}}. The
best-fitting values are given in Table~\ref{t_voigt}. A single common
velocity structure spanning $\sim 200$~\mbox{km~s$^{-1}$}\ provides a good fit to
all of these transitions, assuming line broadening is dominated by
Gaussian turbulent motions rather than the gas temperature. The best
fitting model is shown in Figure \ref{f_HIRES}. \hbox{{\rm Ca}{\sc \,ii}}\ and \hbox{{\rm Mg}{\sc \,i}}\ have
the lowest ionisation energies ($11.87$ and $7.65$\,eV respectively),
and so {\it a priori} we might expect them to be associated with the
cold environment where H$_2$ is found. However, the photoionisation
analysis in Section~\ref{s_cloudy} indicates that most of the
\hbox{{\rm Mg}{\sc \,i}}\ and much of the \hbox{{\rm Ca}{\sc \,ii}}\ probably arises in diffuse, photoionised
gas distinct from the H$_2$.
Component 6 has a Doppler width $b$ ($\equiv \sqrt{2} \sigma$) of
$20$\,\mbox{km~s$^{-1}$}, larger than is usually observed in low-ionisation metal
transitions. This, together with the suggestion of correlated
residuals in \hbox{{\rm Mg}{\sc \,ii}}\ near the position of this component suggests it is
in fact a blend of two or more narrower components. The quality of
even the HIRES data is not sufficient to constrain the parameters of
such heavily blended components. However, as long as the distribution
of unresolved component widths is not strongly bimodal, the column
density estimates for this component should be accurate
\citep{Jenkins86}. We also measure \hbox{$N_{\textrm{H}_2}$}\ independently of the
velocity model assumed for H$_2$ in Section \ref{s_fhtwo} to ensure
that the velocity model does not strongly bias our measurement of the
molecular fraction.
\begin{table}
\addtolength{\tabcolsep}{-3.8pt}
\begin{center}
\begin{tabular}{clccccccc}
\hline
\# & Ion & $\Delta v$ & $\log\,N$& $\sigma_{\log\,N}$ & $b$ & $\sigma_b$ & $z$ & $\sigma_z$ \\
& & {\footnotesize (km~s$^{-1}$)} & \multicolumn{2}{c}{\footnotesize ($N$ in cm$^{-2}$)} & \multicolumn{2}{c}{\footnotesize (km~s$^{-1}$)} & & {\footnotesize$\times 10^6$} \\
\hline
1 & \hbox{{\rm Fe}{\sc \,ii}} &$-110$& $<12.81$& & 6.98 & 1.20 & 0.5567157 & 3.7 \\
& \hbox{{\rm Mg}{\sc \,i}} & & 10.72 & 0.25 & & & & \\
& \hbox{{\rm Mg}{\sc \,ii}} & & 12.02 & 0.05 & & & & \\
& \hbox{{\rm Ca}{\sc \,ii}} & & $<11.48$& & & & & \\[\smallskipamount]
2 & \hbox{{\rm Fe}{\sc \,ii}} & $-93$& 12.18 & 0.11 & 2.05 & 0.70 & 0.5568053 & 2.0 \\
& \hbox{{\rm Mg}{\sc \,i}} & & 10.27 & 0.54 & & & & \\
& \hbox{{\rm Mg}{\sc \,ii}} & & 12.25 & 0.08 & & & & \\
& \hbox{{\rm Ca}{\sc \,ii}} & & $<11.40$& & & & & \\[\smallskipamount]
3 & \hbox{{\rm Fe}{\sc \,ii}} & $-72$& 12.84 & 0.04 & 11.65 & 0.86 & 0.5569132 & 2.4 \\
& \hbox{{\rm Mg}{\sc \,i}} & & 11.08 & 0.14 & & & & \\
& \hbox{{\rm Mg}{\sc \,ii}} & & 13.02 & 0.02 & & & & \\
& \hbox{{\rm Ca}{\sc \,ii}} & & 11.32 & 0.12 & & & & \\[\smallskipamount]
4 & \hbox{{\rm Fe}{\sc \,ii}} & $-50$& 13.09 & 0.03 & 6.94 & 0.54 & 0.5570298 & 1.5 \\
& \hbox{{\rm Mg}{\sc \,i}} & & 11.41 & 0.06 & & & & \\
& \hbox{{\rm Mg}{\sc \,ii}} & & 13.11 & 0.03 & & & & \\
& \hbox{{\rm Ca}{\sc \,ii}} & & 11.54 & 0.06 & & & & \\[\smallskipamount]
5 & \hbox{{\rm Fe}{\sc \,ii}} & $-26$& 12.94 & 0.09 & 9.43 & 1.04 & 0.5571530 & 3.5 \\
& \hbox{{\rm Mg}{\sc \,i}} & & 11.39 & 0.09 & & & & \\
& \hbox{{\rm Mg}{\sc \,ii}} & & 13.16 & 0.06 & & & & \\
& \hbox{{\rm Ca}{\sc \,ii}} & & 11.22 & 0.18 & & & & \\[\smallskipamount]
6 & \hbox{{\rm Fe}{\sc \,ii}} & 0 & 13.68 & 0.02 & 19.81 & 0.83 & 0.5572885 & 4.6 \\
& \hbox{{\rm Mg}{\sc \,i}} & & 11.84 & 0.04 & & & & \\
& \hbox{{\rm Mg}{\sc \,ii}} & & 13.54 & 0.02 & & & & \\
& \hbox{{\rm Ca}{\sc \,ii}} & & 12.10 & 0.03 & & & & \\[\smallskipamount]
7 & \hbox{{\rm Fe}{\sc \,ii}} & +44 & 12.81 & 0.04 & 6.51 & 0.40 & 0.5575174 & 1.2 \\
& \hbox{{\rm Mg}{\sc \,i}} & & 11.24 & 0.08 & & & & \\
& \hbox{{\rm Mg}{\sc \,ii}} & & 12.72 & 0.02 & & & & \\
& \hbox{{\rm Ca}{\sc \,ii}} & & 11.05 & 0.17 & & & & \\[\smallskipamount]
8 & \hbox{{\rm Fe}{\sc \,ii}} & +68 & 13.42 & 0.02 & 8.41 & 0.25 & 0.5576435 & 0.8 \\
& \hbox{{\rm Mg}{\sc \,i}} & & 11.60 & 0.04 & & & & \\
& \hbox{{\rm Mg}{\sc \,ii}} & & 13.25 & 0.02 & & & & \\
& \hbox{{\rm Ca}{\sc \,ii}} & & 11.57 & 0.06 & & & & \\[\smallskipamount]
9 & \hbox{{\rm Fe}{\sc \,ii}} & +100 & 12.20 & 0.10 & 5.08 & 0.76 & 0.5578081 & 2.3 \\
& \hbox{{\rm Mg}{\sc \,i}} & & 10.74 & 0.25 & & & & \\
& \hbox{{\rm Mg}{\sc \,ii}} & & 12.13 & 0.02 & & & & \\
& \hbox{{\rm Ca}{\sc \,ii}} & & $<11.44$& & & & & \\[\smallskipamount]
10 & \hbox{{\rm Fe}{\sc \,ii}} &+124 & 12.51 & 0.08 & 9.74 & 0.56 & 0.5579336 & 1.8 \\
& \hbox{{\rm Mg}{\sc \,i}} & & 10.15 & 1.03 & & & & \\
& \hbox{{\rm Mg}{\sc \,ii}} & & 12.57 & 0.02 & & & & \\
& \hbox{{\rm Ca}{\sc \,ii}} & & $<11.56$& & & & & \\
\hline
\end{tabular}
\caption{\label{t_voigt} Best-fitting Voigt profile parameters for
each component in transitions for the $z=0.56$ system that are
covered by the HIRES spectrum.}
\end{center}
\end{table}
\begin{figure*}
\begin{center}
\includegraphics[width=0.92\textwidth]{./fig_HIRES.pdf}
\caption{\label{f_HIRES} Observed transitions and upper limits at
$z=0.56$ in the HIRES spectrum. The thin smooth curve shows our
best-fitting model using a single common velocity structure. The
darker tick marks show components 5 \& 6 that have associated
H$_2$. Points distributed around $-0.25$ show the residuals, defined
as (flux $-$ model) / ($1 \sigma$ error in flux). The two thin lines
above and below this distribution mark the $1\sigma$ deviation
levels. Only upper limits are measured for \hbox{{\rm Ca}{\sc \,i}}\ and \hbox{{\rm Fe}{\sc \,i}}.}
\end{center}
\end{figure*}
\subsection{UV transitions for the sub-DLA}
We apply the \hbox{{\rm Mg}{\sc \,ii}}\ velocity structure to models fitted to transitions
observed in the lower resolution COS and FOS spectra. Using this
velocity structure we were able to match the \hbox{{\rm N}{\sc \,ii}}, \hbox{{\rm Si}{\sc \,ii}}\ and
\hbox{{\rm O}{\sc \,i}}\ profiles by varying the component line widths and column
densities. Several of the COS transitions that have measurable
absorption and are not saturated or heavily blended with unrelated
systems are shown in Figure~\ref{f_COS}. When fitting the COS spectra
we use the tabulated line spread function provided by
STScI\footnote{\url{http://www.stsci.edu/hst/cos/performance/spectral_resolution/}},
linearly interpolated to the wavelength at the centre of each fitting
region. We also measured column densities using the apparent optical
depth (AOD) method (which assumes the transition is optically thin,
\citealt{Savage91}), including a $5$ per cent uncertainty in the
continuum level. As the individual components are not resolved by the
COS spectra, we quote these AOD measurements and give the total column
densities for all components in aggregate. For transitions \hbox{{\rm C}{\sc \,i}}, \hbox{{\rm N}{\sc \,ii}},
\hbox{{\rm O}{\sc \,i}}, \hbox{{\rm O}{\sc \,vi}}\ we were able to directly compare column densities measured
using both Voigt profile fitting and the AOD method. In each case they
are consistent with one another. \hbox{{\rm C}{\sc \,ii}}, \hbox{{\rm C}{\sc \,iii}}\ and \hbox{{\rm Si}{\sc \,iii}}\ are
saturated, and lower limits are measured using the AOD method. The FOS
spectrum provides an upper limit on $N_\textrm{\hbox{{\rm Si}{\sc \,iv}}}$. Table~\ref{t_logN_UV} gives measurements and uncertainties,
lower and upper limits for all of the transitions in the UV spectra.
The damping wings measured at \mbox{Ly$\alpha$}\ in the FOS spectrum constrain
\hbox{$N_\textsc{H\,i}$}\ $=10^{19.5 \pm 0.2}$~\mbox{cm$^{-2}$}, where the error is dominated by the
systematic uncertainty in the continuum level (see
Figure~\ref{f_NHI}).
\begin{table}
\renewcommand{\arraystretch}{1.2}
\begin{center}
\begin{tabular}{ccc}
\hline
Ion & Transition $\lambda$ (\AA) & $\log_{10} N$ (cm$^{-2}$) \\
\hline
\hbox{{\rm H}{\sc \,i}} & 1215 & $19.50^{+0.20}_{-0.20}$ \\
\hbox{{\rm C}{\sc \,i}} & 945 & $13.53^{+0.24}_{-0.77}$ \\
\hbox{{\rm C}{\sc \,ii}} & 1036 & $> 14.8$ \\
\hbox{{\rm C}{\sc \,iii}} & 977 & $> 14.3$ \\
\hbox{{\rm N}{\sc \,i}} & 1135 & $< 14.4$ \\
\hbox{{\rm N}{\sc \,ii}} & 1084 & $14.73^{+0.17}_{-0.19}$ $^{\mathrm{a}}$ \\
\hbox{{\rm O}{\sc \,i}} & 1039 & $15.53^{+0.24}_{-0.25}$ \\
\hbox{{\rm O}{\sc \,vi}} & 1031 & $14.60^{+0.16}_{-0.24}$ \\
\hbox{{\rm Si}{\sc \,ii}} & 1020 & $14.79^{+0.23}_{-0.64}$ \\
\hbox{{\rm Si}{\sc \,iii}} & 1206 & $> 13.7$ \\
\hbox{{\rm Si}{\sc \,iv}} & 1393 & $< 13.1$ \\
\hline
\end{tabular}
\caption{\label{t_logN_UV} Total column densities for transitions in
the $z=0.56$ system observed in the COS and FOS spectra. \hbox{$N_\textsc{H\,i}$}\ is
calculated from the damping wings at \mbox{Ly$\alpha$}\ in the FOS
spectrum. \hbox{{\rm C}{\sc \,ii}}, \hbox{{\rm C}{\sc \,iii}}\ and \hbox{{\rm Si}{\sc \,iii}}\ are saturated, and lower limits
are calculated using the AOD method. The \hbox{{\rm N}{\sc \,i}}\ and \hbox{{\rm Si}{\sc \,iv}}\ values are
$5\sigma$ upper limits. The remaining values were calculated using
the apparent optical depth of the transition with rest wavelength in
the second column. Uncertainties given for these values are $1
\sigma$ and include a $5$ per cent uncertainty in the continuum
level, which generally dominates the statistical
uncertainty. ${^\mathrm{a}}$: velocity models for \hbox{{\rm N}{\sc \,ii}}\ with a
saturated central component allow higher column densities than this
value and are still compatible with the data, but only by using a
more complicated velocity structure than that fitted to the HIRES
transitions.}
\end{center}
\end{table}
\begin{figure}
\begin{center}
\includegraphics[width=0.47\textwidth]{./fig_COS.pdf}
\caption{\label{f_COS} Transitions for the $z=0.56$ sub-DLA in the COS
and FOS UV spectra. The smooth red line shows Voigt profile models
of the data. Absorption that is not due to the named transition in
each panel (usually from H$_2$) is shown by dashed lines. Tick marks
show the component positions from Figure~\ref{f_HIRES}, dark ticks
show components that have associated H$_2$ absorption. All
transitions are covered by the COS spectra, apart from \hbox{{\rm Si}{\sc \,iii}}, which
is covered by the lower resolution FOS spectrum.}
\end{center}
\end{figure}
\begin{figure}
\begin{center}
\includegraphics[width=0.48\textwidth]{./fig_NHI.pdf}
\caption{\label{f_NHI} Constraints on \hbox{$N_\textsc{H\,i}$}\ from the
\mbox{Ly$\alpha$}\ transition. The histogram shows the data, the dashed line the
continuum, and the lower green points the residuals as defined in
the caption of Figure~\ref{f_HIRES}. The thick red solid curve shows
the best-fitting \hbox{$N_\textsc{H\,i}$}\,$=10^{19.5}$\,\mbox{cm$^{-2}$}\ for a single component
with $b=20$\,\mbox{km~s$^{-1}$}, and the thinner upper and lower solid curves show
\hbox{$N_\textsc{H\,i}$}\,$=10^{19.3}$ and $10^{19.7}$\,\mbox{cm$^{-2}$}.}
\end{center}
\end{figure}
\subsection{H$_2$ velocity structure}
We measure H$_2$ transitions from the $J=0-3$ rotational levels, with
upper limits on $J=4$ and $5$. The asymmetric profiles for many of the
H$_2$ lines suggest there is more than one absorbing component. We
were also unable to successfully fit the equivalent widths of the
transitions using a curve of growth analysis with a single
component. Therefore we fitted two H$_2$ components, with redshifts
close to those of the two central strong metal components at
$-26$~\mbox{km~s$^{-1}$}\ and $0$~\mbox{km~s$^{-1}$}\ (components 5 and 6 in
Table~\ref{t_voigt}). These are clearly separated in the resolution
$\sim 6$~\mbox{km~s$^{-1}$}\ HIRES spectra, but blended at the instrumental line
profile of COS. However, the large number of transitions over a range
of oscillator strengths allow us to constrain velocity structure below
the instrumental resolution. As it is not uncommon for H$_2$ to be
significantly offset from the strongest metal absorption -- indeed, in
Section~\ref{s_threephase} we show that the H$_2$ is probably produced
in a different environment to most of the metal lines -- we allow the
redshifts of each H$_2$ component to vary in our fitting procedure.
We experimented with fitting the two components using \textsc{vpfit},
and found there were large degeneracies between the Doppler $b$
parameter and column density. One way to robustly explore the
$b$-$N$-$z$ parameter space for the two components is to generate
large grids of likelihood values as a function of the fitted
parameters over plausible regions of parameter space. However, in our
case this proved to be prohibitively expensive
computationally. Instead we used a Markov Chain Monte Carlo (MCMC)
technique to sample parameter space. This samples parameter values in
proportion to the likelihood value at any point in parameter
space. Thus from a set of initial parameter positions, a `chain' of
parameter values is generated by a stochastic walk through parameter
space with distributions approximating the Bayesian posterior
probability for each parameter.
We generated posterior parameter distributions using the package
\textsc{Emcee} \citep[The MCMC Hammer,][]{ForemanMackey12}. We fitted
for each component's redshift and $b$ parameter, and for the column
density of each rotational level using the 56 transitions shown in
Figure~\ref{f_Htwo_model}. All transitions for a component were
constrained to have the same $b$ value, which is generally observed to
be the case in local sites of H$_2$ absorption for at least $J$ levels
$<3$ \citep{Spitzer74}. Fitting transitions from each rotational level
individually also gives $b$ parameters consistent with a single
value. Even after correcting the wavelength scale, residual wavelength
shifts of $\sim 1$~\mbox{km~s$^{-1}$}\ remain, so we also allowed a small wavelength
shift for each fitted region. Thus we fit for 8 column densities, two
redshifts, two $b$ parameters, and one wavelength offset for each of
the 56 regions resulting in a total of 68 parameters.
Table~\ref{t_logN_Htwo} gives the parameter estimates and $1\sigma$
errors for the velocity offsets (with respect to metal components 5
and 6), $b$ parameters, and H$_2$ column densities for each rotational
level. The $1\sigma$ regions are determined by marginalising over all
other parameters and finding the narrowest region that encompasses
$68.3$ per cent of the samples. We choose the parameter estimates to
be at the centre of these $1\sigma$ regions. The absorption model with
the set of parameters that maximises the likelihood is shown in Figure
\ref{f_Htwo_model}.
\begin{table}
\addtolength{\tabcolsep}{-2.5pt}
\begin{center}
\begin{tabular}{cccc}
\hline
$J$ & $\log_{10} N$ (cm$^{-2}$) & $b$ (\mbox{km~s$^{-1}$}) & $\delta v$ (\mbox{km~s$^{-1}$}) \\
\hline
& \multicolumn{3}{c}{Component 5} \\
0 & $16.17\pm 0.25$ & $ 6.7\pm 0.6$ & $3.5\pm 0.6$ \\
1 & $17.05\pm 0.28$ & & \\
2 & $16.19\pm 0.19$ & & \\
3 & $15.77\pm 0.12$ & & \\
4 & $<14.5$ & & \\
5 & $<14.5$ & & \\
& \multicolumn{3}{c}{Component 6} \\
0 & $15.63\pm 0.39$ & $4.3\pm 0.7$ & $4.3\pm 0.7$ \\
1 & $16.42\pm 0.40$ & & \\
2 & $15.65\pm 0.25$ & & \\
3 & $15.47\pm 0.18$ & & \\
4 & $<14.5$ & & \\
5 & $<14.3$ & & \\
\hline
\end{tabular}
\caption{\label{t_logN_Htwo} Column densities, $b$ values and velocity
  offsets for the two components showing H$_2$ based on Markov Chain
  Monte Carlo fitting. Errors are $1 \sigma$ and the $J=4$ \& $5$
values are $5\sigma$ upper limits. The velocity offsets are from the
redshifts of metal components 5 and 6, given in Table~\ref{t_voigt}.}
\end{center}
\end{table}
\begin{figure*}
\begin{center}
\includegraphics[width=0.92\textwidth]{./fig_model.pdf}
\caption{\label{f_Htwo_model} The H$_2$ absorption model generated
using parameters that maximise the likelihood of the data. The
histogram shows the flux divided by the continuum (the darker
portions were used to calculate the likelihood), the smooth curves
show the model. The residuals -- (data - model) / $1\sigma$ -- are
shown centred on $-0.25$, scaled such that the light gray lines
above and below $-0.25$ are the $\pm 1 \sigma$ range. A two
component model with a single $b$-parameter for each component and
redshifts corresponding to components 5 \& 6 in Table~\ref{t_voigt}
reproduces the data well.}
\end{center}
\end{figure*}
\section{Absorption system properties}
\subsection{Metallicity}
The metallicity, $Z$, can be estimated from an element X using the log
of the ratio of the abundance of element X in the absorber,
$N_\textrm{X}/N_\textrm{H}$, to the solar abundance
\begin{equation}
[\mathrm{X}/\mathrm{H}] \equiv \log_{10}
\frac{(N_\textrm{X}/N_\textrm{H})_\mathrm{obs}}{(N_\textrm{X}/N_\textrm{H})_{\sun}}.
\end{equation}
Due to a charge transfer between O and H we expect the ratio of the
number densities $n_{\rm OI}/n_{\rm O}$ and $n_{\rm HI}/n_{\rm H}$ to
be the same \citep{Field71}, provided the majority of O is in the form
of \hbox{{\rm O}{\sc \,i}}\ and \hbox{{\rm O}{\sc \,ii}}. In the presence of many high energy ionising
photons, this is no longer true due to different absorbing cross
sections of \hbox{{\rm O}{\sc \,i}}\ and \hbox{{\rm H}{\sc \,i}}\ \citep[see][]{Prochter10}, but the absence
of \hbox{{\rm Si}{\sc \,iv}}\ argues against a hard radiation field for this system, and
the best fitting \textsc{Cloudy} models do not predict significant
amounts of O in higher ionisation states (\hbox{{\rm O}{\sc \,vi}}\ is seen, but we argue
this occurs in a hotter, collisionally-ionised phase). Oxygen also
shows little depletion ($< 0.3$~dex) onto dust grains across a range
of environments in the ISM of the Milky Way \citep{Jenkins09}, so
should provide a good estimate of the metallicity.
We find [\hbox{{\rm O}{\sc \,i}}/\hbox{{\rm H}{\sc \,i}}]\,$=-0.72\pm 0.32$, or $\sim 0.19\,Z_\odot$. For the
photoionisation analysis in Section~\ref{s_cloudy} we assume the
metallicity is the same across the entire complex. In one of the few
cases where the metallicity has been measured for individual
components in a single absorption system, metallicity differences of a
factor of ten have been observed \citep{Prochter10}. However, given
that the dispersion in $N_\textrm{\hbox{{\rm Mg}{\sc \,ii}}} / N_\textrm{\hbox{{\rm Fe}{\sc \,ii}}}$ across the
system is not excessively large (the largest log difference between
components is $0.38$), the assumption of a constant metallicity seems
reasonable. In particular, the two components with H$_2$ do not show
significantly different ion abundance ratios compared to the entire
system.
\subsection{Dust}
\label{s_kappa}
\citet{Noterdaeme08} have found a correlation between the presence of
dust and the likelihood of observing H$_2$ in DLAs, consistent with
the main formation mechanism for H$_2$ being on the surface of dust
grains. We can measure the dust content by comparing elements known to
deplete strongly onto dust grains (Fe, Mg) to those with low depletion
(O). We assume negligible ionisation corrections, but applying
corrections from the best-fitting \textsc{Cloudy} model in
Section~\ref{s_cloudy} does not change our conclusions. We find
[Fe/O]~$= -0.25^{+0.21}_{-0.29}$ and [Mg/O]~$= -0.38 \pm 0.28$ for the entire
system, indicating mild dust depletion. If we also assume solar
abundance ratios we can estimate the dust-to-gas ratio normalised by
the value in the solar neighbourhood as
\begin{equation}
\kappa = 10^\mathrm{[X/H]} ( 1 - 10^\mathrm{[Fe/X]} )\, ,
\end{equation}
where X is an element that does not deplete strongly onto dust
\citep[for a derivation of this expression see the appendix
of][]{Wolfe03}. Using oxygen gives $\log_{10} \kappa < -0.44$,
compatible with values found in other higher redshift systems showing
H$_2$ \citep{Ledoux03}.
\subsection{Temperature constraints from line widths}
The linewidths of absorption components in the HIRES spectra can be
used to constrain the temperature of the gas using the relation $b =
\sqrt{2kT/m}$, where $m$ is the mass of the ion and $T$ is the
temperature. This constraint is an upper limit, as there can be
large-scale turbulent motions in addition to thermal broadening, or
the line may not be resolved. Indeed, we fitted each component in
\hbox{{\rm Mg}{\sc \,ii}}, \hbox{{\rm Fe}{\sc \,ii}}, and \hbox{{\rm Ca}{\sc \,ii}}\ with a single $b$ parameter value across all
three transitions, consistent with turbulent broadening dominating
over thermal broadening. Due to the relatively large masses for these
elements, only component 2 gives a constraining upper limit of
$6\,000$~K, typical of temperatures in the warm neutral medium in the
Milky Way ISM. The H$_2$ linewidths give upper limits to the
temperature of $6\,500$~K and $5\,400$~K for the blue and redder
components, but we argue below that the physical conditions in the
H$_2$ gas are probably different to those of the gas where most of the
metal lines arise. It is also possible that the H$_2$ widths are
substantially broadened due to turbulent motions.
In conclusion, there are no strong temperature constraints from the
linewidths. The relative column densities of the H$_2$ rotational
levels provide an independent measure of the temperature, discussed in
the next section.
\subsection{H$_2$ excitation temperature}
\label{s_Tex}
The ratios of H$_2$ column densities in different rotational levels
can be expressed as excitation temperatures, assuming a Boltzmann
distribution across the levels \citep[see][]{Draine11}:
\begin{equation}
\frac{N_J}{N_{J=0}}=\frac{g_J}{g_{J=0}} \exp\left(\frac{-B_vJ(J+1)}{T_{J0}}\right).
\end{equation}
Here $N_J$ is the column density for molecules in rotational state
$J$, and $g_J \equiv (2J + 1)(2I + 1)$, where $I=0$ if $J$ is odd or 1
if $J$ is even, is the statistical weight of $J$. $B_v = 85.36$~K and
$T_{J0}$ is the excitation temperature from level $J$ to $J=0$.
Fig.~\ref{f_Tex} shows an excitation diagram for the column densities
of the $J=0 - 3$ transitions for the two H$_2$ components. If the
collisional timescale for the $J=0$ and $J=1$ transitions is much
shorter than the photodissociation timescale, which occurs above
densities of $\sim 100$~\mbox{cm$^{-3}$}\ when H$_2$ is sufficiently
self-shielded from dissociating photons, then $T_{10}$ represents the
kinetic temperature of the gas \citep[see for
example][]{Dalgarno73}. The $z=0.56$ system is likely only partially
self-shielded, but assuming it satisfies these requirements we find a
lower limit on $T_{10}$ for each component at $1\sigma$ ($2\sigma$)
limits of $123$~K ($64$~K) for component 5 and $77$~K ($37$~K) for
component 6. Two illustrative temperatures corresponding to the
populations for $J= 0 - 3$ in each component are shown in
Fig.~\ref{f_Tex}. However, different physical processes affect the
populations of these levels \citep{Jura75b}, so it is not expected
that a single temperature should match all four levels.
\begin{figure}
\begin{center}
\includegraphics[width=0.485\textwidth]{./fig_Tex.pdf}
\caption{\label{f_Tex} Excitation diagram for the two H$_2$
components. The error bars show the $1 \sigma$ uncertainties. The
slope of a line joining each pair of points is inversely
proportional to the excitation temperature between those two $J$
levels. An illustrative temperature is shown for each component, but
the population levels are not expected to follow a Boltzmann
distribution characterised by a single temperature.}
\end{center}
\end{figure}
\subsection{\textsc{Cloudy} modelling}
\label{s_cloudy}
In this section we attempt to generate a simple single-cloud model
illuminated by a UV radiation field that can reproduce all the
observed column densities. We compare to the total column densities
for all components, since the individual component columns are not
well constrained for the \hbox{{\rm O}{\sc \,i}}, \hbox{{\rm C}{\sc \,i}}, and \hbox{{\rm Si}{\sc \,ii}}\ transitions or the
saturated transitions (\hbox{{\rm C}{\sc \,ii}}, \hbox{{\rm C}{\sc \,iii}}\ and \hbox{{\rm Si}{\sc \,iii}}). Given the large range
of transitions present with widely differing ionisation energies, it
is likely that there are several different phases present, and a
single cloud model is unlikely to be able to reproduce all the
observed species. Below we find that a single model can reproduce the
majority of the low-ionisation metal transitions, but
Section~\ref{s_threephase} shows that multiple phases with different
densities and temperatures are required to explain all the absorption.
We use models generated with version 8.01 of \textsc{Cloudy}, last
described by \citet{Ferland98}, to estimate the physical conditions in
the absorption system. All models assume solar abundance ratios,
constant gas density, and an absorbing geometry of a thin slab
illuminated on one side by an incident radiation field perpendicular
to the slab surface. The radiation field includes the cosmic
microwave background at the redshift of the absorber. We then compare
four scenarios: a cloud in an intergalactic medium-like (IGM)
environment, in an ISM-like environment, close to a starburst galaxy,
and illuminated by an AGN-dominated spectrum. We chose the
AGN-dominated spectrum to estimate the effect of a nearby AGN that may
be present in one of the galaxy candidates described in
Section~\ref{s_gal}, and to see if a spectrum with more high-energy UV
photons can produce the observed \hbox{{\rm O}{\sc \,vi}}\ column density in addition to
that of the low ionisation transitions. The IGM-like model is free of
dust with a radiation field given by the UV background spectrum from
\citet{Haardt12}, including contributions from quasars and
star-forming galaxies at the redshift of the absorber. It has a
radiation field strength at $912$\,\AA,
$J_{\nu,\,\mathrm{IGM}}^{\,912} =
6.08\times10^{-23}$erg~s$^{-1}$cm$^{-2}$Hz$^{-1}$sr$^{-1}$. The
ISM-like models have a radiation field similar to the Galactic ISM
($J_\nu^{\,912} \sim 400 J_{\nu,\,\mathrm{IGM}}^{\,912}$), which is
dominated at UV wavelengths by the spectral shape of hot stars, and a
dust grain composition similar to that measured in the ISM. Even
though the ISM-like models use solar relative abundances, the gas
phase abundance ratios are substantially different from solar due to
differential depletion of metals onto grains. The starburst
\textsc{Cloudy} models assume the absorbing cloud is 10\,kpc from the
galaxy and an escape fraction for UV light of 3 per cent, in addition
to the IGM-like radiation field described above, without dust grains.
They have $J_\nu^{\,912}\sim 2700 J_{\nu,\,\mathrm{IGM}}^{\,912}$.
The starburst galaxy spectrum used was generated using
\textsc{Starburst99} \citep{Leitherer99} for a star formation rate of
$20$\,\mbox{M$_{\sun}$~yr$^{-1}$}. The AGN models use the default tabulated AGN spectrum
from \textsc{Cloudy} with a normalisation $J_\nu^{\,912} \sim 3000
J_{\nu,\,\mathrm{IGM}}^{\,912}$ and do not include dust grains.
For each scenario we generate a grid of models for a range of
ionisation parameters, metallicities and total \hbox{$N_\textsc{H\,i}$}. We estimate the
ionisation parameter $U$, defined as the ratio of the densities of
ionising photons to hydrogen atoms, using the observed total column
density ratios $N_\textrm{\hbox{{\rm Mg}{\sc \,i}}}/N_\textrm{\hbox{{\rm Mg}{\sc \,ii}}}$,
$N_\textrm{\hbox{{\rm N}{\sc \,i}}}/N_\textrm{\hbox{{\rm N}{\sc \,ii}}}$, and
$N_\textrm{\hbox{{\rm Si}{\sc \,ii}}}/N_\textrm{\hbox{{\rm Si}{\sc \,iii}}}$. Using ratios of ionisation states
for the same element avoids any effects that might alter the column
densities of ions for different elements in different ways, such as
non-solar abundance ratios or differential dust depletion. We
generate the likelihood of each parameter ($U$, $Z$ and $\hbox{$N_\textsc{H\,i}$}$) for
the grid of models based on the observed ratios, and include a
Gaussian prior on the metallicity centred on the [\hbox{{\rm O}{\sc \,i}}/\hbox{{\rm H}{\sc \,i}}] metallicity
measurement with width $\sigma$ equal to the $1 \sigma$ uncertainty on
the metallicity. For all three scenarios, only a relatively narrow
range of $U$ values correctly reproduces the observed ratios. The
likelihoods are only weakly dependent on the total \hbox{$N_\textsc{H\,i}$}; we assume
\hbox{$N_\textsc{H\,i}$}\,$=10^{19.5}$\,\mbox{cm$^{-2}$}, which results in models that best reproduce
the observed metal column densities.
Once we have found the most likely $U$ value, we compare the predicted
column densities to the observed transitions with measurements or
limits, and assess which scenario reproduces the observations best. We
first compare to the AGN models. These are the only models with a hard
enough spectrum to produce sufficient $N_\textrm{\hbox{{\rm O}{\sc \,vi}}}$ to match the
observed value. However, at the same time they overpredict the amount
of \hbox{{\rm Si}{\sc \,iv}}, \hbox{{\rm Fe}{\sc \,ii}}, \hbox{{\rm Mg}{\sc \,i}}\ and \hbox{{\rm Mg}{\sc \,ii}}\ by one or more orders of
magnitude. Thus it is more likely the \hbox{{\rm O}{\sc \,vi}}\ arises in a
collisionally-ionised phase separate from the low-ionisation
transitions, as is observed in other systems \citep[for
example][]{Fox07,Ribaudo11}, and we do not compare to the high-ionisation
species (\hbox{{\rm Si}{\sc \,iv}}, \hbox{{\rm O}{\sc \,vi}}) for the remaining models.
From the mild depletions measured in Section \ref{s_kappa}, we already
expect that the ISM-like case will not match the observed
abundances. The most likely model does indeed underpredict the
\hbox{{\rm Fe}{\sc \,ii}}\ abundance by more than an order of magnitude, and \hbox{{\rm Ca}{\sc \,ii}}\ by
many orders of magnitude, as both are expected to be heavily depleted
onto dust grains. This confirms that the depletion pattern in the
$z=0.56$ sub-DLA is different from that in the Milky Way ISM. This
model also underpredicts \hbox{{\rm C}{\sc \,i}}. The starburst scenario fails to
reproduce the observed $N_\textrm{\hbox{{\rm N}{\sc \,i}}}/N_\textrm{\hbox{{\rm N}{\sc \,ii}}}$, and also
severely underpredicts \hbox{{\rm Ca}{\sc \,ii}}, \hbox{{\rm Fe}{\sc \,ii}}\ and \hbox{{\rm C}{\sc \,i}}. The IGM-like model gives
the best fit to the observed data, and its predictions along with
observed column densities are shown in Figures~\ref{f_cloudy1} \&
\ref{f_cloudy2}. Figure~\ref{f_cloudy1} shows that column densities
for \hbox{{\rm O}{\sc \,i}}, \hbox{{\rm Mg}{\sc \,i}}, \hbox{{\rm Mg}{\sc \,ii}}, \hbox{{\rm Fe}{\sc \,ii}}, \hbox{{\rm N}{\sc \,i}}\ and \hbox{{\rm N}{\sc \,ii}}\ are reasonably well
matched. The remaining small deviations from the predictions could be
due to a slightly different incident UV continuum from the one
assumed, or enhanced or depleted elemental abundances relative to the
solar values assumed. For example, $0.2$ dex less $N_\textrm{\hbox{{\rm N}{\sc \,ii}}}$ is
observed than is predicted. This may be due to a nitrogen
underabundance, often observed in similar \hbox{$N_\textsc{H\,i}$}\ systems at low
redshifts \citep[for example][]{Battisti12} and in DLAs at higher
redshifts \citep{Pettini02, Prochaska02}. Figure~\ref{f_cloudy2} shows
that \hbox{{\rm Si}{\sc \,ii}}, \hbox{{\rm Si}{\sc \,iii}}, \hbox{{\rm C}{\sc \,ii}}\ and \hbox{{\rm C}{\sc \,iii}}\ columns are reproduced
well. However, there is still $0.5$ dex too little $N_\textrm{\hbox{{\rm Ca}{\sc \,ii}}}$
and 1 dex too little $N_\textrm{\hbox{{\rm C}{\sc \,i}}}$ predicted. In all three
scenarios, we also find that the \hbox{$N_{\textrm{H}_2}$}\ predicted is more than an
order of magnitude below the observed value. In
Section~\ref{s_threephase} we suggest a scenario to explain this
discrepancy between the models and observations.
We also ran \textsc{Cloudy} models using constant pressure clouds instead of
constant density. The motivation for these was to simultaneously
include cool, lower density gas at the edge of the cloud, and higher
density, cold $\sim100$\,K gas at the core of the cloud where H$_2$
can survive. In these models we also included contributions from
cosmic rays, which can be important for cold molecular
regions. Although significant amounts of H$_2$ can co-exist with many
of the metal transitions observed for these models, they still cannot
correctly reproduce the \hbox{{\rm Ca}{\sc \,ii}}\ or \hbox{{\rm C}{\sc \,i}}\ columns.
\begin{figure}
\begin{center}
\includegraphics[width=0.4\textwidth]{./fig_cloudy1.pdf}
\caption{\label{f_cloudy1} \textsc{Cloudy} predictions (lines) and
observed column densities (points) for \hbox{{\rm Mg}{\sc \,i}}, \hbox{{\rm Mg}{\sc \,ii}}, \hbox{{\rm N}{\sc \,i}}, \hbox{{\rm N}{\sc \,ii}},
\hbox{{\rm O}{\sc \,i}}\ and \hbox{{\rm Fe}{\sc \,ii}}\ for the IGM-like model with $Z =
0.13\,Z_\odot$. Points are plotted at the most likely $U$ values
based on constraints from the $N_\textrm{\hbox{{\rm Mg}{\sc \,i}}}/N_\textrm{\hbox{{\rm Mg}{\sc \,ii}}}$,
$N_\textrm{\hbox{{\rm N}{\sc \,i}}}/N_\textrm{\hbox{{\rm N}{\sc \,ii}}}$, and
$N_\textrm{\hbox{{\rm Si}{\sc \,ii}}}/N_\textrm{\hbox{{\rm Si}{\sc \,iii}}}$ ratios (\hbox{{\rm Fe}{\sc \,ii}}\ is offset for
clarity). Filled points correspond to solid lines of the same
colour, open points to dashed lines. Errors on the column densities
are smaller than the marker sizes. Given the uncertainties in the
shape of the ionising spectrum and uncertain relative abundances,
the predicted column densities are very close to the observed
values.}
\end{center}
\end{figure}
\begin{figure}
\begin{center}
\includegraphics[width=0.4\textwidth]{./fig_cloudy2.pdf}
\caption{\label{f_cloudy2} \textsc{Cloudy} predictions (lines) and
observed column densities (points) for \hbox{{\rm C}{\sc \,i}}, \hbox{{\rm C}{\sc \,ii}}, \hbox{{\rm C}{\sc \,iii}}, \hbox{{\rm Si}{\sc \,ii}},
\hbox{{\rm Si}{\sc \,iii}}\ and \hbox{{\rm Ca}{\sc \,ii}}\ for the IGM-like model with $Z =
0.13\,Z_\odot$. Points are plotted at the most likely $U$ values
based on constraints from the $N_\textrm{\hbox{{\rm Mg}{\sc \,i}}}/N_\textrm{\hbox{{\rm Mg}{\sc \,ii}}}$,
$N_\textrm{\hbox{{\rm N}{\sc \,i}}}/N_\textrm{\hbox{{\rm N}{\sc \,ii}}}$, and
$N_\textrm{\hbox{{\rm Si}{\sc \,ii}}}/N_\textrm{\hbox{{\rm Si}{\sc \,iii}}}$ ratios (Si and \hbox{{\rm C}{\sc \,iii}}\ points are
offset for clarity). Filled points correspond to solid lines of the
same colour, open points to dashed lines, and open points with
dotted edges to dotted lines. Where error bars are not shown they
are smaller than the marker size. With the exception of \hbox{{\rm C}{\sc \,i}}\ and
\hbox{{\rm Ca}{\sc \,ii}}, the column densities are reproduced well. As discussed in
Section~\ref{s_threephase}, the excess $N_{\hbox{{\rm C}{\sc \,i}}}$ is likely due to
\hbox{{\rm C}{\sc \,i}}\ being in a self-shielded cold phase where the H$_2$ and
possibly part of the \hbox{{\rm Ca}{\sc \,ii}}\ resides.}
\end{center}
\end{figure}
\subsection{\hbox{{\rm C}{\sc \,ii}}\ fine structure absorption}
Singly ionised carbon (C$^+$) has electronic structure $1s^2 2s^2 2p$
where the outer shell has a configuration $^2$P$^\mathrm{o}_J$, and
thus fine structure splitting occurs between the $J=1/2$ and $J=3/2$
levels. Transitions from these two ground state levels produce
\hbox{{\rm C}{\sc \,ii}}\ and \hbox{{\rm C}{\sc \,ii}$^*$}\ absorption respectively.
The ratio of \hbox{{\rm C}{\sc \,ii}$^*$}\ to \hbox{{\rm C}{\sc \,ii}}\ column densities has been used
to estimate the star formation rate inferred from the cooling rate in
DLAs at high redshift \citep{Wolfe03}. We find an upper limit on
\hbox{{\rm C}{\sc \,ii}$^*$}\ from $\lambda 1036$ of $N_\textrm{\hbox{{\rm C}{\sc \,ii}$^*$}} < 10^{14.5}$\,
\mbox{cm$^{-2}$}\ and $N_\textrm{\hbox{{\rm C}{\sc \,ii}}} > 10^{14.6}$\, \mbox{cm$^{-2}$}. Assuming constant
$n($\hbox{{\rm C}{\sc \,ii}$^*$})$/n($\hbox{{\rm C}{\sc \,ii}}) over the entire complex, we find the ratio
$N_\textrm{\hbox{{\rm C}{\sc \,ii}$^*$}}/N_\textrm{\hbox{{\rm C}{\sc \,ii}}} < 0.8$, consistent with ratios
measured in higher redshift DLAs \citep[for example][]{Srianand05} and
local environments. Following the assumptions described by
\citet{Morris86}, we can estimate the electron density $n_e$ in
\mbox{cm$^{-3}$}\ using the expression:
\begin{equation}
\frac{N_\textrm{\hbox{{\rm C}{\sc \,ii}$^*$}}}{N_\textrm{\hbox{{\rm C}{\sc \,ii}}}} = 3.9 \times 10^{-2}\, n_e
\left[1 + 0.22\, \frac{n_p}{n_e}\right].
\end{equation}
We use $n_p/n_e$ corresponding to the ionised H fraction of $0.70$
from the best-fitting \textsc{Cloudy} ionisation model to find $n_e <
10$\,\mbox{cm$^{-3}$}. Using \hbox{$N_\textsc{H\,ii}$}\ we can estimate the thickness of the
absorbing cloud as \hbox{$N_\textsc{H\,ii}$}$/n_e$. This gives a lower limit on the
cloud size of $3$\,pc. This limit is not necessarily related to the
density or size of the H$_2$ gas, as we argue in the discussion that
most of the \hbox{{\rm C}{\sc \,ii}}\ is due to a warm ionised phase separate from the
H$_2$.
\subsection{Molecular fraction}
\label{s_fhtwo}
The molecular mass fraction is estimated by
\begin{equation}
f_{\textrm{H}_2} = 2N_{\textrm{H}_2} / (N_{\textrm{\hbox{{\rm H}{\sc \,i}}}} +
2N_{\textrm{H}_2}),
\end{equation}
assuming most of the hydrogen associated with the H$_2$ is neutral. In
this case, as for many other QSO absorption systems, it is not clear
how to divide the total \hbox{$N_\textsc{H\,i}$}\ measured from the damping wings between
different absorbing components, and in principle each
\hbox{$N_{\textrm{H}_2}$}\ component could have a different \hbox{$f_{\textrm{H}_2}$}\ value. To calculate
\hbox{$f_{\textrm{H}_2}$}\ we use the total \hbox{$N_{\textrm{H}_2}$}\ from both components and
conservatively assume all \hbox{$N_\textsc{H\,i}$}\ is associated with the H$_2$, meaning
\hbox{$f_{\textrm{H}_2}$}\ is effectively a lower limit. This gives a molecular fraction
of $\log_{10}$\,\hbox{$f_{\textrm{H}_2}$}\,$= -1.93 \pm 0.36$. As we discuss in the next
section, given the total \hbox{$N_\textsc{H\,i}$}, this is an unusually high molecular
fraction compared to most other higher redshift systems and sightlines
in the Local Group. Therefore we may be concerned that a different
velocity model to the one we have used permits a much lower \hbox{$f_{\textrm{H}_2}$}. To
calculate a lower limit on \hbox{$f_{\textrm{H}_2}$}\ independent of the velocity model,
we measure the column density of the lowest oscillator strength
transition available for each rotational level ($J=0$, $\lambda
1108.1$; $J=1$, $\lambda 1008.5$; $J=2$, $\lambda 934.1$ and $J=3$,
$\lambda 952.3$) using the AOD method. This gives a lower limit of
\hbox{$N_{\textrm{H}_2}$} $ = 10^{16.5}$~\mbox{cm$^{-2}$}\ or \hbox{$f_{\textrm{H}_2}$}\,$> 10^{-3}$, again assuming all
of the \hbox{$N_\textsc{H\,i}$}\ is associated with the \hbox{$N_{\textrm{H}_2}$}. This is still a high value
relative to local H$_2$ systems with similar \hbox{$N_\textsc{H\,i}$}.
\section{Discussion}
\subsection{Physical conditions in the H$_2$ cloud}
\label{s_phys}
To consider this system in the context of other H$_2$ detections in
absorption, we plot \hbox{$f_{\textrm{H}_2}$}\ for local and higher redshift H$_2$
sightlines as a function of the total hydrogen column density and
\hbox{$N_{\textrm{H}_2}$}\ in Figure \ref{f_fntot}. It is apparent that the $z=0.56$
sub-DLA (solid circle) has an unusually high \hbox{$f_{\textrm{H}_2}$}\ given its
\hbox{$N_\textsc{H\,i}$}\ compared to sightlines through the plane of the Milky Way (cyan
inverted triangles), or through the Magellanic clouds (green squares
and red diamonds).
\begin{figure*}
\begin{center}
\includegraphics[width=0.97\textwidth]{./fig_f_vs_ntot.pdf}
\caption{\label{f_fntot} The molecular fraction versus the total
$N_\textrm{H}$. Compared to measurements in the Magellanic clouds
\citep{Tumlinson02, Welty12}, along the disk of the Milky Way
\citep{Savage77} and in intermediate- \citep{Richter03b} and
high-velocity clouds \citep{Richter99}, the $z=0.56$ H$_2$ system
has an unusually large \hbox{$f_{\textrm{H}_2}$}\ for its total H column. The local
systems that appear most similar to this absorber are seen along
sightlines through the Magellanic Stream \citep{Sembach01,
Richter01b}. Error bars at the corner of each plot show the
typical uncertainties on \hbox{$f_{\textrm{H}_2}$}, $N_\mathrm{tot}$ and \hbox{$N_{\textrm{H}_2}$}.
Shading shows the regions in which H$_2$ cannot be detected for a
limit of \hbox{$N_{\textrm{H}_2}$}\, $\lesssim 10^{14.5}$\, \mbox{cm$^{-2}$}, a typical threshold
for the spectra used for the different surveys. The thin lines at
the top right in each panel show analytic models from Krumholz and
McKee for two illustrative metallicities. The three thin lines at
the lower left are the analytic models described in
Section~\ref{s_phys}. H$_2$ detections in $z > 1.5$ QSO absorption
systems (from the compilation by \citealt{Noterdaeme08} with
additions from \citealt{Petitjean02}, \citealt{Reimers03},
\citealt{Noterdaeme10}, \citealt{Srianand08, Srianand10,
Srianand12}, \citealt{Tumlinson10}, \citealt{Jorgenson10} and
\citealt{Guimaraes12}) are also shown.}
\end{center}
\end{figure*}
Before we discuss the likely origin of the $z=0.56$ sub-DLA, we
examine the physics underlying the \hbox{$f_{\textrm{H}_2}$}\ distribution as a function
of $N_\textrm{H}$ and \hbox{$N_{\textrm{H}_2}$}. The left panel shows a clear bimodality
in the \hbox{$f_{\textrm{H}_2}$}\ $- N_\textrm{H}$ distribution between high
$N_\textrm{H}$, high \hbox{$f_{\textrm{H}_2}$}\ sightlines at the top right, and lower
\hbox{$f_{\textrm{H}_2}$}\ sightlines, generally with much lower $N_\textrm{H}$. The
right panel shows this is actually a bimodality between \hbox{$N_{\textrm{H}_2}$}\ values
$\lesssim 10^{16}$\,\mbox{cm$^{-2}$}\ and $\gtrsim 10^{18}$\,\mbox{cm$^{-2}$}. This can be
understood as the onset of H$_2$ self-shielding against UV
dissociating photons \citep[for example][]{Hirashita05,Gillmon06}. An
analytic approximation from \citet{Draine96} shows that $>97$ per cent
of H$_2$-dissociating photons are blocked by self-shielding once
\hbox{$N_{\textrm{H}_2}$}\ $\sim 10^{16}$\,\mbox{cm$^{-2}$}. Once H$_2$ becomes
self-shielded\footnote{Dust shielding only becomes important at total
H columns of $\sim10^{21}$\,\mbox{cm$^{-2}$}, assuming solar metallicity.}, the
dissociation rate drops and H$_2$ accumulates rapidly to the
formation-destruction equilibrium value predicted by the models by
\citet{McKee10}. These models are shown at the top right in each panel
for two metallicities; solar and $Z=0.2Z_{\odot}$, the metallicity of
the SMC. They were calculated using equations 4, 5, 7 and 8 from
\citet{Kuhlen12} and assume the ISM is in a two phase equilibrium
between a cold neutral medium and a warm neutral medium \citep[for
example][]{Wolfire95}. The solar metallicity model reproduces the
mean \hbox{$f_{\textrm{H}_2}$}\ for the Milky Way sightlines through shielded H$_2$
regions reasonably well, although \citet{Welty12} point out that these
models overpredict the $N_\textrm{H}$ at which this transition occurs.
The $N_\textrm{H}$ at which there is sufficient shielding from the UV
field to form large amounts of H$_2$ varies depending on the dust to
gas ratio, the strength of the UV field, and the H$_2$ linewidth, so
the $N_\textrm{H}$ at which the transition from optically thin to
optically thick occurs can change from sightline to sightline. In the
plane of the MW disk, the transition from low to higher \hbox{$f_{\textrm{H}_2}$}\ takes
place around $N_\textrm{H} = 10^{20.5} - 10^{21}\,$\mbox{cm$^{-2}$}. It occurs at
higher $N_\textrm{H}$ in the LMC and SMC, both because their lower
metallicities \citep[$0.5$ and $0.2Z_{\odot}$ respectively, see the
appendices from][]{Welty97, Welty99} result in a lower H$_2$
formation rate on grains, and due to an increased UV field compared to
the Milky Way \citep{Tumlinson02}. \citet{Gillmon06} have also shown
the large variation in \hbox{$f_{\textrm{H}_2}$}\ along different sightlines implies that
each sightline intersects a small number of molecule-bearing clouds.
In addition to comparing to the two-phase equilibrium models of McKee
\& Krumholz, we can compare to simple analytic models that apply to
diffuse H$_2$ in the partially shielded regimes. Following the
Appendix from \citet{Jorgenson10}, the dissociation rate in s$^{-1}$
due to photons with energies corresponding to the Lyman-Werner bands
is given by
\begin{equation}
R_\textrm{diss} = 1.1\times 10^8\, 4\pi
J_{\nu}^\textrm{LW}\,S_\textrm{shield},
\end{equation}
where $J_{\nu}^\textrm{LW}$ is the strength of the incident radiation
field in erg~s$^{-1}$cm$^{-2}$Hz$^{-1}$sr$^{-1}$ at 1000\,\AA, and $1
- S_\textrm{shield}$ is the fraction of Lyman-Werner photons processed
by dust or scattered due to \hbox{{\rm H}$_2$}\ shielding, which can be calculated
using the analytic expressions from \citet{Draine96} and
\citet{Hirashita05}. In formation-dissociation equilibrium, the
molecular fraction can be approximated by
\begin{equation}
f_{\textrm{H}_2} = 2\, \frac{ \kappa \mathcal{R}
n_\textrm{\hbox{{\rm H}{\sc \,i}}}}{R_\textrm{diss}},
\end{equation}
where $\mathcal{R}$ is the formation rate of H$_2$ on dust grains in
cm$^3$\,s$^{-1}$, $n_\textrm{\hbox{{\rm H}{\sc \,i}}}$ is the \hbox{{\rm H}{\sc \,i}}\ particle density in
\mbox{cm$^{-3}$}\ and $\kappa$ is the dust to gas ratio relative to that in the
solar neighbourhood as defined in Section~\ref{s_kappa}. Note that the
formation rate term used by \citet{Hirashita05}, $R_\mathrm{dust}$, is
equal to $\kappa \mathcal{R}$. Rearranging these expressions, we can
estimate the particle density in the cloud as:
\begin{equation}
n_\textrm{\hbox{{\rm H}{\sc \,i}}} = 74\, \mbox{cm$^{-3}$} \kappa^{-1}
\left(\frac{\mathcal{R}}{\mathcal{R}_\mathrm{SN}}\right)^{-1}
\left(\frac{f_{\textrm{H}_2}}{0.01}\right)\left(\frac{J_{\nu}^\textrm{LW}}{J_{\nu,\,\mathrm{SN}}^\textrm{LW}}
\right)\left(\frac{S_\mathrm{shield}}{0.01}\right),
\end{equation}
where $J_{\nu,\,\mathrm{SN}}^\textrm{LW}=3.2\times
10^{-20}$\,erg~s$^{-1}$\,cm$^{-2}$\,Hz$^{-1}$\,sr$^{-1}$ and
$\mathcal{R}_\mathrm{SN}=3 \times 10^{-17}$\,cm$^3$\,s$^{-1}$ are
typical values measured in the solar neighbourhood \citep{Habing68,
Jura74}.
The three curves at the lower left of each panel in
Figure~\ref{f_fntot} show the molecular fractions estimated with
equation (7) for illustrative combinations of $\kappa \mathcal{R}
n_\textrm{\hbox{{\rm H}{\sc \,i}}} / R_\textrm{diss}$. The upper curve and middle curves
each have $n_\textrm{\hbox{{\rm H}{\sc \,i}}}=10$\,\mbox{cm$^{-3}$}\ with $\kappa=0.04$,
$0.1\mathcal{R}_\mathrm{SN}$, $10 J_{\nu,\,\mathrm{SN}}^\textrm{LW}$
and $\kappa=1$, $0.33\mathcal{R}_\mathrm{SN}$, $2
J_{\nu,\,\mathrm{SN}}^\textrm{LW}$ respectively. The lower curve has
$\kappa=0.04$, $n_\textrm{\hbox{{\rm H}{\sc \,i}}}=5$\,\mbox{cm$^{-3}$}, $\mathcal{R}_\mathrm{SN}$ and
$0.01 J_{\nu,\,\mathrm{SN}}^\textrm{LW}$. This lower curve has
qualitatively different behaviour from the upper two curves, because
at such low molecular fractions, dust shielding from dissociating
photons becomes important before H$_2$ self-shielding. Therefore the
observed variation in \hbox{$f_{\textrm{H}_2}$}\ can be explained by reasonable
variations in the combination of UV field strength, particle density
and H$_2$ dust formation rate. If the $z=0.56$ system is in H$_2$
formation-dissociation equilibrium, the combination of low \hbox{$N_\textsc{H\,i}$}\ and
high molecular fraction suggests that it is either in a weaker UV
field, has an increased H$_2$ formation rate, a higher $n_{\rm H}$ compared
to the solar neighbourhood, or some combination of these three. We can
use limits on the column densities of the $J=4$ and $5$ levels to put
upper limits on $J_{\nu}^\textrm{LW}$ \citep{Jura75b}. These upper
limits are not very stringent, however, due to weak limits on the
column densities, and constrain $J_{\nu}^\textrm{LW} \lesssim
10^{-18}$\,erg~s$^{-1}$\,cm$^{-2}$\,Hz$^{-1}$\,sr$^{-1}$. This is consistent
with the different UV background values assumed in the \textsc{Cloudy}
models, which range from $\sim 10^{-22}$ for the IGM-like scenario to
$\sim 10^{-20}$\,erg~s$^{-1}$\,cm$^{-2}$\,Hz$^{-1}$\,sr$^{-1}$ for the
ISM-like scenario.
Using \hbox{$f_{\textrm{H}_2}$}\ and \hbox{$N_{\textrm{H}_2}$}\ measured in the $z=0.56$ sub-DLA, the
measured metallicity and equation (8), we find densities of $\sim
1-4$\,\mbox{cm$^{-3}$}\ for a UV background incident radiation field, and $\sim
70-480$\,\mbox{cm$^{-3}$}\ for a Milky Way ISM-like radiation field. The lower
density range corresponds to cloud thicknesses of $\sim 3 - 10$\,pc,
the high density range to $\sim 0.002 - 0.15$\,pc. The only direct
measurement of the size of a redshifted H$_2$ absorber is by
\citet{Balashev11}, through partial covering of a background QSO broad
line region. They find the region producing H$_2$ is $\sim0.15$~pc and
its surrounding neutral envelope $\sim8$~pc, both of which are
consistent with our size estimates. The upper end of our density range
is consistent with values measured for higher redshift H$_2$ systems
using \hbox{{\rm C}{\sc \,i}}\ fine structure transitions, but would result in extremely
small cloud sizes.
The low total column density of the $z=0.56$ system suggests that it
does not pass through the ISM of a galaxy. Returning to
Figure~\ref{f_fntot}, we see that local systems with similarly low
total $N_\textrm{H}$ and almost as high \hbox{$f_{\textrm{H}_2}$}\ are sightlines through
a high velocity cloud \citep{Richter99} and the Magellanic Stream
\citep{Sembach01,Richter01b}. These clouds have sub-solar
metallicities ($0.3-0.5$ solar), and are most likely tidally stripped
from the Magellanic Clouds (for the Magellanic Stream) or the Milky
Way. \citet{Sembach01} estimate the density of the H$_2$-bearing cloud
they observed in the Magellanic Stream to be $0.3-3$\,\mbox{cm$^{-3}$}\ with a
photoionisation rate at least a factor of 10 smaller than the Milky
Way ISM value. The H$_2$ formation timescale for these low densities
is around $1$ Gyr, a large fraction of the estimated lifetime of the
Magellanic Stream \citep[for example][]{Besla10}. Therefore they
favour a scenario where H$_2$ is not formed in place, but has survived
the tidal stripping process and persists due to a combination of
self-shielding and the lower ambient UV field compared to the LMC
ISM. Such a scenario could also be responsible for the $z=0.56$
absorber.
\subsection{Comparison to higher-redshift H$_2$ absorbers}
Unlike the local sightlines, there is no clear bimodality in the
\hbox{$f_{\textrm{H}_2}$}\ -- $N_\textrm{H}$ distribution for higher-$z$ H$_2$ systems.
This could be due to each higher-$z$ absorber being comprised of
several clouds, or to a much wider range of incident UV and H$_2$
formation rates, both of which may smooth away an underlying
distribution.
The three high-$z$ systems with \hbox{$f_{\textrm{H}_2}$}\ and $N_\textrm{H}$ most
similar to this system are those described by \citet{Petitjean02} (at
$z=1.973$ towards Q0013$-$0029 with $\hbox{$N_\textsc{H\,i}$} \le 10^{19.4}$~\mbox{cm$^{-2}$},
$\hbox{$f_{\textrm{H}_2}$}=10^{-2.63}$), \citet{Reimers03} ($z=1.51$ towards
HE0515$-$4414 with $\hbox{$N_\textsc{H\,i}$} = 10^{19.88}\,$\mbox{cm$^{-2}$}, $\hbox{$f_{\textrm{H}_2}$}=10^{-2.64}$),
and \citet{Tumlinson10} and \citet{Milutinovic10} ($z=2.059$ towards
Q2123$-$0500 with $\hbox{$N_\textsc{H\,i}$} = 10^{19.18}\,$\mbox{cm$^{-2}$}, $\hbox{$f_{\textrm{H}_2}$}=10^{-1.54}$). The
$z=1.973$ system is a sub-DLA component that is highly depleted to the
same extent as is observed for cool gas in the Milky Way. It has a
solar metallicity and the gas pressure is even higher than is
typically measured in Milky Way ISM. The $z=1.51$ system has a
metallicity of 0.3 solar, and dust to gas ratio of $0.89\pm 0.19$
relative to solar. It also shows evidence of a higher
photodissociation rate than is seen locally. The final sub-DLA at
$z=2.059$ has a metallicity of 0.5 solar, and HD absorption is
observed in addition to H$_2$. It exhibits a multi-phase medium of
cold and warm gas, similar to the system we have presented in this
paper. Unfortunately none of these absorbers have associated imaging
to suggest a typical impact parameter of any nearby galaxy producing
the absorption.
Therefore, the three higher redshift systems showing a similarly high
\hbox{$f_{\textrm{H}_2}$}\ and low \hbox{$N_\textsc{H\,i}$}\ tend to have larger metallicities and dust to
gas ratios than the $z=0.56$ absorber. However, it is possible that
the components producing H$_2$ in the $z=0.56$ system have a higher
metallicity and dust-to-gas ratio than that averaged over the whole
absorber.
\subsection{Connection to galaxies}
\label{s_gal}
The $K$ band imaging around Q~0107$-$0232 has a seeing FWHM of
0.8\arcsec, and shows two possible galaxy candidates less than
1.2\arcsec\ from the QSO sightline. Figure \ref{f_im} shows a
$5$\arcsec$\times5$\arcsec\ region centred on the QSO. The QSO image
has been subtracted using the point spread function of a nearby
star. The two galaxy candidates are seen to the North-West (G1) and
South-West (G2). Assuming they are at the redshift of the absorber,
they have luminosities of $0.7L^*$ (G1) and $2L^*$ (G2), and impact
parameters of $10$~kpc (G1) and $11$~kpc (G2), both smaller than the
median impact parameter of $33$~kpc for galaxies associated with
sub-DLAs found by \citet{Rao11}. Therefore it is likely that at least
one is associated with the absorber, on scales typical of the
separations between the Milky Way and high-velocity clouds (10-60~kpc,
see \citealt{Putman12} and the references therein).
\begin{figure}
\begin{center}
\includegraphics[width=0.47\textwidth]{./fig_sub.pdf}
\caption{\label{f_im} A K-band image of the QSO showing two nearby
galaxy candidates, G1 and G2. The QSO image has been subtracted
using the point spread function of a nearby star. The region near
the centre of the QSO still shows significant residuals and has been
greyed out. If G1 and G2 are at the redshift of the QSO then they
have impact parameters of $10$~kpc and $11$~kpc, and luminosities of
$0.7L^*$ and $2L^*$ respectively.}
\end{center}
\end{figure}
\subsection{Three different gas phases in the sub-DLA}
\label{s_threephase}
Figures~\ref{f_cloudy1} \& \ref{f_cloudy2} show the total hydrogen
particle density for the majority of metals observed in this system
corresponding to the ionisation parameter, assuming the normalisations
of the incident radiation fields are correct. The most likely model
corresponds to hydrogen densities from $10^{-3}$ to
$10^{-2}$~\mbox{cm$^{-3}$}. Even assuming a factor of ten uncertainty in the
radiation field strength, this is much lower than the typical
densities where H$_2$ is seen in both our galaxy and in other
H$_2$-bearing DLAs ($n_{\rm H} = 10-100$~\mbox{cm$^{-3}$}). This is confirmed by our
\textsc{Cloudy} modelling, which shows that there is no single cloud
model that can simultaneously reproduce both the \hbox{{\rm C}{\sc \,i}}\ and H$_2$ column
densities, in addition to those of the other low-ionisation metal
transitions. Therefore the gas traced by most of the metal absorption
is probably in a different environment to that in which the H$_2$
resides. This is also likely the cause of the excess $N_\textrm{\hbox{{\rm C}{\sc \,i}}}$
over that predicted by the \textsc{Cloudy} models. \hbox{{\rm C}{\sc \,i}}\ is often seen
in dense components showing H$_2$ \citep[for example][]{Srianand05}
and can have extremely narrow linewidths corresponding to temperatures
of $\sim 100$\,K \citep{Jorgenson09,Carswell11}, indicating it occurs
in the same environment as H$_2$. Thus most of the \hbox{{\rm C}{\sc \,i}}\ and some
\hbox{{\rm Ca}{\sc \,ii}}\ may be from a high density region co-spatial with the H$_2$.
As discussed in Section~\ref{s_cloudy}, the presence of \hbox{{\rm O}{\sc \,vi}}\ is
unlikely to be explained by photoionisation by a hard UV field. At the
metallicity of the absorber ($\sim 0.1$ solar), significant \hbox{{\rm O}{\sc \,vi}}\ is
only produced via collisional ionisation for temperatures larger than
$10^5$\ K, even in non-equilibrium cases \citep{Gnat07}. Thus it is
likely a hotter medium than that producing the H$_2$ and metal lines
is also present.
We conclude that the absorption is due to gas in three phases: a
photoionised medium at $\sim 10^4$\,K in which most of the metal
transitions we see are produced, a cold neutral medium at $\sim
100$\,K where the H$_2$ and \hbox{{\rm C}{\sc \,i}}\ absorption occurs, and a hotter phase
where \hbox{{\rm O}{\sc \,vi}}\ is produced. The \hbox{{\rm H}{\sc \,i}}\ column is likely split between the
two cooler phases. A similar multi-phase environment is also seen in
other higher redshift sub-DLAs that show molecular absorption
\citep{Milutinovic10}.\footnote{Milutinovic et al. did not report an
\hbox{{\rm O}{\sc \,vi}}\ detection, but as this system was at a higher redshift, it may
have been heavily blended with \mbox{Ly$\alpha$}\ forest absorption.}
The Magellanic Stream and many other HVCs comprise $10^4$~K ionised
gas that is seen in H$\alpha$ emission, T~$>10^5$~K hot gas producing
\hbox{{\rm O}{\sc \,vi}}\ absorption, and they can also contain cold neutral gas with
H$_2$ \citep{Sembach03, Fox10}. Taken together, the existence of these
three phases, the high molecular fraction with a low total column
density, and the proximity of a possible $\sim L^*$ galaxy suggest the
$z=0.56$ absorber is due to a tidally stripped feature analogous to
the Magellanic Stream.
\subsection{Incidence rate of H$_2$ in low redshift sub-DLAs}
Due to the need for bright targets observable with space-based UV
spectroscopy and their low incidence rate, very few DLAs and sub-DLAs
have been found at low redshift. Until recently only $\sim 10$ DLAs at
redshifts $<1$ were known, and only a handful of these have coverage
of H$_2$ Lyman-Werner bands. With the availability of COS, the number
of such systems is being increased dramatically, and due to its far UV
wavelength coverage the presence of H$_2$ can be easily detected.
\citet{Battisti12} present a sample of $2$ DLAs and $6$ sub-DLAs at $z
< 0.35$, serendipitously discovered along sightlines as part of a
large COS program. Like the sub-DLA presented here, they were not
pre-selected by the strength of their metal lines or other properties
that might influence the likelihood of detecting
molecules. Interestingly, they also discovered a sub-damped system
with H$_2$ absorption at $z=0.2477$. Taking this sample together with
the system in this paper and assuming binomial statistics, we find the
expected incidence rate of DLAs and sub-DLAs showing molecular
hydrogen at \hbox{$N_{\textrm{H}_2}$}\,$\gtrsim10^{14}$\,\mbox{cm$^{-2}$}\ at low redshift to be $2 /
9 = 22$ per cent (with a $95$ per cent confidence level lower limit of
$4$ per cent), rising to $33$ ($5$) per cent if we consider only the
sub-damped systems with \hbox{$N_\textsc{H\,i}$}\,$< 10^{20}$\,\mbox{cm$^{-2}$}. This is a
surprisingly large fraction given that sub-DLAs are often found to be
highly ionised absorbers with $\lesssim 10$ per cent of their hydrogen
in the form of \hbox{{\rm H}{\sc \,i}}.
If we think that the absorption cross section for H$_2$ is dominated
by cold gas associated with Local Group-type systems (the Magellanic
Stream for example), then this may be consistent with this high
incidence rate. \citet{Richter12} shows that one can explain 30-100
per cent of the observed incidence rate of systems with \hbox{$N_\textsc{H\,i}$}\,$>
10^{17.5}$\, \mbox{cm$^{-2}$}\ as intermediate- and high-velocity clouds
distributed around galaxies with \hbox{{\rm H}{\sc \,i}}\ masses between $10^{8.5}$ and
$10^{10}$ M$_{\sun}$ in a similar way as is seen around M31 and the
Milky Way. As discussed in the previous section, some HVCs also show
relatively high molecular fractions, and in terms of \hbox{$N_\textsc{H\,i}$}\ and
\hbox{$f_{\textrm{H}_2}$}\ HVCs are the local systems most analogous to the system
analysed in this paper.
It would be interesting to perform a systematic search for H$_2$ in
further $10^{19}$\,\mbox{cm$^{-2}$}\ $< $~\hbox{$N_\textsc{H\,i}$}~$ < 10^{20.3}$\,\mbox{cm$^{-2}$}\ sub-DLAs at
both high and low redshifts that have metal absorption consistent with
a cool, dusty environment. Sub-DLAs tend to have both higher
metallicities and larger velocity widths than DLAs, and H$_2$ is more
likely to be found in DLAs with both these characteristics
\citep{Noterdaeme08}.
\subsection{Evolution in $\bmath{\hbox{$f_{\textrm{H}_2}$}}$}
\begin{figure}
\begin{center}
\includegraphics[width=0.4\textwidth]{./fig_fz.pdf}
\caption{\label{f_fz} The molecular mass fraction as a function of
lookback time for sites where H$_2$ absorption has been
detected. The Local Group point is a median for all values from the
plane of the Milky Way \citep{Savage77}, the LMC and SMC
\citep{Welty12}. The error bars show the 10th and 90th percentile
level. There is no evidence for evolution in \hbox{$f_{\textrm{H}_2}$}, but the number
of measurements at $z > 0$ is small.}
\end{center}
\end{figure}
We plot the \hbox{$f_{\textrm{H}_2}$}\ values as a function of cosmic time in
Figure~\ref{f_fz}. There is no evidence for evolution in \hbox{$f_{\textrm{H}_2}$},
though more measurements are needed, particularly at intermediate
redshifts, given the large scatter in \hbox{$f_{\textrm{H}_2}$}\ seen along both local
sightlines in the Milky Way halo and in higher $z$ DLAs.
\section{Summary}
We have analysed a sub-damped \mbox{Ly$\alpha$}\ system with \hbox{$N_\textsc{H\,i}$} $=10^{19.5 \pm
0.2}$\,\mbox{cm$^{-2}$}\ at $z=0.56$ that shows associated molecular hydrogen
absorption in the Lyman and Werner bands. Using velocity components
determined from a high resolution spectrum covering metal transitions
falling in the optical, we fit a two-component model to the H$_2$
absorption and find a lower limit to the molecular fraction of
$\log_{10} \hbox{$f_{\textrm{H}_2}$} = -1.93 \pm 0.36$, and a lower limit independent of
the assumed velocity structure of \hbox{$f_{\textrm{H}_2}$}\,$>10^{-3}$. This is higher
than other sightlines with similar \hbox{$N_\textsc{H\,i}$}\ where H$_2$ has been measured
in the Milky Way halo. We find a metallicity for the cloud $\log_{10}
Z = -0.72\pm 0.32$, or $0.19^{+0.21}_{-0.10}$ solar. The dust-to-gas
ratio relative to the solar neighbourhood is $\log_{10} \kappa <
-0.44$, or $\kappa < 0.36$.
We modeled the observed transitions using \textsc{Cloudy} and were
unable to find a single solution that can simultaneously reproduce all
the observed transitions. However, a model for the absorber of a
$10^4$\,K cloud illuminated by a radiation field dominated by the UV
background can broadly reproduce all the observed column densities
apart from those of H$_2$, \hbox{{\rm C}{\sc \,i}}, and \hbox{{\rm O}{\sc \,vi}}. We conclude that there are
three phases in the absorber; a $T\sim 100$\,K phase where the
\hbox{{\rm C}{\sc \,i}}\ and H$_2$ arise, a $T\sim 10^4$\,K phase where the
low-ionisation metal absorption occurs, and a hotter, collisionally
ionised phase associated with \hbox{{\rm O}{\sc \,vi}}.
Using simple models of H$_2$ formation-dissociation equilibrium, we
calculate densities for the H$_2$ absorbing region from $\sim
1-4$\,\mbox{cm$^{-3}$}\ to $\sim 70-480$\,\mbox{cm$^{-3}$}, depending on the incident
strength of the radiation field. The lower density range corresponds
to cloud thicknesses of $\sim 3 - 10$\,pc, the high density range to
$\sim 0.002 - 0.15$\,pc. Given the \hbox{$N_\textsc{H\,i}$}, the presence of a three phase
medium, the molecular fraction, metallicity and two galaxy candidates
near the QSO sightline with impact parameters of $\sim 10$\,kpc, we
conclude this system may be a tidally stripped feature similar to the
Magellanic Stream.
Finally, we remark that of the seven sub-DLAs observed at $z < 0.7$
for which there is the possibility to detect \hbox{$N_{\textrm{H}_2}$}\,$\gtrsim
10^{14.5}$\,\mbox{cm$^{-2}$}, two H$_2$ detections were found. A survey for H$_2$
in low-redshift sub-damped systems could be a fruitful way to measure
the physical conditions giving rise to these absorbers.
\vspace{0.5cm}
We acknowledge helpful correspondence with Jason Tumlinson and thank
Andrew Fox, Joe Hennawi, Mark Krumholz, Kate Rubin, Karin Sandstrom
and Todd Tripp for illuminating conversations, and Dan Welty for
comments on an earlier version of this paper. Gabor Worseck kindly
helped us to obtain the HIRES spectrum of Q~0107$-$0232. We thank the
anonymous referee for comments that helped improve the paper.
Some of the data presented here were taken at the W.M. Keck
Observatory, which is operated as a scientific partnership among the
California Institute of Technology, the University of California and
the National Aeronautics and Space Administration. The Observatory was
made possible by the generous financial support of the W.M. Keck
Foundation. This analysis made use of observations from the NASA/ESA
Hubble Space Telescope, obtained at the Space Telescope Science
Institute, which is operated by the Association of Universities for
Research in Astronomy, Inc., under NASA contract NAS 5-26555 (program
11585) and of observations collected at the European Organisation for
Astronomical Research in the Southern Hemisphere, Chile (program
383.A-0402).
The authors wish to recognize and acknowledge the significant cultural
role and reverence that the summit of Mauna Kea has always had within
the indigenous Hawaiian community. We are most fortunate to have the
opportunity to conduct observations from this mountain.
N.T. acknowledges grant support by CONICYT, Chile (PFCHA/{\it
Doctorado al Extranjero 1$^{\rm a}$ Convocatoria}, 72090883). Most
of the programs particular to this analysis were written using the
NumPy and SciPy packages (\url{http://www.scipy.org}), and plots were
produced using Matplotlib
\citep[][\url{http://www.matplotlib.sourceforge.net}]{Hunter07}.
\footnotesize{
\bibliographystyle{./mn2e}
|
1,314,259,995,036 | arxiv | \section{Introduction}
The Galactic Globular Cluster (GGC) \hbox{$\omega$ Cen~} is a
fundamental laboratory to address several long-standing astrophysical
problems. It is the most massive GGC ($M=5\times10^6\, M_\odot$,
Meylan et al.\ 1995) and the one that most clearly shows a
well-defined spread in metallicity. According to recent estimates
based on sizable samples of evolved red giant and sub-giant stars, the
metallicity distribution shows three peaks around ${[\rm Fe/H}]=-1.7$,
$-1.5$, and $-1.2$ together with a tail of metal-rich stars
approaching ${[\rm Fe/H}]\approx-0.5$ (Norris et al.\ 1996; Hilker et
al.\ 2004; Pancino 2004). During the last few years it has also been
suggested that \hbox{$\omega$ Cen~} harbors multiple stellar
populations (Lee et al.\ 1999) characterized by different ages
(Ferraro et al.\ 2004; Hughes et al.\ 2004), helium abundances (Bedin
et al.\ 2004; Norris 2004), and distances (Bedin et al.\ 2004;
Freyhammer et al.\ 2004). From a kinematical point of view the
properties of \hbox{$\omega$ Cen~} appear well-established: it moves
along a high-eccentricity, retrograde orbit (Geyer et al.\ 1983), and
shows differential rotation (Merritt et al.\ 1997). The occurrence of
a tidal tail in \hbox{$\omega$ Cen~} was suggested by Leon et al.\
(2000) but questioned by Law et al.\ (2003) on the basis of 2MASS
data. This problem has not been settled yet, because recent detailed
N-body simulations of the tidal interaction between \hbox{$\omega$
Cen~} and the Galaxy do suggest the occurrence of extended tails
(Chiba \& Mizutani 2004). Current empirical and theoretical evidence
do not allow us to establish whether \hbox{$\omega$ Cen~} is the core
of a galaxy that was partially disrupted by the gravitational
interaction with the Galaxy (Lee et al.\ 1999; Pancino 2004) or the
aftermath of the merging of two GCs (Icke \& Alcaino 1988).
Even though \hbox{$\omega$ Cen~} presents several properties that need to be properly
understood, its stellar content is a gold mine to investigate some open
problems concerning the dependence on the metallicity. This applies not
only to evolved stars such as RR Lyrae, hot HB stars, and the tip of
the RG branch, but also to the different expected population(s) of
white dwarfs. The search for WDs in GGCs has been quite successful,
and several cooling sequences have already been identified
(Hansen et al.\ 2002; Moehler et al.\ 2004, and references therein).
The detection of WDs in \hbox{$\omega$ Cen~} dates back to Ortolani \&
Rosino (1987) and to Elson et al.\ (1995) on the basis of
ground-based and space (HST) data, respectively. In this paper,
we present preliminary results concerning the identification
of the WD cooling sequence in \hbox{$\omega$ Cen~} on the basis of
data collected with the Advanced Camera for Surveys (ACS) on board
HST, and publicly available on the HST archive.
\section{Data Reduction and Color-Magnitude Diagrams}
Current data were collected with nine pointings of the ACS camera
across the center of the cluster. The $3\times3$ mosaic covers a field
of view of $\approx9\arcmin \times9\arcmin$. Four images per field
have been acquired in three different bands, namely F435W, F625W, and
F658N (hereinafter $B$, $R$, and $H_\alpha$). Three deep (340s) and
one shallow (8s, 12s) exposure were secured for the B and R-band,
respectively, while the exposure time for the four $H_\alpha$ images
was 440s. The nine fields were independently reduced with the
DAOPHOTII/ALLFRAME package (Stetson 1994). An individual PSF has been
extracted for each frame by adopting, on average, $\approx$200 bright
isolated stars. The individual catalogues were rescaled to a common
geometrical system with DAOMATCH/DAOMASTER. The final catalogue
includes approximately 1.2$\times 10^6$ stars. The photometric
calibration was performed in the Vega System
(http://www.stsci.edu/hst/acs/documents). The same data set was
adopted by Haggard et al.\ (2004) to identify the optical counterpart
to a quiescent neutron star originally detected on X-ray data
collected with Chandra.
\begin{figure}[!ht]
\vspace*{0.5truecm}
\centerline{\epsfxsize= 9.0 cm \epsfysize= 10.0 cm \epsfbox{bono_fig1.eps}}
\vspace*{-0.3truecm}
\caption{Color-Magnitude Diagram, $B,B-R$, based on data collected
with ACS@HST. The photometric catalogue was selected by assuming a
separation index $sep \ge 0.1$. Thick points display WDs with
photometric errors smaller than 0.2 mag. The solid line shows a WD
isochrone for t=14 Gyr and Z=0.0001. See text for more details.}
\end{figure}
Figure 1 shows the CMD of \hbox{$\omega$ Cen~} in the $B-R, B$ plane. Data plotted in
this figure have been selected by using the `separation index' {\em
sep} introduced by Stetson et al.\ (2003). We adopted this parameter
($sep \ge 0.1$) because crowding errors in the innermost regions of
GGCs dominate the photometric errors. The WD candidates have been
selected among the objects with photometric errors in the three bands
smaller than 0.2 mag. We end up with a sample of half a million cluster
stars and roughly 600 WDs (thick points). To compare observed WDs with
theoretical predictions we adopted the recent WD theoretical models
computed by Prada Moroni \& Straniero (2002). Predicted luminosities
and effective temperatures have been transformed into the ACS bands by
adopting the pure H atmosphere models kindly provided by Bergeron
(private communication). The solid line plotted in Fig. 1 shows the WD
isochrone for t=14 Gyr and Z=0.0001. We adopted the same distance
modulus (DM=13.7) and cluster reddening (E(B-V)=0.12) adopted by
Freyhammer et al.\ (2004). The extinction in the ACS bands was
estimated using the Cardelli et al.\ (1989) relation. Theory and
observations appear to agree at fainter magnitudes and cooler
effective temperatures. However, we are faced with a substantial
discrepancy in the bright region. At present it is not clear whether
this mismatch is caused either by the assumed composition of the
atmosphere models, or by the inner structure of the brightest WD
models. More quantitative constraints do require a thorough
comparison between theory and observations.
To further investigate the evolutionary properties of cluster WDs we plotted
the same sample in the $H_\alpha-R, R$ plane (see Fig. 2).
\begin{figure}[!ht]
\vspace*{0.5truecm}
\centerline{\epsfxsize= 9.0 cm \epsfysize= 10.0 cm \epsfbox{bono_fig2.eps}}
\vspace*{-0.3truecm}
\caption{Same as Fig. 1, but for $H_\alpha$ and R-band photometry.}
\end{figure}
This plane is generally adopted to detect stars with strong $H_\alpha$
emission, namely Cataclysmic Variables (CVs) or BY Draconis stars.
The former group is characterized by flicker variations, while
the latter group contains chromospherically active Main Sequence
stars (dK,dM) with variability of the order of days caused by
fast rotation. Recent photometric (Cool et al.\ 1998) and
spectroscopic (Edmonds et al.\ 1999) measurements of a few blue stars
in the GGC NGC6397 suggest that He WDs also show $H_\alpha$ emission,
but they lack the flickering variations. Needless to say, He WDs
are excellent tracers of the dynamical properties of GGCs, since they
are the aftermath of compact binary evolution (Taylor et al.\ 2001).
Data plotted in Fig. 2 show that the WD sequence detected in the
$B-R, B$ plane shows up also in this plane. The broadening in color when
moving from $R\approx 22$ to $R\approx 24.5$ is mainly caused by
photometric errors.
\begin{figure}[!ht]
\centerline{\epsfxsize= 10.5 cm \epsfbox{bono_fig3.ps}}
\vspace*{-0.3truecm}
\caption{Intrinsic photometric error in $R$ (thin points) and $H_{\alpha}$
(thick points) bands. Open circles mark WDs with $H_{\alpha}-R\le -0.2$, and
$|sharp|\le0.1$.}
\end{figure}
However, data plotted in this plane show quite clearly that a small sample
of WDs are $H_\alpha$-bright. The identification of these objects
is quite easy, since they show R magnitudes of the order of $R\approx 24-25$
but they appear systematically bluer than typical WDs. It is noteworthy
that the identification of $H_\alpha$-bright WDs becomes
more robust when moving toward bluer colors. In order to supply a more
quantitative estimate of the photometric errors affecting current mean
magnitudes, Fig. 3 shows the standard deviations for the R (thin points)
and the $H_\alpha$ (thick points) mean magnitudes. To further improve the photometric
characterization of $H_\alpha$-bright WDs we selected (open circles) the WDs
with $H_\alpha-R \le -0.2$ and $|sharpness|\le 0.1$ (this parameter estimates
the sharpness of the detected object). We end up with a sample of $\sim 70$
candidates. Data plotted in Fig. 3 show that typical errors for these objects
range from less than 0.1 to $\sim 0.2$ mag when approaching the faintest
limiting magnitude.
\begin{figure}[!ht]
\vspace*{0.5truecm}
\centerline{\epsfxsize= 9.0 cm \epsfysize= 10.0 cm \epsfbox{bono_fig4.eps}}
\vspace*{-0.3truecm}
\caption{Same as Fig. 1, but for $B,B-H_{\alpha}$ photometry.
Open circles mark WDs with $R-H_{\alpha}\ge 0.2$, and $|sharpness|\le0.1$.}
\end{figure}
Finally, we plotted the entire sample of WDs in the $B-H_{\alpha}, B$ plane.
Once again we found that a small sample appears to be
systematically brighter in $H_{\alpha}$ (open circles), since they are
redder than the bulk of the WDs and of the WD isochrone (solid line).
Current data indicate that a small sample of bonafide WDs are
$H_{\alpha}$-bright. These objects do not appear to be located at
a fixed color range in the $B-R, B$ plane. This evidence suggests that
this phenomenon is not correlated with the occurrence of a circumstellar
envelope caused by pulsation properties of DA and DB pulsating WDs
(Nitta et al.\ this conference).
Moreover, if independent photometric or spectroscopic measurements
confirm that $H_{\alpha}$-bright WDs are truly He WDs, the size of the
sample would pose a new puzzle concerning their origin. According to
recent N-body simulations the occurrence of compact binaries is
tightly connected with the dynamical evolution of the cluster and they
should peak in post-core collapsed clusters, such as NGC6397. However,
\hbox{$\omega$ Cen~} presents a low central density.
\acknowledgements{This work was partially supported by MIUR-COFIN~2003 under
the project "Continuity and Discontinuity in the Galaxy Formation".}
|
1,314,259,995,037 | arxiv | \section{Introduction}
\IEEEPARstart{M}{ost} of the existing distributed storage network models assume a very simple structure that the network itself is viewed as a collection of ``identical'' storage nodes and that the transmission cost between any two nodes is identical \cite{NCDSS,DScodeRepairTransfer,ExactMDScodeIA,LocalReg,LRCDimakis}. However, this model cannot perfectly represent the real world storage networks. In reality, a typical data centre can easily house hundreds of racks each of which contains numerous storage disks \cite{DSNArchProtManag}. While all storage nodes (or disks here) in the network can communicate with each other, the transmission costs in terms of latency or overheads can differ vastly. For example, the transmission latency between storage disks in the same rack is usually much smaller, when compared with the case that both disks are not in the same rack \cite{DiskLocal}.
Recently, heterogeneous distributed storage networks (including the multi-rack models) have received a fair amount of attention \cite{RackModelDSN,Gaston2013A-Realistic,NonHomoRack,NonHomogenDSN,CostBandwith} due to the heterogeneous nature of the practical storage networks and their various applications such as hybrid storage systems \cite{HybridOceanstore}, video-on-demand systems \cite{p2pvod}, and heterogeneous wireless networks \cite{WirelessD2D}. A heterogeneous model for distributed storage networks is introduced in \cite{CostBandwith} where a static classification of the storage nodes is proposed. In this model the storage nodes are partitioned into two groups with ``cheap'' and ``expensive'' bandwidth. In other words, the data download cost, to repair a failure, from the nodes in the ``cheap bandwidth'' group will be lower than the data download cost from the ``expensive bandwidth'' group. The model in \cite{CostBandwith} partially addressed the issues that the communication costs among nodes are not all equal where the download cost from one of the groups is always cheaper than the other group. However, this model does not fit well into a multi-rack model, where the transmission cost should depend on both where the transmitting and the receiving (or the failed) storage nodes are located.
A more realistic rack model of a distributed storage network has been investigated in \cite{Gaston2013A-Realistic}, in which the authors considered a two-rack model. In their model, the communication cost between the nodes in the same rack is smaller than between two different racks. Therefore, the main difference of this model compared to the one in \cite{CostBandwith} is that the classification of the storage nodes depends on the location of the failed node. More precisely, the data download cost from the nodes in the same rack (i.e., group) of the failure is lower (i.e., cheap bandwidth) than the download cost from the other group (i.e., expensive bandwidth). As such, it is desirable that more data should be transmitted by nodes in the same rack of the failed node during the repair process. Using information flow graph, \cite{Gaston2013A-Realistic} identifies whether certain choices of parameters are achievable or not, leading to the characterisation of the tradeoff between storage cost and repair cost. In \cite{CostBandwith} and \cite{Gaston2013A-Realistic}, information flow graph is employed to characterise the fundamental tradeoff between various system parameters such as storage cost and repair cost \cite{NCDSS}. The tradeoff is asymptotic (without restriction on the size of the symbol alphabet) and functional repair is always assumed (i.e., the failed node is not required to recover exactly what it previously stored, as long as the whole storage system is still robust after repair).
However, no general coding scheme is suggested for these rack model storage networks. It is worth mentioning that any storage code, especially the locally repairable codes (e.g., pyramid codes \cite{Pyramid}), designed for a generic model of distributed storage networks can also be used for the rack model networks. However, these coding schemes will not be able to provide optimal methods to repair the failures in the network since they do not take into consideration the different transmission costs between the nodes in the multi-rack storage networks.
A non-homogeneous storage system is considered in \cite{NonHomogenDSN} where there exists a super node in the network with higher storage capacity, reliability and availability probability than the other nodes. It has been shown that this model can achieve the optimal bandwidth-storage trade-off bound in \cite{NCDSS} with a smaller file and alphabet size than the traditional homogeneous storage network in \cite{RepairHadamard}.
The data retrieval problem in a heterogeneous storage system is studied in \cite{DSAllocate}. In this model it is assumed that each node has a different storage size where any amount of encoded data can be stored in each node such that the total allocated storage remains less than a threshold. The optimal allocation to retrieve the original data is studied such that the data collector can access only a random group of nodes. A combination of the repair problem with data allocation is investigated in \cite{RepairAlloc1} and \cite{RepairAlloc}. In these works, a general model of a heterogeneous storage network is considered where each node has a different storage and download cost. The amount of data allocated to each storage node and the amount of data to be downloaded from each survived node to repair a failure has been investigated using the information flow graph to minimise the storage and repair cost and to establish a storage-repair trade-off.
The capacity of heterogenous storage networks is studied in \cite{CapacityHeterogeneous}. The proposed network in this work consists of storage nodes with different storage capacities and repair bandwidths. It is assumed that the repair bandwidth of each node depends on the repair group that the helper nodes belong to. The functional repair of node failures is assumed and the capacity of this network as the maximum amount of stored information in order to reach a level of reliability is studied.
A common approach of storing data in multi-rack storage networks is storing each encoded symbol of a data block in distinct nodes located in distinct racks \cite{WinAzur,XorElephant}. Consequently, repairing any node failure requires transferring data from survived nodes across the racks. Due to the large amount of across-rack data transfer during the repair process, this approach can be highly costly \cite{NetCharDataCener}. A regenerating code \cite{NCDSS} is proposed in \cite{HierarchicalDataCenter} in order to minimise the across-rack repair bandwidth. In this coding scheme, each rack stores multiple encoded symbols (rather than one symbol) of a data block in distinct storage nodes. To repair a failed node, a regeneration first occurs within each rack and then the regenerated data from each rack is transferred to the failed rack to regenerate the failed node.
\begin{figure}[t]
\centering
\includegraphics[width=.4\textwidth]{Figs/Multi-Rack}
\caption{A rack model for distributed storage network}
\label{fig:rack}
\end{figure}
In this paper we introduce a realistic multi-rack storage network which represents the real data storage networks more generally and practically. We focus on a storage code-design framework, specifically tailored for multi-rack data storage networks. Our storage network model depicted in Figure \ref{fig:rack} consists of $M$ racks each of which contains $N$ storage nodes. We will assume that each rack has a ``Processing Unit (PU)'' which is directly connected to all storage nodes in the same rack. The rack processing unit is responsible for both computation on the stored data and communication between the nodes in the rack. Moreover, the processing units of racks can communicate with each other in order to transmit data from one rack to another. In other words, storage nodes in two different racks can only communicate via their respective processing units. It is very common in realistic systems that the communication cost between the storage nodes within a rack (via its PU) is much lower than the communication cost between two different processing units (and hence located in two different racks) \cite{DiskLocal}. Therefore, it is desirable and in fact critical that a failed node could be repaired by only the survived nodes within the same rack in order to keep the repair cost low. We further assume that the system bottleneck is at the PU of each rack. Therefore, in our code-design framework, the focus is to design distributed storage codes that minimise the communication costs between nodes and the processing unit of the rack. It is only for some occasional severe failure patterns that it will require nodes from other racks to assist in the repair process.
The main contributions of this paper are:
\begin{itemize}
\item
A code-design framework for multi-rack storage networks: our proposed scheme is able to locally repair any node failure within the rack in order to minimise the repair cost. Moreover, in the case of severe failure patterns where the failures cannot be repaired only by the survived nodes in the rack, our scheme is able to enlist the other racks in the network to help repair the failures.
\item
Establishing linear programming bounds on the code size: we show that maximising the rate of the multi-rack storage code is equivalent to maximising the code size. We establish a linear programming problem on the code size based on the definition and criteria of our multi-rack storage code.
\end{itemize}
This paper is extended from our earlier work on multi-rack distributed storage codes \cite{RMSC} which was presented in the IEEE Information Theory Workshop (ITW 2014). The rest of this paper is organised as follows. In Section \ref{Sec:IV} we present the code-design framework for multi-rack storage networks and give a detailed description of its criteria and the failure repair processes. We also derive the code rate in this section. Then, in Section \ref{Sec:V}, we establish a linear programming problem to upper bound the code size. Moreover, in this section, we exploit symmetry in our code in order to reduce the complexity of the problem. The paper is concluded in Section \ref{Sec:Conc}.
\section{Multi-rack Storage Code -- Design Framework} \label{Sec:IV}
Consider the rack model storage network depicted in Figure \ref{fig:rack}. This multi-rack data storage network consists of $M$ racks each of which contains $N$ storage nodes (or storage disks). We will represent each node as
\[
\left(X_{m,n}, ~ \forall m \in \mathcal M \text{ and } \forall n \in {\cal N} \right)
\]
where $\mathcal M=\{1, \ldots, M\}$, ${\cal N}=\{1, \ldots, N\}$, and $X_{m,n}$ refers to the $n$th node in the $m$th rack. Abusing notation, $X_{m,n}$ will also refer to the content stored at that particular storage node. We define
\[
X_{m,*} \triangleq [X_{m,1} , \ldots, X_{m,N}],
\]
whose entries are from $\mathbb{F}_q$. Particularly, $X_{m,*}$ is the vector of encoded data stored in the rack $m$.
Collecting all the stored contents from each rack, we have
\begin{align} \label{eq:codewordmatrix}
{X} \triangleq
\left[
\begin{array}{ccc}
X_{1,1} & \cdots & X_{1,N} \\
\vdots & \ddots & \vdots \\
X_{M,1} & \cdots & X_{M,N}
\end{array}
\right].
\end{align}
In this paper, we assume that each rack has a processing unit, which is responsible for all computations required in nodes repair. In other words, contents stored in a failed node will be regenerated in the processing unit, before sending all the regenerated content to the failed node (or its replica).
\begin{definition}[Multi-rack storage codes] \label{def:multi-rack parities}
A {{\bf f}{\emph{multi-rack storage code}}} is defined by three parity check matrices $({{\bf f} H} , {{\bf f} K}, {{\bf f} G})$ over $\mathbb{F}_q$ of respectively sizes $S_{1} \times N$, $S_{2} \times N$ and $L \times M$. The three matrices induce a storage code such that ${X}$ must satisfy the following parity-check equations
\begin{align}
{{\bf f} H} {X}^{\top} & = {{\bf f} 0} \label{eq:IRP}\\
{{\bf f} K} {X}^{\top} {{\bf f} G}^{\top} & = {{\bf f} 0}.\label{eq:ARP}
\end{align}
We will call ${{\bf f} H}, {{\bf f} K}$ respectively the intra-rack and inter-rack parity matrices. The matrix ${{\bf f} G}$ will be called helper-rack parity check matrix.
\end{definition}
In a multi-rack storage code, it is expected that most of the node failures should be recovered and repaired locally within their own racks. However, in the special case where local repair is not possible, redundancies added among racks will be used in the recovery. As there is a much lower probability that nodes in a rack cannot be recovered locally within the rack, this paper focuses on the special case where only one rack has node failures (or that all failed nodes in other racks can be completely repaired locally).
We now consider the first case where failures in a rack can be repaired by using only nodes within the rack.
\subsection{{Intra-Rack Repair}}
In this subsection, we will describe how to repair nodes locally within a rack. Assume without loss of generality that rack 1 fails (i.e., a group of nodes fails inside the rack). Let ${pat}$ be the index set for the nodes in rack 1 that fail. In other words, the values of $\{X_{1, n} , n\in{pat}\}$ (i.e., the node content) are unknown to the processing unit in rack 1.
Let
\[
{{{\bf f} x}} =
\left[
\begin{array}{c}
x_{1} \\ \vdots \\ x_{N}
\end{array}
\right]
\]
where
$$
x_{n} =
\begin{cases}
X_{1,n} & \text{ if } n \not \in {pat} \\
0 & \text{ otherwise.}
\end{cases}
$$
In other words, ${{{\bf f} x}}$ is obtained from $X_{1,*}^{\top}$ by replacing $X_{1, n} $ with $0$ for all $n\in{pat} $.
Define ${EM}^{\beta}_{N}$ as an $N\times N$ diagonal matrix such that its $(n,n)^{th}$ entry is 1 if $n \in\beta$ and is 0 otherwise. For simplicity, we will drop the subscript $N$ if it is understood from the context. Let $\bar{pat}$ be the complement set of ${pat}$. Therefore, $\bar{pat}$ will be the set of survived nodes in the rack $1$. Consequently,
$
{EM}^{\bar{pat}} X_{1,*}^{\top} = {{{\bf f} x}}
$.
Recall that
$
{{\bf f} H} X_{1,*}^{\top} = {{\bf f} 0}.
$
Therefore, rack 1 can repair ALL its failed nodes by the local rack survived nodes if and only if the following system of linear equations
\begin{align} \label{eq:21}
\begin{cases}
{EM}^{\bar{pat}} X_{1,*}^{\top} = {{{\bf f} x}} \\
{{\bf f} H} X_{1,*}^{\top} = {{\bf f} 0}
\end{cases}
\end{align}
has a unique solution.
For notation simplicity, we will use $\langle {EM}^\beta,{{\bf f} H} \rangle$ to denote the vector space spanned by rows of ${EM}^\beta$ and ${{\bf f} H}$. The set of linear equations in \eqref{eq:21} has a unique solution if and only if
\begin{align}\label{eq:22}
\dim \langle {EM}^{\bar{pat}} , {{\bf f} H} \rangle = N.
\end{align}
\def{\text{Dist}}{{\text{Dist}}}
Let ${pat}_{o}$ be the smallest set such that
$
\dim \langle {EM}^{\bar{{pat}_{o}}} , {{\bf f} H} \rangle < N
$. We will denote its size $|{pat}_{o}|$ as $ {\text{Dist}}({{\bf f} H}) $.
By definition, if $|{pat}| < {\text{Dist}}({{\bf f} H}) $, then it is sufficient to use intra-rack repair
to repair all failed nodes.
\begin{remark} It is well known that
$ {\text{Dist}}({{\bf f} H}) $ is equal to the minimum distance of a linear code defined by the parity check matrix ${{\bf f} H}$.
\end{remark}
\begin{definition}[support] \label{def:support}
The support $\lambda(\mathbf{v})$ of a vector $\mathbf{v} = \left[v_1, v_2, \ldots, v_N\right]$ is a subset of $\{1, 2, \ldots, N\}$ such that $i \in \lambda({\bf f}{v})$ if and only if $v_i \neq 0$, $\forall i \in \{1, 2, \ldots, N\}$.
\end{definition}
\begin{definition} \label{def:RepairGroup}
Consider any matrix ${{\bf f} H}$ and vector ${{\bf f} r}$ (such that both have $N$ columns).
For any $j = 1, \ldots, N$, let
\[
\Omega({{\bf f} H},{{\bf f} r},j) = \left\{\lambda(\mathbf{h}) \setminus j: \mathbf{h} \in \langle {{\bf f} H} , {{\bf f} r}\rangle \textnormal{ and } j \in \lambda(\mathbf{h}) \right\}.
\]
If ${{\bf f} r}$ is the zero vector, we will simply denote $\Omega({{\bf f} H},{{\bf f} r},j)$ by $\Omega({{\bf f} H},j)$.
\end{definition}
\begin{remark}
As we shall see, $\Omega({{\bf f} H},{{\bf f} r},j)$ plays a fundamental role in determining whether failures in a rack can be repaired or not. Specifically, $\Omega({{\bf f} H},j)$ contains all intra-rack repair groups for $X_{1,j} $. If there exists a set (or group) $\beta \in \Omega({{\bf f} H},j)$ such that all $X_{1,\ell} $ are survived for all $\ell \in \beta $, then the failed node $X_{1,j} $ can be repaired by
using only $X_{1,\ell} $ for all $\ell \in \beta $. The general case where ${{\bf f} r}$ is non-zero vector will be used in the inter-rack repair and will be explained soon.
\end{remark}
\begin{example}
Suppose ${{\bf f} H}$ is the intra-rack parity check matrix and is given by
\begin{equation} \label{eq:IRPM4}
{{\bf f} H}=
\left[
\begin{array}{cccccccc}
1 & 1 & 1 & 0 & 1 & 0 & 0 & 0 \\
1 & 1 & 0 & 1 & 0 & 1 & 0 & 0 \\
0 & 1 & 1 & 1 & 0 & 0 & 1 & 0 \\
1 & 0 & 1 & 1 & 0 & 0 & 0 & 1 \\
\end{array}
\right].
\end{equation}
From Definition \ref{def:RepairGroup}, $\Omega({{\bf f} H},1)$ will be given by
\begin{multline*}
\Omega({{\bf f} H},1) = \Big\{\{2,3,5\},\{2,4,6\},\{3,4,8\},\{2,7,8\},\{3,6,7\},\\
\{4,5,7\},\{5,6,8\},\{2,3,4,5,6,7,8\}\Big\},
\end{multline*}
where the entries are the index set of a group of nodes in each rack. Each subset in $\Omega({{\bf f} H},1)$ denotes an intra-rack repair group for repairing $X_{1,1}$ (or $X_{m,1}$ in general).
\end{example}
\begin{lemma}\label{lem:lemma1_4}
If $\beta \in \Omega({{\bf f} H},{{\bf f} r},j)$, then there exist vectors ${{\bf f} y} $, ${{\bf f} y}' $ and a scalar $a\in \mathbb{F}_q$ such that
\begin{align}\label{eqlemma4}
{{\bf f} e}_{j} = {{\bf f} y} {{\bf f} H} + a {{\bf f} r} + {{\bf f} y}' {EM}^{\beta}
\end{align}
where
${{\bf f} e}_{j}=[e_{j,1} , \ldots, e_{j,N}]$ is a length $N$ row vector such that
\begin{align}
e_{j,\ell} =
\begin{cases}
1 & \text{ if } \ell = j \\
0 & \text{ otherwise.}
\end{cases}
\end{align}
\end{lemma}
\begin{proof}
Since $\beta \in \Omega({{\bf f} H},{{\bf f} r},j)$, then there exists ${{\bf f} u}= [u_1,\ldots,u_N]$ such that
1) $ {{\bf f} u} = {{\bf f} y}{{\bf f} H} + a{{\bf f} r} $ for some vector ${{\bf f} y}$ and $a \in \mathbb{F}_q$,
2) $u_{j} = 1 $ and 3) $\lambda({{\bf f} u}) \setminus \{j\} = \beta$.
Let ${{\bf f} v} = - {{\bf f} u} {EM}^{\beta} $.
Since $\lambda({{\bf f} u}) \setminus \{j\} = \beta$,
$
{{\bf f} v} = - {{\bf f} u} + {{\bf f} e}_{j}
$.
Hence,
\begin{align*}
{{\bf f} e}_{j} & = {{\bf f} v} + {{\bf f} u} \\
& = {{\bf f} y}{{\bf f} H} + a{{\bf f} r} + {{\bf f} v} \\
& = {{\bf f} y}{{\bf f} H} + a{{\bf f} r} - {{\bf f} u}{EM}^{\beta}.
\end{align*}
The lemma thus follows by letting ${{\bf f} y}' = - {{\bf f} u}$.
\end{proof}
\begin{remark}
Conversely, if there exist vectors ${{\bf f} y} $, ${{\bf f} y}' $ and $a\in \mathbb{F}_q$ such that \eqref{eqlemma4} holds, then it can be proved easily that there exists $\delta \subseteq \beta $ such that $\delta \in \Omega({{\bf f} H},{{\bf f} r},j)$. The details of the proof will be omitted.
\end{remark}
Based on Lemma \ref{lem:lemma1_4}, the following theorem specifies conditions for intra-rack repair.
\begin{theorem}[Intra-rack Repair]\label{thm1}
Suppose node $j$ fails in rack $m=1$. Let
${pat}_{j}$ be the index set for all failed nodes\footnote{
${pat}_{j}$ can be interpreted as the set of failed nodes at the moment when
the node $j$ is repaired.} (hence, $j \in {pat}_{j}$).
If $\beta_{j} \subseteq \{1, \ldots, N\}$ satisfies the following two criteria,
\begin{enumerate}
\item \label{item:IRRP_1}
$\beta_{j} \in \Omega({{\bf f} H} , j)$, and
\item $\beta_{j} \cap {pat}_{j} = \emptyset$,
\end{enumerate}
then there exists $c_{j,n}$ for $n\in \beta_{j}$ such that
\begin{align}\label{eq:thm1}
X_{1,j} = \sum_{n\in\beta_{j}} c_{j,n} X_{1,n}.
\end{align}
\end{theorem}
\begin{proof}
By Lemma \ref{lem:lemma1_4} and criterion \ref{item:IRRP_1}, there exists
${{\bf f} y} $ and ${{\bf f} y}' $ such that
\begin{align} \label{eq:evector}
{{\bf f} e}_{j} = {{\bf f} y} {{\bf f} H} + {{\bf f} y}' {EM}^{\beta_{j}}.
\end{align}
Hence,
\begin{align}
{{\bf f} e}_{j} X_{1,*}^{\top} & = ({{\bf f} y} {{\bf f} H} + {{\bf f} y}' {EM}^{\beta_{j}}) X_{1,*}^{\top} \\
& = {{\bf f} y} {{\bf f} H} X_{1,*}^{\top}+ {{\bf f} y}' {EM}^{\beta_{j}} X_{1,*}^{\top} \\
& = {{\bf f} y}' {EM}^{\beta_{j}} X_{1,*}^{\top},
\end{align}
where the last equality follows from \eqref{eq:IRP}.
Finally, let
\begin{align} \label{eq:CoefVec}
[c_{j,1} , \ldots, c_{j,N}] = {{\bf f} y}' {EM}^{\beta_{j}} .
\end{align}
As the columns of ${EM}^{\beta_{j}}$ indexed by $\bar\beta_{j}$ are zero, $c_{j,n} = 0$ if $n \not\in \beta_{j}$. Therefore, we prove the theorem.
\end{proof}
Equation \eqref{eq:thm1} essentially defines how to regenerate the content of a failed node $X_{1,j}$ from $X_{1,n}$ for $n \in \beta_{j}$ (i.e., the nodes in its repair group). In other words, node $X_{1,j}$ is a linear combination of the nodes in its repair group where the coefficients are $c_{j,n}$. In this case, $|\beta_{j}|$ symbols are transmitted to the processing unit in rack 1, which can then repair the failed node $X_{1,j}$ by \eqref{eq:thm1}. Clearly, the choice of $\beta_{j}$ will affect the repair cost. It is always desirable to pick $\beta_{j}$ such that its size is as small as possible.
\begin{example}
Let ${{\bf f} H}$ be the intra-rack parity check matrix over $\mathbb{F}_3$ such that
\[
{{\bf f} H} =
\left[
\begin{array}{cccc}
0 & 1 & 1 & 1 \\
1 & 0 & 1 & 2
\end{array}
\right].
\]
Assume nodes $X_{1,1}$ and $X_{1,2}$ are failed. Thus, the failure pattern will be ${pat}=\{1,2\}$. Suppose we want to repair node $X_{1,1}$. The repair groups of node $j=1$ is given by
\[
\Omega({{\bf f} H},1)=\Big\{\{3,4\},\{2,3\},\{2,4\}\Big\}.
\]
A repair group $\beta_1 \in \Omega({{\bf f} H},1)$ is eligible for the intra-rack repair process provided that $\beta_1 \cap {pat}=\emptyset$. Therefore, $\beta_1=\{3,4\}$. Moreover, we choose ${{\bf f} y}=[0~1]$. Then, $\mathbf{u}={{\bf f} y} {{\bf f} H} = [1~0~1~2]$ and ${{\bf f} y}'=-\mathbf{u}=[-1~0~-1~-2]$ satisfying the conditions in Lemma \ref{lem:lemma1_4}. Therefore, the repair coefficients vector in \eqref{eq:CoefVec} is given by
\[
{{\bf f} y}' {EM}^{\beta_{1}} = [0~~0~-1~-2].
\]
Consequently,
\[
X_{1,1} = - X_{1,3} - 2X_{1,4}.
\]
The remaining failure $X_{1,2}$ can also be repaired by the same procedure.
\end{example}
\subsection{Inter-Rack Repair}
Communications across racks in a multi-rack storage network are in general more expensive. Consider the extreme case where each rack physically represents a data center, each of which is geographically distant from each other. In this case, data transmission across long distance is clearly more expensive than transmission within each rack. Therefore, it is often desirable to design codes such that more repairs can be done locally within racks.
However, in some rare cases (e.g., burst failures within a rack), node failures cannot be repaired locally. For example, this may occur when node $X_{m,j}$ fails and for all
$\beta \in \Omega({{\bf f} H} , j)$, there is at least another node $X_{m,k}$ for $k\in\beta$ which also fails.
When intra-rack repair fails, inter-rack repair can be done. The idea is described below.
Let
$
\mathbf{h} = (h_{1}, \ldots, h_{N})
$,
$
\mathbf{k} = (k_{1}, \ldots, k_{N})
$
and
$
\mathbf{g}=(g_{1}, \ldots, g_{M})
$
be vectors spanned respectively by the rows of the matrices ${{\bf f} H}$, ${{\bf f} K}$ and ${{\bf f} G}$. Then, it can be verified directly from \eqref{eq:IRP} and \eqref{eq:ARP} that
\begin{align}
\mathbf{h} X_{m,*}^{\top} &= 0 \\
\mathbf{k} X_{m,*}^{\top} & = -g_{m}^{-1} \sum_{i\in\tau\setminus\{ m\}} \mathbf{k} \:g_{i}X_{i,*}^{\top}
\end{align}
where $\tau = \{i \in \mathcal M:\: g_{i} \neq 0 \}$ and is assumed to contain $m$.
Suppose
$\beta = \lambda(\mathbf{h} + \mathbf{k})$
and $j \in\beta$. Then
\begin{align}
(h_{j} +k_{j}) X_{m,j}
= - \sum_{i\in \beta \setminus \{j\}} (h_{i}+k_{i}) X_{m,i} -g_{m}^{-1} \sum_{i\in \tau\setminus \{m\}} \mathbf{k} g_{i}X_{i,*}^{\top} \label{eq7}
\end{align}
can be used to recover $X_{m,j}$. Equation \eqref{eq7} consequently defines the across-rack repairs. To be more precise, in order to repair the failed node $X_{m,j}$, one would need 1) code symbols $X_{m,i}$ from the failing rack $m$ for $i \in \beta \setminus \{j\}$, and 2) code symbols $X_{i,\ell}$ from rack $i$ for $i\in \tau\setminus \{m\}$ and $\ell \in \{n \in {\cal N}:\: k_{n} \neq 0 \}$. In other words, to repair a failed node $X_{m,j}$, a group of helper racks $\tau$ is identified by the parity matrix ${{\bf f} G}$. Also, a group of helper nodes in each helper rack is identified by the parity matrix ${{\bf f} K}$. The helper nodes in each helper rack will send their content to the rack processing unit. Each helper rack processing unit calculates a linear combination of the helper nodes' content and sends it to the processing unit of the failed rack $m$. The processing unit of the failed rack $m$ calculates the sum of this information received from the helper racks. A group of survived nodes from the failed rack, which are specified by ${{\bf f} H}$ and ${{\bf f} K}$, send their content to the rack processing unit. The processing unit then calculates a linear combination of the information from these nodes and adds it to the information from the helper racks. This results in the information content of the failed node $X_{m,j}$.
\begin{theorem}\label{thm:2}
Suppose node $j$ fails in rack $m=1$. Let
${pat}_{j}$ be the index set for all failed nodes (hence, $j \in {pat}_{j}$).
If $(\beta_{j} , \mu_{j} , {{\bf f} r}_{j}, \tau )$ satisfies the following criteria,
\begin{enumerate}
\item \label{item:ARRPcr1}
${{\bf f} r}_{j} \in \langle {{\bf f} K}\rangle$
\item \label{item:ARRPcr2}
$\mu_{j} = \lambda({{\bf f} r}_{j})$ (i.e., $\mu_{j} = \left\{ n \in \{1, \ldots, N \}: r_{j,n} \neq 0 \right\}$)
\item \label{item:ARRPcr3}
$\beta_{j} \in \Omega({{\bf f} H}, {{\bf f} r}_{j} , j)$, and
\item \label{item:ARRPcr4}
$\beta_{j} \cap {pat}_{j} = \emptyset$,
\item \label{item:ARRPcr5}
$\tau \in \Omega({{\bf f} G}, 1)$, where $\tau \subseteq \{1, \ldots, M \}$
\end{enumerate}
then there exists $c_{j,n}$ for $n\in \beta_{j}$ and $d_{j,m,s}$ for $m\in\tau, s\in\mu_{j}$ such that
\begin{align}\label{eq:thm2}
X_{1,j} = \sum_{m\in\tau} \left(
\sum_{s\in \mu_{j}} d_{j,m,s} X_{m,s}
\right)
+ \sum_{n \in \beta_{j}} c_{j,n} X_{1,n}.
\end{align}
\end{theorem}
\begin{proof}[Proof of Theorem \ref{thm:2}]
By Criterion 1), there exists a row vector ${{\bf f} u}$ of length $S_{2}$ such that
\begin{align} \label{eq:Kspan}
{{\bf f} r}_{j} = {{\bf f} u} {{\bf f} K}.
\end{align}
In other words, ${{\bf f} r}_j$ is a vector from row space of ${{\bf f} K}$. From Lemma \ref{lem:lemma1_4}, as $\beta_{j} \in \Omega({{\bf f} H}, {{\bf f} r}_{j} , j)$ by Criterion 3, there exist row vectors ${{\bf f} y}$, ${{\bf f} y}'$ and $a\in \mathbb{F}_q$ such that
\begin{align} \label{eq:eVector}
{{\bf f} e}_{j} = {{\bf f} y} {{\bf f} H} + {{\bf f} y}' {EM}^{\beta_{j}} + a {{\bf f} r}_{j}.
\end{align}
Now, notice
\begin{align}
X_{1,j} & = {{\bf f} e}_{j} X_{1,*}^{\top} \\
& = ({{\bf f} y} {{\bf f} H} + {{\bf f} y}' {EM}^{\beta_{j}} + a {{\bf f} r}_{j}) X_{1,*}^{\top} \\
& = {{\bf f} y}' {EM}^{\beta_{j}} X_{1,*}^{\top} + a {{\bf f} r}_{j} X_{1,*}^{\top}
\end{align}
where the last equality follows from \eqref{eq:IRP}. Let
\begin{align} \label{eq:cVector}
[c_{j,1} , \ldots, c_{j,N} ] = {{\bf f} y}' {EM}^{\beta_{j}}.
\end{align}
Then
\[
{{\bf f} y}' {EM}^{\beta_{j}} X_{1,*}^{\top} =
\sum_{n\in\beta_{j}} c_{j,n} X_{1,n}.
\]
Consequently,
\begin{align}
X_{1,j} & = \sum_{n\in\beta_{j}} c_{j,n} X_{1,n} + a {{\bf f} r}_{j} X_{1,*}^{\top}.
\end{align}
Let ${{\bf f} f}_{1} =[f_{1} , \ldots, f_{M}]$ be a length $M$ row vector such that
\begin{align} \label{eq:fVector}
f_{\ell} =
\begin{cases}
1 & \text{ if } \ell = 1 \\
0 & \text{ otherwise.}
\end{cases}
\end{align}
Then
\[
X_{1,*} = {{\bf f} f}_{1} {X}
\]
and hence
\begin{align}
a {{\bf f} r}_{j} X_{1,*}^{\top} & = a X_{1,*} {{\bf f} r}_{j}^{\top} \\
& = a {{\bf f} f}_{1} {X} {{\bf f} r}_{j}^{\top}.
\end{align}
From Lemma \ref{lem:lemma1_4}, since $\tau \in \Omega({{\bf f} G}, 1)$, there exist vectors ${{\bf f} z}$ and ${{\bf f} z}'$ such that
\begin{align} \label{eq:fVector2}
{{\bf f} f}_{1} = {{\bf f} z} {{\bf f} G} + {{\bf f} z}' {EM}^{\tau} .
\end{align}
Now, notice that ${EM}^{\tau}$ is a $M\times M$ matrix over $\mathbb{F}_q$, as ${{\bf f} G}$ has only $M$ columns.
Consequently,
\begin{align}
a {{\bf f} r}_{j} X_{1,*}^{\top} & = a ( {{\bf f} z} {{\bf f} G} + {{\bf f} z}' {EM}^{\tau} ){X} {{\bf f} r}_{j}^{\top} \\
& = a {{\bf f} z}' {EM}^{\tau} {X} {{\bf f} r}_{j}^{\top}
\end{align}
where the last equality follows from that
\begin{align}
{{\bf f} z} {{\bf f} G}{X} {{\bf f} r}_{j}^{\top} & = {{\bf f} z} {{\bf f} G} {X} {{\bf f} K}^{\top} {{\bf f} u}^{\top} \\
& = {{\bf f} z} \left( {{\bf f} G} {X} {{\bf f} K}^{\top} \right) {{\bf f} u}^{\top} \\
& = 0,
\end{align}
where the last equality follows from \eqref{eq:ARP}.
Let the matrix
\begin{align} \label{eq:dmatrix}
\left[
\begin{array}{ccc}
d_{j,1,1} & \cdots & d_{j,1,N} \\
\vdots & \ddots &\vdots \\
d_{j,M,1} & \cdots, & d_{j,M,N} \\
\end{array}
\right] & = a({{\bf f} z}' {EM}^{\tau} )^{\top} {{\bf f} r}_{j} \\
& = a {EM}^{\tau} ({{\bf f} z}')^{\top} {{\bf f} r}_{j},
\end{align}
as ${EM}^{\tau} $ is a diagonal matrix. If
${{\bf f} r}_{j} = [r_{j,1} , \ldots, r_{j,N}] $ and
${{\bf f} z}' {EM}^{\tau} \triangleq {{\bf f} v} \triangleq [v_{1}, \ldots, v_{M} ]$, then it can be verified easily that
\begin{enumerate}
\item $d_{j,m,n} = a v_{m} r_{j,n}$ and hence
$d_{j,m,n} = 0$ if either $m \not\in \tau$ or $n \not \in \mu_{j}$.
\item
\begin{align}
a {{\bf f} r}_{j} X_{1,*}^{\top} & = \sum_{m\in\tau} \left(
\sum_{n\in \mu_{j}} d_{j,m,n} X_{m,n}
\right).
\end{align}
\end{enumerate}
Thus the theorem is then proved.
\end{proof}
\begin{remark}
The interpretation of the theorem is as follows:
The support of ${{\bf f} r}_j \in \langle {{\bf f} K}\rangle$ corresponds to the indices of nodes in the ``helper racks''. Clearly, the smaller the support, the better, in order to minimise transmission cost. However, we would also point out that the transmission costs required to transmit across racks do not depend on the support size of
${{\bf f} r}_j $. More precisely, for each helper rack, only the sum
$ \sum_{s\in \mu_{j}} d_{j,m,s} X_{m,s}$ is required to be transmitted, instead of specific individual $X_{m,s}$.
On the other hand, the set $\beta_j$ denotes the set of nodes which can be used to repair $X_{1,j}$. Consequently, $\beta_j$ and ${pat}_{j}$ (index set for the failed nodes in rack $1$) must be disjoint. Finally, $\tau $ is the index set of the helper racks.
Note also that Theorem \ref{thm:2} reduces to Theorem \ref{thm1} if $\tau = \mu_{j}= \emptyset$ and ${{\bf f} r}_{j}$ is the zero vector.
\end{remark}
\begin{remark}
In the proof of Theorem \ref{thm:2}, vectors are chosen such that
\begin{align}
{{\bf f} e}_{j} &= {{\bf f} y} {{\bf f} H} + {{\bf f} y}' {EM}^{\beta_{j}} + a {{\bf f} r}_{j} \\
{{\bf f} f}_{1} & = {{\bf f} z} {{\bf f} G} + {{\bf f} z}' {EM}^{\tau} .
\end{align}
such that
\begin{align}
X_{1,j} & = {{\bf f} y}' {EM}^{\beta_{j}} X_{1,*}^{\top} + a {{\bf f} z}' {EM}^{\tau} X {{\bf f} r}_{j}^{\top}
\end{align}
\begin{align}
\left[
\begin{array}{ccc}
d_{j,1,1} & \cdots & d_{j,1,N} \\
\vdots & \ddots &\vdots \\
d_{j,M,1} & \cdots, & d_{j,M,N} \\
\end{array}
\right]
& = a {EM}^{\tau} ({{\bf f} z}')^{\top} {{\bf f} r}_{j},
\end{align}
\end{remark}
\def{{\bf f} Supp}{{{\bf f} Supp}}
\begin{example} \label{examp:ARRP}
Consider a rack model storage network with $M=5$ racks, each of which contains $N=8$ storage nodes. Suppose the parity check matrices are as follows:
\[
{{\bf f} H} =
\left[
\begin{array}{cccccccc}
1 & 1 & 1 & 0 & 1 & 0 & 0 & 0 \\
1 & 1 & 0 & 1 & 0 & 1 & 0 & 0 \\
0 & 1 & 1 & 1 & 0 & 0 & 1 & 0 \\
1 & 0 & 1 & 1 & 0 & 0 & 0 & 1 \\
\end{array}
\right],
\]
\[
{{\bf f} K} =
\left[
\begin{array}{cccccccc}
1 & 1 & 0 & 1 & 1 & 0 & 0 & 1 \\
0 & 1 & 1 & 0 & 1 & 0 & 1 & 1 \\
\end{array}
\right],
\]
and
\[
{{\bf f} G} =
\left[
\begin{array}{ccccc}
1 & 1 & 1 & 1 & 0 \\
0 & 1 & 1 & 1 & 1 \\
1 & 1 & 0 & 1 & 1 \\
\end{array}
\right].
\]
Suppose node $X_{1,1}$ fails. Then by Definition \ref{def:RepairGroup}, it can be verified that
\begin{multline*}
\Omega({{\bf f} H},1) = \Big\{\{3,4,8\}, \{2,7,8\}, \{2,4,6\}, \{3,6,7\}, \{2,3,5\}, \\
\{4,5,7\}, \{5,6,8\}, \{2,3,4,5,6,7,8\}\Big\}.
\end{multline*}
In particular, any subset of nodes in rack $1$ indexed by $\Omega({{\bf f} H},1)$ can be used to repair $X_{1,1}$.
Now, suppose that the following nodes $ \{X_{1,1}, X_{1,2}, X_{1,4}, X_{1,6}\}$ failed in rack 1 (i.e., ${pat} = \{1,2,4,6 \}$). In this case, $X_{1,1}$ cannot be repaired via intra-rack repair since there exists no intra-rack repair group $\beta_1 \in \Omega({{\bf f} H},1)$ such that $\beta_1 \cap {pat}=\emptyset$. Therefore, inter-rack repair is needed.
Choose ${{\bf f} y} = [1 ~ 0 ~ 0 ~0]$, $a=-1$, ${{\bf f} z} = [1 ~ 0 ~ 0]$ and ${{\bf f} z}' = [0 ~ -1 ~ -1~ -1~ 0]$,
\begin{align*}
{{\bf f} r}_1 &=
\left[
\begin{array}{cccccccc}
0 & 1 & 1 & 0 & 1 & 0 & 1 & 1
\end{array}
\right], \\
{{\bf f} y}' &=
\left[
\begin{array}{cccccccc}
0 & 0 & 0 & 0 & 0 & 0 & -1 & -1
\end{array}
\right],
\end{align*}
Then
\begin{align}
{{\bf f} e}_{j} &= {{\bf f} y} {{\bf f} H} + {{\bf f} y}' {EM}^{\beta_{j}} + a {{\bf f} r}_{j} \\
{{\bf f} f}_{1} & = {{\bf f} z} {{\bf f} G} + {{\bf f} z}' {EM}^{\tau}.
\end{align}
It can be verified directly that ${{\bf f} r}_{1}$ satisfies the criterion \ref{item:ARRPcr1} in Theorem \ref{thm:2}. Then, following criterion \ref{item:ARRPcr2}, its support is $\mu_1= \{2, 3, 5, 7, 8\}$. Also, let $\beta_1 = \{7,8\}$ and $\tau = \{2,3,4\}$ following from criteria \ref{item:ARRPcr3}--\ref{item:ARRPcr5}, respectively. Note that, $\tau$, $\mu_1$, and $\beta_1$ indicate the group of helper racks, the group of helper nodes in the helper racks, and a repair group in rack 1 which will participate in repairing the failed node $X_{1,1}$. By choosing the parameters $(\beta_1, \mu_1, {{\bf f} r}_1, \tau)$ as above and the proper values for ${{\bf f} y}$, ${{\bf f} y}'$, $a$, ${{\bf f} z}$, and ${{\bf f} z}'$ respectively satisfying \eqref{eq:eVector} and \eqref{eq:fVector2} we have
\[
[c_{1,1} , \ldots, c_{1,8}] =
\left[
\begin{array}{cccccccc}
0 & 0 & 0 & 0 & 0 & 0 & 1 & 1
\end{array}
\right],
\]
and
\[
\left[
\begin{array}{ccc}
d_{1,1,1} & \cdots & d_{1,1,8} \\
\vdots & \ddots &\vdots \\
d_{1,5,1} & \cdots, & d_{1,5,8} \\
\end{array}
\right] =
\left[
\begin{array}{cccccccc}
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 1& 1 & 0 & 1 & 0 & 1 & 1 \\
0 & 1& 1 & 0 & 1 & 0 & 1 & 1 \\
0 & 1& 1 & 0 & 1 & 0 & 1 & 1 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0
\end{array}
\right].
\]
Now, the failed node $X_{1,1}$ can be recovered by \eqref{eq:thm2} such that
\begin{align} \nonumber
X_{1,1} & = \sum_{m\in\{2,3,4\}} \left(
\sum_{s\in \{2,3,5,7,8\}} d_{1,m,s} X_{m,s}
\right) + \sum_{n \in \{7,8\}} c_{1,n} X_{1,n}.
\end{align}
\end{example}
\begin{remark}
As a consequence of Theorem \ref{thm:2}, the processing unit in rack $m$, where $m\in \tau$, will retrieve $|\mu_{j}|$ symbols. The processing unit of rack 1 will need to retrieve $|\beta_{j}|$ symbols within the rack. Also, one symbol transmission is needed for the processing unit of rack 1 to send the recovered symbol back to the failed storage node $X_{1,j}$. Finally, each helper rack indexed in $\tau$ will transmit 1 symbol to the processing unit of rack 1. Summing up all these transmissions, there are in total 1)
$
|\beta_{j}| + |\mu_{j}||\tau| + 1
$
symbol transmissions within racks, and 2) $|\tau|$ symbol transmissions across racks.
\end{remark}
\begin{example}
In Example \ref{examp:ARRP}, a total of 18 symbol transmissions within racks and 3 symbol transmissions across racks are needed to repair the failed node $X_{1,1}$.
\end{example}
When we need to repair multiple failed nodes (say $|{pat}_{j}|$ of them) via inter-rack repair, the cost is not simply $|{pat}_j|$ times that of a single repair: First, it is possible that an inter-rack transmission can be used to repair more than one node. Second, once inter-rack repair has been achieved, nodes which were previously not repairable via intra-rack repair may become repairable.
To be more precise, suppose nodes $(X_{1, j} , j \in {pat})$ fail where $|{pat}| \ge {\text{Dist}}({{\bf f} H}) $. In this case, the node failures may not be recoverable merely via intra-rack repairs. Let $\alpha\subseteq {pat}$ be of size ${\text{Dist}}({{\bf f} H}) - 1$. In that case,
in the worst-case scenario, one can at least aim to recover the variables $X_{1, j}$ for $j \in {pat}\setminus\alpha$ via inter-rack repair first. Once this is achieved, the remaining nodes
$(X_{1, j}, j \in \alpha)$ can be recovered via intra-rack repair.
Following this idea, the following theorem gives an upper bound on the repair transmission cost.
\begin{theorem}[Upper bound on transmission costs] \label{thm:cost}
Let ${pat}$ be the index set of all failed nodes in rack $1$.
Suppose that 1) for any $j \in {pat}\setminus \alpha$, there exists $(\beta_{j}, \mu_{j} , {{\bf f} r}_{j}, \tau)$ satisfying the criteria in Theorem \ref{thm:2} where ${pat}_{j} \triangleq {pat}$ and 2) for any $j \in \alpha$, there exists $\beta_j$ satisfying the criteria in Theorem \ref{thm1} where ${pat}_{j} \triangleq \alpha$.
Then the required total transmissions within a rack $\theta_{intra}$ and across racks $\theta_{inter}$ are respectively upper bounded by
\begin{align}
\theta_{intra} &\leq |\tau| \left| \bigcup_{j\in{pat}\setminus\alpha}\mu_{j}\right| +
\left|\bigcup_{j \in {pat} }\beta_{j} \right| + |{pat}| \label{thm3:eqa}\\
\theta_{inter} & \leq |\tau| \dim \langle {{\bf f} r}_j , j\in {pat}\setminus\alpha \rangle. \label{thm3:eqb}
\end{align}
\end{theorem}
\begin{proof}
First, we will consider intra-rack transmissions. In each helper rack (say $m \in \tau$) involved in the inter-rack repair, $|\mu_{j}|$ nodes in it will need to transmit their data ($X_{m,s}$ where $s\in\mu_{j}$) to the processing unit in the helper rack.
$
\bigcup_{j\in{pat}\setminus \alpha}\mu_{j}
$.
Consequently, the total number of symbols sent to the processing units of the helper racks is equal to
$
|\tau||\bigcup_{j\in{pat}\setminus \alpha}\mu_{j}|
$ where $|\tau|$ is the number of helper racks. That explains the first term in the RHS of \eqref{thm3:eqa}.
Next, at each helper rack $m$, it will need to transmit
\[
\{ {{\bf f} r}_{j} X_{m,*} : j \in {pat}\setminus \alpha\}
\]
to the failing rack.
As these symbols may be linearly dependent, the actual number of symbols that really need to be transmitted is only $\dim \langle {{\bf f} r}_j , j\in {pat}\setminus\alpha \rangle$. Therefore, the total number of inter-rack transmissions is $|\tau|\dim \langle {{\bf f} r}_j , j\in {pat}\setminus\alpha \rangle$. This explains the upper bound in \eqref{thm3:eqb}.
After receiving the transmissions from the helper racks, the processing unit of the failed rack can now aim to recover the failed nodes. For each $j\in {pat}$, it requires transmission from nodes in the set $\beta_{j}$. Therefore, the number of intra-rack transmissions in rack 1 from nodes to the processing unit is
$
|\cup_{j\in{pat}} \beta_{j}|
$. This corresponds to the second term in RHS of \eqref{thm3:eqa}.
Finally, receiving all the symbols, the processing unit can recover the contents of all the failed nodes. It requires $|{pat}|$ intra-rack transmissions from the processing unit of the failed rack to the failed nodes for recovery, explaining the last term in the RHS of \eqref{thm3:eqa}. The theorem is thus proved.
\end{proof}
\subsection{Code Rate}
In this subsection we derive the rate of our proposed code for multi-rack storage networks. The code rate will later be employed to establish the upper bound of the code size in Section \ref{Sec:VI}. First, we will need the following lemma, which is a fundamental result in linear algebra.
\begin{lemma} \label{lemm:SolNum}
Let ${{\bf f} A}$ be a $(k \times n)$ full rank matrix and $\mathbf{b}$ a column vector of length $k$ over $\mathbb{F}_q$ such that $k \leq n$. If ${{\bf f} x}=[x_1,\ldots,x_n]$ is a vector of unknowns over $\mathbb{F}_{q}$, then there exist $q^{n-k}$ solutions for the system of linear equations ${{\bf f} A}{{\bf f} x}=\mathbf{b}$.
\end{lemma}
The following theorem gives the rate of the multi-rack storage code.
\begin{theorem} \label{thm:CodeRate}
Let ${{\bf f} H}$, ${{\bf f} K}$, and ${{\bf f} G}$ be respectively $S_{1} \times N$, $S_{2} \times N$, and $L \times M$ matrices. Then the rate of the multi-rack storage code
$({{\bf f} H}, {{\bf f} K}, {{\bf f} G})$ is lower bounded by
\begin{align} \label{eq:RackCodeRate1}
R \geq \frac{MN - MS_{1} - LS_{2}}{MN}.
\end{align}
Equality holds if rows in ${{\bf f} H}$ and ${{\bf f} K}$ are linearly independent, and ${{\bf f} G}$ is a full rank matrix.
\end{theorem}
\begin{proof}
According to \eqref{eq:IRP} and \eqref{eq:ARP}, the total number of parity check equations is at most $MS_{1} + LS_{2}$ while the number of variables is $MN$.
Therefore, the rate of the code is at least
\[
\frac{MN - MS_{1} - LS_{2}}{MN}.
\]
Now, consider the following set
$$
\S_m ({\mathbf{b}}_{m})=
\left\{
X_{m,*}^{\top} : \: {{\bf f} H} X_{m,*}^{\top} = {{\bf f} 0} \text{ and } {{\bf f} K} X_{m,*}^{\top} = {\mathbf{b}}_{m}
\right\}.
$$
In other words, $\S_m({\mathbf{b}}_{m})$ is the set of solutions for the system of linear equations specified by parity check matrices ${{\bf f} H}$ and ${{\bf f} K}$ in \eqref{eq:IRP} and \eqref{eq:ARP}, respectively. According to Lemma \ref{lemm:SolNum}, the number of these solutions for any $\mathbf{b}_m$ is given by
\[
|\S_m({\mathbf{b}}_{m})| = q^{N - S_{1} - S_{2}},
\]
where $q$ is the field size. Similarly, let
\begin{align}
S({{\bf f} B}) = \left\{{X} :\: {{\bf f} H} X^{\top} = {{\bf f} 0} \text{ and } {{\bf f} K} X^{\top} = {{\bf f} B}\right\}.
\end{align}
where ${{\bf f} B} = [{\mathbf{b}}_{1}, \ldots, {\mathbf{b}}_{M}]$.
Then
$
|S({{\bf f} B})| = q^{M(N - S_{1} - S_{2})}
$
for any ${{\bf f} B}$.
Now, consider the set
\[
{\cal S} \triangleq \{
{X}:\: {X} \text{ satisfies \eqref{eq:IRP} and \eqref{eq:ARP}}
\}.
\]
Then it is clear that
\begin{align*}
|{\cal S}| & = \sum_{{{\bf f} B} : \: {{\bf f} B}{{\bf f} G}^{\top} = {{\bf f} 0} } |S({{\bf f} B})| \\
& = |\Delta| q^{M(N - S_{1} - S_{2})}
\end{align*}
where
$
\Delta =
\left\{
{\bf f} B: \: {\bf f} B {{\bf f} G}^{\top} = {{\bf f} 0}
\right\}.
$
As the rank of ${{\bf f} G}$ is $L$ and ${\bf f} B$ is a matrix of size $S_{2}\times M$,
$
|\Delta| = q^{(M-L)S_{2}}
$.
Consequently,
\begin{align}
|{\cal S}| & = q^{(M-L)S_{2}}q^{M(N - S_{1} - S_{2})} \\
& = q^{MN - MS_{1} - LS_{2}}.
\end{align}
The theorem then follows.
\end{proof}
\begin{comment}
The rack model storage code in \eqref{eq:paritya} and \eqref{eq:helperinf} resembles the concept of the product codes introduced by Ellias \cite{ProductCode}. Let ${{\bf f} H}_1$ and ${{\bf f} H}_2$ be the parity check matrices of a $(n_1,k_1)$ and a $(n_2,k_2)$ linear erasure code $\mathcal C_1$ and $\mathcal C_2$, respectively. Then, a standard product code $\mathcal P(\mathcal C_1,\mathcal C_2)$ is a $(n_1n_2,k_1k_2)$ linear erasure code, such that
\begin{align} \label{eq:ProductCode}
\mathcal P(\mathcal C_1,\mathcal C_2)=\{{X} \in \mathbb{F}^{n_1n_2}_q: {{\bf f} H}_1{X}=\mathbf{0}, {X}{{\bf f} H}_2^\top=\mathbf{0}\}.
\end{align}
It has been shown in \cite{ProductCodeSym} that if $d_1$ and $d_2$ be the minimum distances of the codes $\mathcal C_1$ and $\mathcal C_2$,respectively, then the minimum distance of the product code $\mathcal P(\mathcal C_1,\mathcal C_2)$ will be $d=d_1d_2$. Therefore, product codes are useful to construct erasure codes with large minimum distances by combining the erasure codes with small minimum distances. However, the main difference of product codes compared to our rack model storage code is the role of the matrix ${{\bf f} B}$ in \eqref{eq:helperinf}. Our coding scheme has the ability to construct a relation between each set of encoded symbols stored in a rack with the information stored in the other racks via matrix ${{\bf f} B}$. This property is the key feature of our code which makes it suitable for rack model storage networks.
Suppose ${{\bf f} H}$ and ${{\bf f} K}$ of size $S_1\times N$ and $S_2\times N$ be the inside and across rack parity matrix of the rack model storage code, respectively. Let ${{\bf f} H}$ and ${{\bf f} K}$ be used to construct a product code defined in \eqref{eq:ProductCode}. Then, the product code $\mathcal P(\mathcal C_1,\mathcal C_2)$ will be a $\left(N^2,(N-S_1)(N-S_2)\right)$ code. Therefore, the rate of the constructed product code will be
\begin{align} \label{eq:ProdCodeRate}
R_{\prod}=\frac{(N-S_1)(N-S_2)}{N^2}.
\end{align}
For computational simplicity, suppose the entries of the matrix ${{\bf f} B}$ in the rack model storage code is predetermined. Therefore, according to \eqref{eq:RackCodeRate1}, the rate of the rack model storage code (a detailed description of the code rate is presented in Chapter \ref{Chapter5}) will be
\begin{align} \label{eq:RackCodRate2}
R_{rack}=\frac{N-S_1-S_2}{N}.
\end{align}
Consequently, with a slight manipulation of \eqref{eq:ProdCodeRate} and \eqref{eq:RackCodRate2} we have
\[
R_{\prod}=R_{rack}+\frac{S_1S_2}{N^2}.
\]
\end{comment}
\section{Bounds} \label{Sec:V}
\subsection{Linear Programming Bound}
In the previous section, we introduced a class of storage codes called multi-rack storage codes and explained how to repair nodes failure via intra-rack or inter-rack repairs. In this section, we will develop bounds for this class of codes.
Recall our code construction and definition. We will notice the following:
\begin{enumerate}
\item
The intra-rack parity check matrix ${{\bf f} H}$, or more precisely the support of the dual codewords spanned by the rows of ${{\bf f} H}$, determines how failed nodes can be repaired locally within a rack. Alternatively, the dual codewords spanned by the intra-rack parity check matrix ${{\bf f} H}$ and the inter-rack parity check matrix ${{\bf f} K}$ together define the inter-rack repair process.
\item
The helper-rack parity check matrix ${{\bf f} G}$ specifies which racks can be used in the inter-rack repair process. Naturally, one would prefer to involve only a small number of racks to minimize the inter-rack transmission cost.
\end{enumerate}
Assuming without loss of generality that all rows of ${{\bf f} H}$ and ${{\bf f} K}$ are independent and that ${{\bf f} G}$ is full rank,
Theorem \ref{thm:CodeRate} shows that
\begin{align}\label{eq24}
R(\Lambda_{\mathcal C}) = \frac{MN - MS_{1} - LS_{2}}{MN},
\end{align}
or equivalently,
\begin{align}\label{eq25}
R(\Lambda_{\mathcal C}) = \frac{ N - S_{1} - S_{2} }{N}+\frac{S_{2}(M-L)}{MN}.
\end{align}
The rate of the multi-rack storage code is essentially determined by the size of the matrices ${{\bf f} H}, {{\bf f} K}$ and ${{\bf f} G}$.
Understanding their roles, we can immediately recognise that one can separately design ${{\bf f} G}$ and $({{\bf f} H}, {{\bf f} K})$. The design of ${{\bf f} G}$ will affect the number of helper racks needed in inter-rack repair. In fact, it is very similar to the design of locally repairable codes. The idea is to design a linear code (specified by the parity check matrix ${{\bf f} G}$) such that for any $m \in \mathcal M$, there are dual codewords $\mathbf{g}$ with a small support containing $m$. If the racks are geographically separated and connected to a network, the design of ${{\bf f} G}$ may take into account the network topology and the costs of the transmission links. For example, an algorithm for the design of linear binary locally repairable codes over a network can be found in \cite{LRCTopo}. There are also previous works, including our own work in \cite{LPLR} and \cite{RLLC}, which discuss the design of and bounds for locally repairable codes.
On the other hand, the design of ${{\bf f} H}$ and ${{\bf f} K}$ will affect the code's ability in intra-rack and inter-rack repair. The focus of the remaining paper is on understanding the fundamental limits of the best design of these two matrices.
Separating the design of $({{\bf f} H}, {{\bf f} K})$ from ${{\bf f} G}$, we can consider the simple special case where there are only two racks (i.e., $M = 2$) and
$$
{{\bf f} G} = [1,-1].
$$
Without loss of generality, we may characterise our multi-rack storage code via the following parity-check equations:
\begin{align}
{{\bf f} H} {X}_{1,*}^{\top} & = \textbf{0} \\
{{\bf f} H} {X}_{2,*}^{\top} & = \textbf{0} \\
{{\bf f} K} {X}_{1,*}^{\top} & = {{\bf f} K} {X}_{2,*}^{\top}.
\end{align}
As such, we will simply refer to a multi-rack storage code as $({{\bf f} H},{{\bf f} K})$.
\begin{definition}\label{def2}
We call a multi-rack storage code $({{\bf f} H},{{\bf f} K})$ a
$(\beta_{1}, \Gamma_{1}, r_1, \beta_{2}, \Gamma_{2}, r_2, a)$ linear multi-rack storage code if it satisfies the following conditions:
\begin{enumerate}
\item (Intra-rack resilience)
Any $\beta_{1}$ node failures in a rack can be repaired via intra-rack repair;
\item (Intra-rack locality)
For any $\Gamma_{1} + 1$ node failure pattern in a rack, each node can be repaired via intra-rack repair, involving at most $r_{1}$ surviving nodes;
\item (Inter-rack resilience) Any $\beta_{2}$ node failures in a rack can be repaired via inter-rack repair.
\item (Inter-rack locality) For any $\Gamma_{2} + 1$ node failure pattern in a rack, each node can be repaired via inter-rack repair, involving $i$) at most $r_{2}$ surviving nodes in the failing rack and $ii$) at most $a$ nodes from each helper rack.
\end{enumerate}
\end{definition}
\def\mathbf{f}{{{\bf f} f}}
\def\mathbf{g}{{{\bf f} g}}
The definition for $(\beta_{1}, \Gamma_{1}, r_1, \beta_{2}, \Gamma_{2}, r_2, a)$ linear multi-rack storage code
can be made more precise via the use of support enumerators, to be described as follows.
To simplify our notation, we may use ${{\bf f} x}$ and ${{\bf f} y}$ instead of $X_{1,*}^{\top}$ and $X_{2,*}^{\top}$.
Let
\begin{align} \label{eq:TwoRackCode}
\mathcal C =
\{
({{\bf f} x}, {{\bf f} y}) : {{\bf f} H} {{\bf f} x} = {{\bf f} H} {{\bf f} y} = {{\bf f} 0}, \: {{\bf f} K} {{\bf f} x} = {{\bf f} K} {{\bf f} y}
\}.
\end{align}
We call $\mathcal C$ the codebook. Clearly, the design of $\mathcal C$ and the design of $({{\bf f} H},{{\bf f} K})$ are equivalent.
\begin{definition}[Support]
Consider any codeword $({{\bf f} x}, {{\bf f} y}) \in \mathcal C$ where
${{\bf f} x}=(x_{1},\ldots, x_{N})$ and ${{\bf f} y}=(y_{1},\ldots, y_{N})$. Its ``support'' $\lambda({{\bf f} x},{{\bf f} y})$ is a tuple $(\mathbf{w} , \mathbf{s})$ such that $\mathbf{w} = (w_1, \ldots, w_N)$ and $\mathbf{s} = (s_1, \ldots, s_N)$, where
\begin{align}
w_{i} =
\begin{cases}
1 & \text{ if } x_{i} \neq 0 \\
0 & \text{ if } x_{i} = 0
\end{cases} \label{eq12}\\
s_{i} =
\begin{cases}
1 & \text{ if } y_{i} \neq 0 \\
0 & \text{ if } y_{i} = 0,
\end{cases}\label{eq13}
\end{align}
for all $i=1, \ldots, N$. For notation simplicity, we will simply denote that
$$
\lambda({{\bf f} x},{{\bf f} y}) = (\mathbf{w},\mathbf{s}).
$$
\end{definition}
\begin{remark}
While $\mathbf{w}$ and $\mathbf{s}$ are subsets of ${\cal N}$, it is sometimes simpler and more practical to represent them as vectors, as in \eqref{eq12} and \eqref{eq13}.
\end{remark}
\begin{definition}[Support enumerator]
The enumerator function of the code $\Lambda_\mathcal C(\mathbf{w} ,\mathbf{s})$ is defined as
\begin{align}
\Lambda_\mathcal C(\mathbf{w},\mathbf{s})
\triangleq \left|\left\{({{\bf f} x},{{\bf f} y}) \in \mathcal C : \lambda({{\bf f} x},{{\bf f} y}) = (\mathbf{w},\mathbf{s})\right\}\right|
\end{align}
for all $\mathbf{w}, \mathbf{s} \subseteq {\cal N}$.
\end{definition}
The theorem below gives properties of a multi-rack storage code. As we shall see, these properties will form constraints in our linear programming bound.
\begin{theorem}\label{thm5}
For any $(\beta_{1}, \Gamma_{1}, r_1, \beta_{2}, \Gamma_{2}, r_2, a)$ multi-rack storage code $\mathcal C$,
the support enumerators of $\mathcal C$ and its dual $\mathcal{C}^\perp$ satisfy the following properties:
\begin{enumerate}
\item \label{item:RackModelCodeProperty1}
{{\bf f}{\emph{Dual codeword support enumerator:}}}
\begin{align}
\Lambda_{\mathcal{C}^\perp}(\mathbf{w} ,\mathbf{s})
= \frac{1}{|\mathcal C|} \sum_{\mathbf{w}' , \mathbf{s}' \subseteq {\cal N}} \Lambda_\mathcal C(\mathbf{w}', \mathbf{s}') \prod_{j\in {\cal N}} \kappa_q (w'_j ,w_j)\kappa_q (s'_j,s_j) \label{eqThm:a}
\end{align}
where
\[
\kappa_{q}\left(u,v\right)=
\begin{cases}
1 & \text{ if } v=0 \\
q-1 & \text{ if } u=0 \text{ and } v=1 \\
-1 & \text{ otherwise. }
\end{cases}
\]
\item \label{item:RackModelCodeProperty2}
{{\bf f}{\emph{Symmetry:}}}
For all $\mathbf{w},\mathbf{s} \subseteq {\cal N}$,
\begin{align} \label{eqThm:b}
\Lambda_\mathcal C(\mathbf{w}, \mathbf{s}) = \Lambda_\mathcal C( \mathbf{s},\mathbf{w}).
\end{align}
\item \label{item:RackModelCodeProperty3}
{{\bf f}{\emph{Intra-rack resilience:}}}
For all $\mathbf{w} \subseteq {\cal N}$ such that $1 \le |\mathbf{w}| \le \beta_{1}$
\begin{align}\label{eqThm:c}
\Lambda_\mathcal C(\mathbf{w}, \mathbf{s}) = 0
\end{align}
\item \label{item:RackModelCodeProperty5}
{{\bf f}{\emph{Intra-rack locality:}}}
For any
$
(i,\gamma) \in \Phi(\Gamma_{1})\triangleq \{
(i,\gamma) : \: i \in {\cal N}, i \not \in \gamma \text{ and } |\gamma| = \Gamma_{1}\}
$,
\begin{align}\label{eqThm:d}
\displaystyle
\sum_{\mathbf{w} \in \Theta_{1}(i,\gamma,r_1)}
\Lambda_{\mathcal{C}^\perp}(\mathbf{w},\emptyset) \geq (q-1),
\end{align}
where
$
\Theta_{1}(i,\gamma,r_{1})
\triangleq
\{
\mathbf{w} : \: i\in\mathbf{w}, \mathbf{w} \cap \gamma = \emptyset \text{ and } |\mathbf{w}| \le r_{1}+1
\}.
$
\item \label{item:RackModelCodeProperty4}
{{\bf f}{\emph{Inter-rack resilience:}}}
For all $\mathbf{w} \subseteq {\cal N}$ such that $1 \le |\mathbf{w}| \le \beta_{2}$
\begin{align}\label{eqThm:e}
\Lambda_\mathcal C(\mathbf{w}, \emptyset) = 0
\end{align}
\item \label{item:RackModelCodeProperty6}
{{\bf f}{\emph{Inter-rack locality:}}}
For any
$
(i,\gamma) \in \Omega(\Gamma_{2})
$,
\begin{align}\label{eqThm:f}
\displaystyle
\sum_{ (\mathbf{w},\mathbf{s}) \in \Theta_{2}(i,\gamma, r_{2} , a) }
\Lambda_{\mathcal{C}^\perp}(\mathbf{w},\mathbf{s}) \geq (q-1),
\end{align}
where
$
\Theta_{2}(i,\gamma, r_{2} , a)
\triangleq
\{
(\mathbf{w},\mathbf{s}) : \: i\in \mathbf{w}, \mathbf{w} \cap \gamma = \emptyset ,
|\mathbf{w}| \le r_{2}+1 \text{ and } |\mathbf{s} | \le a
\}.
$
\end{enumerate}
\end{theorem}
The proof of Theorem \ref{thm5} is given in Appendix \ref{append:A}.
\begin{lemma} Consider a multi-rack storage code $({{\bf f} H}, {{\bf f} K} , {{\bf f} G})$. Suppose the dimensions of the matrices ${{\bf f} H}, {{\bf f} K} , {{\bf f} G}$ are respectively $S_{1} \times N$, $S_{2} \times N$, and $L\times M$. Then
\begin{align}\label{eq62}
N-S_{1}-S_{2} = \log_{q} \sum_{\mathbf{w}\subseteq{\cal N}}\Lambda_{\mathcal C}(\mathbf{w},\emptyset)
\end{align}
and
\begin{align}\label{eq63}
S_{2} = \log_{q} \sum_{\mathbf{w},\mathbf{s}\subseteq{\cal N}}\Lambda_{\mathcal C}(\mathbf{w},\mathbf{s}) - 2 \log_{q} \sum_{\mathbf{w}\subseteq{\cal N}}\Lambda_{\mathcal C}(\mathbf{w},\emptyset).
\end{align}
Hence, the rate of the code is
\begin{multline} \label{eq:CodeRateCodeSize}
R(\Lambda_{\mathcal C}) = \frac{ \log_{q} \sum_{\mathbf{w}\subseteq{\cal N}}\Lambda_{\mathcal C}(\mathbf{w},\emptyset)}{ N}
\\
+
\frac{M-L}{MN} \left(
\log_{q} \sum_{\mathbf{w},\mathbf{s}\subseteq{\cal N}}\Lambda_{\mathcal C}(\mathbf{w},\mathbf{s}) - 2 \log_{q} \sum_{\mathbf{w}\subseteq{\cal N}}\Lambda_{\mathcal C}(\mathbf{w},\emptyset)
\right).
\end{multline}
\end{lemma}
\begin{proof}
First, it is clear that
$
\sum_{\mathbf{w}\subseteq{\cal N}}\Lambda_{\mathcal C}(\mathbf{w},\emptyset)
$
is equal to the size of the following set
\[
|\{
({{\bf f} x}, {{\bf f} 0}) : {{\bf f} H} {{\bf f} x} = {{\bf f} 0}, \: {{\bf f} K} {{\bf f} x} = {{\bf f} 0}
\}
|
\]
As the dimensions of ${{\bf f} x}, {{\bf f} H}$ and ${{\bf f} K}$ are respectively
$N \times 1$, $S_{1} \times N$ and $S_{2} \times N$, the size of the set is
obviously $q^{N-S_{1}-S_{2}}$, leading to \eqref{eq62}.
Since ${{\bf f} H}$ and ${{\bf f} K}$ are respectively $S_1 \times N$ and $S_2 \times N$ parity matrices of the code $\mathcal C$ in \eqref{eq:TwoRackCode}, $i$) the total number of parity equations is $2S_1+S_2$, and $ii$) the length of codeword $({{\bf f} x},{{\bf f} y})$ is $2N$. Hence,
the total number of codewords satisfying \eqref{eq:TwoRackCode} is
\[
\sum_{\mathbf{w},\mathbf{s}\subseteq{\cal N}}\Lambda_{\mathcal C}(\mathbf{w},\mathbf{s})=q^{2N-2S_1-S_2}.
\]
Together with \eqref{eq62}, we have \eqref{eq63} and \eqref{eq:CodeRateCodeSize}.
\end{proof}
\def{O_{1}}{{O_{1}}}
\def\TOT{{T}}
\begin{remark}
According to \eqref{eq:CodeRateCodeSize}, the rate of the storage code is clearly nonlinear with respect to the support enumerator $\Lambda_{\mathcal C}$. However, if we fix
$$
\sum_{\mathbf{w}\subseteq{\cal N}}\Lambda_{\mathcal C}(\mathbf{w},\emptyset) = {O_{1}},
$$
then maximising $R(\Lambda_{\mathcal C})$ is equivalent to maximising
$\sum_{\mathbf{w},\mathbf{s}\subseteq{\cal N}}\Lambda_{\mathcal C}(\mathbf{w},\mathbf{s})$.
\end{remark}
\begin{theorem}[Upper bound]\label{thmLPbd}
Consider fixed $N$, $M$ and $L$ and a $(\beta_{1}, \Gamma_{1}, r_1, \beta_{2}, \Gamma_{2}, r_2, a)$ multi-rack storage code $\mathcal C$ such that
$$
\sum_{\mathbf{w}\subseteq{\cal N}}\Lambda_{\mathcal C}(\mathbf{w},\emptyset) = {O_{1}}.
$$
Let $O^{*}$ be the maximum of the following
linear programming problem.
\medskip
\noindent
\underline{Linear Programming Problem (LP1)}
\begin{align}
\displaystyle
&\textbf{Maximize} \quad \sum_{\mathbf{w},\mathbf{s} \subseteq {\cal N}} A_{\mathbf{w},\mathbf{s}} \nonumber\\
&\textbf{subject to} \nonumber\\
& \quad
A_{\mathbf{w},\mathbf{s}} \geq 0, \quad \forall \mathbf{w},\mathbf{s} \subseteq {\cal N} \tag{C1}\\
& \quad
A_{\mathbf{w},\mathbf{s}} = A_{\mathbf{s},\mathbf{w}} \tag{C2}\\
&\quad C_{\mathbf{w},\mathbf{s}} = \sum_{\mathbf{w}' , \mathbf{s}' \subseteq {\cal N}} A_{\mathbf{w}',\mathbf{s}'} \prod_{j\in {\cal N}} \kappa_q (w'_j ,w_j)\kappa_q (s'_j,s_j),
\quad \forall \mathbf{w},\mathbf{s} \subseteq {\cal N} \tag{C3}\label{eq:C3} \\
& \quad C_{\mathbf{w}, \mathbf{s}} \geq 0, \quad \forall \mathbf{w},\mathbf{s} \subseteq {\cal N} \tag{C4} \label{eq:C4} \\
& \quad A_{\emptyset,\emptyset} = 1 & \tag{C5}\label{eq:C5} \\
& \quad
A_{\mathbf{w},\mathbf{s}} = 0,
\quad \forall 1 \le |\mathbf{w}| \le \beta_{1} \tag{C6} \label{eq:C6}\\
& \quad
A_{\mathbf{w},\emptyset} = 0,
\quad \forall 1 \le |\mathbf{w}| \le \beta_{2} \tag{C7}\\
& \quad
\sum_{\mathbf{w} \in \Theta_{1}(i,\gamma,r_1)}
C_{\mathbf{w},\emptyset} \geq (q-1) \sum_{\mathbf{w},\mathbf{s}} A_{\mathbf{w},\mathbf{s}},
\quad \forall (i,\gamma) \in \Phi(\Gamma_{1}) \tag{C8} \\
& \quad
\sum_{ (\mathbf{w},\mathbf{s}) \in \Theta_{2}(i,\gamma, r_{2} , a) }
C_{\mathbf{w},\mathbf{s}} \geq (q-1) \sum_{\mathbf{w},\mathbf{s}} A_{\mathbf{w},\mathbf{s}},
\quad \forall (i,\gamma) \in \Omega(\Gamma_{2}) \tag{C9} \label{eq:C9}\\
& \quad
\sum_{\mathbf{w} \subseteq {\cal N}}A_{\mathbf{w},\emptyset} = {O_{1}} \tag{C10}
\end{align}
Then $R(\Lambda_{\mathcal C})$ is upper bounded by
\[
\frac{\log_{q} {O_{1}}}{N} +
\frac{M-L}{MN}
\left(
\log_{q} O^{*} - 2 \log_{q}{O_{1}}
\right).
\]
\end{theorem}
\begin{proof}[Proof of Theorem \ref{thmLPbd}]
We define
\begin{align*}
A_{\mathbf{w},\mathbf{s}} & \triangleq \Lambda_\mathcal C(\mathbf{w},\mathbf{s}) \\
C_{\mathbf{w},\mathbf{s}} & \triangleq |\mathcal C| \Lambda_{\mathcal{C}^\perp}(\mathbf{w},\mathbf{s}).
\end{align*}
As mentioned earlier, maximizing the code rate is equivalent to maximizing the code size, which is the objective function of the linear programming problem LP1. The first constraint of the optimization problem LP1 follows from the fact that the number of codewords is non-negative. The second constraint follows from the symmetry property of the code. The constraint \eqref{eq:C3} follows from the dual code support enumerator property (MacWilliams' identity) in Theorem \ref{thm5}. Constraint \eqref{eq:C4} follows from the fact that the number of dual codewords is non-negative. Constraint \eqref{eq:C5} follows from the fact that there exists only one zero codeword in the code $\mathcal C$. Constraints \eqref{eq:C6}--\eqref{eq:C9} follow from the properties \ref{item:RackModelCodeProperty3}--\ref{item:RackModelCodeProperty6} in Theorem \ref{thm5}.
\end{proof}
\begin{remark}
Strictly speaking, to optimise $R(\Lambda_{\mathcal C})$, one also needs to optimise the choice of $O_{1}$, which is generally unknown. However, since $O_{1} \in \{q^{i}, \: i=0, \ldots, N \}$,
we have
\[
R(\Lambda_{\mathcal C}) \le \max_{ i=0, \ldots, N}
\frac{i}{N} +
\frac{M-L}{MN}
\left(
\log_{q} O^{*}(i) - 2 i
\right)
\]
where $O^{*}(i)$ is the maximum of (LP1) when ${O_{1}} = q^{i}$.
\end{remark}
\subsection{Bound Simplification via Symmetry}
The complexity (in terms of the number of variables and the number of constraints) of the linear programming problem LP1 in Theorem \ref{thmLPbd} will increase exponentially with the number of storage nodes $N$ in each rack.
However, if we examine LP1 carefully, we can observe that the problem itself has many symmetries, and exploiting this inherent symmetry can significantly reduce the problem complexity.
Let $S_{\cal N}$ be the symmetric group on ${\cal N}$, whose elements are all the permutations of the elements in ${\cal N}$ which are treated as bijective functions from the set of symbols to itself. Clearly, $|S_{\cal N}| = N!$.
The variables in the optimization problem LP1 are
$$
(A_{\mathbf{w},\mathbf{s}}, C_{\mathbf{w},\mathbf{s}}, \: \mathbf{w},\mathbf{s}\subseteq {\cal N}).
$$
Let $\sigma$ be a permutation on ${\cal N}$ such that $\sigma \in S_{\cal N}$. For each $\mathbf{w} \subseteq {\cal N}$, we extend the mapping $\sigma$ by defining
$$
\sigma(\mathbf{w}) \triangleq \{ \sigma(i) : \: i \in \mathbf{w} \}.
$$
Due to the symmetries, we have the following proposition.
\def{}
\begin{proposition} \label{prop1}
Suppose $( a_{\mathbf{w},\mathbf{s}}, c_{\mathbf{w},\mathbf{s}} :\: \mathbf{w},\mathbf{s} \subseteq {\cal N})$ satisfies all the constraints in the linear programming problem LP1 in Theorem \ref{thmLPbd}. For any $\sigma \in S_{\cal N}$, let
\begin{align}
a_{\mathbf{w},\mathbf{s}}^{(\sigma)} & = a_{\sigma(\mathbf{w}),\sigma(\mathbf{s})} \\
c_{\mathbf{w},\mathbf{s}}^{(\sigma)} & = c_{\sigma(\mathbf{w}),\sigma(\mathbf{s})}.
\end{align}
Then
$( a_{\mathbf{w},\mathbf{s}}^{(\sigma)}, c_{\mathbf{w},\mathbf{s}}^{(\sigma)} :\: \mathbf{w},\mathbf{s} \subseteq {\cal N})$ also satisfies the constraints in LP1, with the same value of the objective function. In other words,
\[
\sum_{\mathbf{w},\mathbf{s}\subseteq {\cal N}}a_{\mathbf{w},\mathbf{s}}^{(\sigma)} = \sum_{\mathbf{w},\mathbf{s}\subseteq {\cal N}}a_{\mathbf{w},\mathbf{s}}.
\]
\end{proposition}
\begin{proof}
The proposition follows directly from the symmetry in the constraint and optimising function.
\end{proof}
As the feasible region in the linear programming problem (LP1) is convex, we have the following corollary.
\begin{corollary}\label{corollary1}
Let
\begin{align}
a^*_{\mathbf{w},\mathbf{s}} & = \frac{1}{|S_{\cal N}|} \sum_{\sigma \in S_{\cal N}} a_{\mathbf{w},\mathbf{s}}^{(\sigma)} \label{eq:AvgSol} \\
c^*_{\mathbf{w},\mathbf{s}} & = \frac{1}{|S_{\cal N}|} \sum_{\sigma \in S_{\cal N}} c_{\mathbf{w},\mathbf{s}}^{(\sigma)}.
\end{align}
Then
$( a_{\mathbf{w},\mathbf{s}}^{*}, c_{\mathbf{w},\mathbf{s}}^{*} :\: \mathbf{w},\mathbf{s} \subseteq {\cal N})$ also satisfies the constraints in (LP1) and
\begin{align} \label{eq:SumAvg}
\sum_{\mathbf{w},\mathbf{s} \subseteq {\cal N}} a^*_{\mathbf{w},\mathbf{s}} = \sum_{\mathbf{w},\mathbf{s} \subseteq {\cal N}} a_{\mathbf{w},\mathbf{s}}.
\end{align}
\end{corollary}
\begin{proof}
From Proposition \ref{prop1}, for any feasible solution $(a_{\mathbf{w},\mathbf{s}}, c_{\mathbf{w},\mathbf{s}} : \: \mathbf{w},\mathbf{s} \subseteq {\cal N})$, there exist $|S_{\cal N}|$ other feasible solutions $(a^{(\sigma)}_{\mathbf{w},\mathbf{s}}, c^{(\sigma)}_{\mathbf{w},\mathbf{s}} : \: \mathbf{w},\mathbf{s} \subseteq {\cal N}, \sigma \in S_{\cal N})$. Since $(a^*_{\mathbf{w},\mathbf{s}},c^*_{\mathbf{w},\mathbf{s}})$ is a convex combination of all these feasible solutions, it also satisfies the constraints of LP1. From \eqref{eq:AvgSol}
\[
\sum_{\mathbf{w},\mathbf{s} \subseteq {\cal N}} a^*_{\mathbf{w},\mathbf{s}} = \frac{1}{|S_{\cal N}|} \sum_{\mathbf{w},\mathbf{s} \subseteq {\cal N}} \sum_{\sigma \in S_{\cal N}} a_{\mathbf{w},\mathbf{s}}^{(\sigma)}.
\]
Moreover, from Proposition \ref{prop1}
\[
\sum_{\mathbf{w},\mathbf{s} \subseteq {\cal N}} \sum_{\sigma \in S_{\cal N}} a_{\mathbf{w},\mathbf{s}}^{(\sigma)} = |S_{\cal N}| \sum_{\mathbf{w},\mathbf{s} \subseteq {\cal N}} a_{\mathbf{w},\mathbf{s}}.
\]
The corollary then follows.
\end{proof}
By Corollary \ref{corollary1}, it is sufficient to consider only ``symmetric'' feasible solutions of the form
$
( a^*_{\mathbf{w},\mathbf{s}}, c^*_{\mathbf{w},\mathbf{s}} : \mathbf{w},\mathbf{s} \subseteq {\cal N})
$
in LP1. In other words, one can impose additional symmetry constraints on LP1 without affecting the value of the objective function.
One important benefit of considering $
( a^*_{\mathbf{w},\mathbf{s}}, c^*_{\mathbf{w},\mathbf{s}} : \mathbf{w},\mathbf{s} \subseteq {\cal N})
$ is that many terms in the linear programming bound can become alike (and hence can be grouped together).
\begin{proposition}[Grouping alike terms] \label{prop2}
Suppose $\mathbf{w},\mathbf{s},\mathbf{w}',\mathbf{s}' \subseteq {\cal N}$ such that
\begin{align}
|\mathbf{w} \setminus \mathbf{s} | & = |\mathbf{w}' \setminus \mathbf{s}' | \label{eq35} \\
|\mathbf{w} \cap \mathbf{s} | & = |\mathbf{w}' \cap \mathbf{s}' | \\
|\mathbf{s} \setminus \mathbf{w} | & = |\mathbf{s}' \setminus \mathbf{w}' | \label{eq37}.
\end{align}
Then $a^*_{\mathbf{w},\mathbf{s}} = a^*_{\mathbf{w}', \mathbf{s}'}$ and $c^*_{\mathbf{w},\mathbf{s}} = c^*_{\mathbf{w}', \mathbf{s}'} $.
\end{proposition}
\begin{proof}
By definitions,
\begin{align}
a^*_{\mathbf{w},\mathbf{s}} & = \frac{1}{|S_{\cal N}|} \sum_{\sigma \in S_{\cal N}} a_{\mathbf{w},\mathbf{s}}^{(\sigma)} \\
a^*_{\mathbf{w}',\mathbf{s}'} & = \frac{1}{|S_{\cal N}|} \sum_{\sigma \in S_{\cal N}} a_{\mathbf{w}',\mathbf{s}' }^{(\sigma)}.
\end{align}
Now, by \eqref{eq35}--\eqref{eq37}, there exists a permutation $\mu \in S_{{\cal N}}$ such that
$\mathbf{w}' =\mu(\mathbf{w}) $ and $\mathbf{s}' =\mu(\mathbf{s})$.
Hence, for any permutation $\sigma \in S_{{\cal N}}$, we have
\begin{align}
\sigma(\mathbf{w}') & = \sigma(\mu (\mathbf{w}) ) = (\sigma \circ \mu) (\mathbf{w})
\end{align}
Similarly, we have
$
\sigma(\mathbf{s}') = (\sigma \circ \mu) (\mathbf{s})
$.
Consequently,
\begin{align}
a^*_{\mathbf{w}',\mathbf{s}'} & = \frac{1}{|S_{\cal N}|} \sum_{\sigma \in S_{\cal N}} a_{\mathbf{w}',\mathbf{s}'}^{(\sigma)} \\
& = \frac{1}{|S_{\cal N}|} \sum_{\sigma \in S_{\cal N}} a_{\sigma(\mathbf{w}'), \sigma(\mathbf{s}')} \\
& = \frac{1}{|S_{\cal N}|} \sum_{\sigma \in S_{\cal N}} a_{(\sigma\circ \mu) (\mathbf{w}), (\sigma\circ \mu)(\mathbf{s})} \\
& = \frac{1}{|S_{\cal N}|} \sum_{\sigma \in S_{\cal N}} a_{ \sigma (\mathbf{w}), \sigma (\mathbf{s})} \\
& = a^*_{\mathbf{w},\mathbf{s}}
\end{align}
where the second last equality follows from the fact that $S_{{\cal N}}$ is a group and hence
$$
\{ \sigma \circ \mu: \sigma \in S_{{\cal N}}\}
=
\{ \sigma : \sigma \in S_{{\cal N}}\}.
$$
Similarly, we can also prove that $c^*_{\mathbf{w},\mathbf{s}} = c^*_{\mathbf{w}', \mathbf{s}'} $.
\end{proof}
Due to Proposition \ref{prop2}, we can impose the following additional constraint on (LP1)
\begin{align}
A_{\mathbf{w},\mathbf{s}} &= A_{\mathbf{w}',\mathbf{s}'} \\
C_{\mathbf{w},\mathbf{s}} &= C_{\mathbf{w}',\mathbf{s}'}
\end{align}
for all $\mathbf{w}, \mathbf{s}$ satisfying \eqref{eq35}--\eqref{eq37}.
As many of these variables are now the same, one can greatly reduce the number of variables in (LP1).
\begin{theorem}[Simplified LP Bound]\label{thm4}
The maximum in (LP1) is the same as the maximum of the following linear programming problem:
\noindent
\underline{Reduced Linear Programming Problem (LP2)}
\begin{align}
\displaystyle
&\textbf{maximize} \quad \sum_{d,e,f } \binom{N}{d,e,f} X_{d,e,f } \nonumber\\
&\textbf{subject to} \nonumber\\
& \quad
X_{d,e,f} \geq 0, \hspace{3.8cm} \quad \forall d,e,f \tag{D1}\\
& \quad
X_{d,e,f} = X_{f,e,d},
\hspace{2.85cm} \quad \forall d,e,f \tag{D2}\\
& \quad
Y_{d,e,f} = \sum_{d',e',f'} \Delta_{1}(d,e,f,d',e',f') X_{d',e',f'}, \quad
\forall d,e,f
\tag{D3}\\
& \quad Y_{d,e,f} \geq 0, \hspace{3.75cm} \quad \forall d,e,f \tag{D4} \\
& \quad X_{0,0,0} = 1 & \tag{D5}\\
& \quad
X_{d,e,f} = 0,
\hspace{1.45cm} \quad \forall 1 \le d + e \le \beta_{1} \tag{D6}\\
& \quad
X_{d,0,0} = 0,
\hspace{2.8cm} \quad \forall 1 \le d\le \beta_{2} \tag{D7}\\
& \quad
\sum_{d=2}^{r_{1}+1} \Delta_{2}(d) Y_{d,0,0} \ge (q-1) \sum_{d,e,f} \binom{N}{d,e,f} X_{d,e,f},
\tag{D8} \\
& \quad
\sum_{d+e\leq r_2 +1,e+f\leq a} \Delta_{3}(d,e,f) Y_{d,e,f} \ge (q-1) \sum_{d,e,f} \binom{N}{d,e,f} X_{d,e,f}, \tag{D9}\\
& \quad
\sum_{d} X_{d,0,0} = {O_{1}}
\tag{D10}
\end{align}
where $ \Delta_{1}(d,e,f,d',e',f') $, $ \Delta_{2}(d)$ and $\Delta_{3}(d,e,f) $ are respectively defined as in \eqref{eqDelta1}, \eqref{eqDelta2} and \eqref{eqDelta3}.
Here, $ (d,e,f)$ are tuples such that $d,e,f$ are nonnegative integers with a total sum no more than $N$.
\end{theorem}
The proof of Theorem \ref{thm4} is given in Appendix \ref{append:B}.
\begin{remark}
In (LP1), the number of variables and constraints grows exponentially with $N$ -- the number of nodes in a rack. Such exponential growth makes (LP1) practically infeasible to solve for moderate $N$. However, via reduction by symmetry, the number of variables in (LP2) has greatly reduced to
$
2\binom{N+3}{3} + 1
$
while the number of constraints to
$
\frac{7}{2} \binom{N+3}{3} + 5 + \beta_{1} + \beta_{2}.
$
Clearly, the reduction is significant.
\end{remark}
\section{Conclusion} \label{Sec:Conc}
In this paper we introduced a code-design framework for multi-rack distributed data storage
networks. In this model the encoded data is stored in storage nodes distributed over multiple
racks. Each rack has a process unit which is responsible for all calculations and transmissions
inside the rack. Practically, the cost of data transmission within a rack is much less than the
data transmission across the racks. In order to take advantage of this characteristic, we proposed
a code-design framework for multi-rack storage networks which is able to locally repair the
nodes failure within the rack to minimise the repair cost. However, under the severe failure
circumstances where the failures are not repairable by the survived nodes inside the rack, our
coding scheme is capable of participating the other racks to help repairing the failures. We
showed that maximising the rate of our multi-rack storage code is equivalent to maximising the
code size. In order to maximise the code size, we established a linear programing problem based
on the code-design framework criteria. These bounds characterise the trade-off between different
code parameters such as the number of nodes in each rack involved in repair process, the number
of failures and the number of repairable simultaneous failures. We also exploited symmetry in our
multi-rack storage code in order to reduce the complexity of the linear programming problem.
\bibliographystyle{IEEEtran}
|
1,314,259,995,038 | arxiv | \section{Introduction}
Randomness extractors~\cite{NZ93, Sha04}, have numerous applications in complexity theory and cryptography. For example, in computational complexity they are used to isolate satisfying assignments of boolean formulas~\cite{VV86} and in constructing pseudorandom generators for space-bounded computation~\cite{Nis91, RR99}. One notable use in cryptography is in the construction of pseudorandom generators from one-way functions~\cite{HILL, HRV, VZ}.
In many applications of extractors, including the above ones, it is important that the extractors recover essentially all the entropy of the input distribution. A popular choice in such scenarios is to instantiate the extractor by a pairwise-independent hash function family~\cite{BBR88, ILL89}. Pairwise-independent functions are appealing because they have a variety of implementations, ranging from very simple ones~\cite{CW} to very efficient ones~\cite{IKOS}.
Mansour, Nisan, and Tiwari~\cite{MST} observed that pairwise-independent hash functions must be ``dense'' in the sense that a typical output in a typical function in the family must depend on a linear number of inputs. So despite their numerous nice properties, in terms of the number of input-output dependencies, pairwise-independent functions are quite complex. Motivated by an application to local cryptography, Bogdanov and Rosen~\cite{BR11} recently gave a way to bypass this barrier in the context of hardness amplification of ``local'' functions.
In this work we study sparse extractors for all the entropy, these are extractors with a small number of overall input-output dependencies. We consider the more general notion of sparse extractor families. An {\em extractor family} for distributions of min-entropy $k$ over $\{0,1\}^n$ with error $\varepsilon$ is a distribution $H$ on functions $\{0,1\}^n \times \{0,1\}^s \to \{0,1\}^m$ where $m \leq s + k$ such that for every distribution $X$ over $\{0,1\}^n$ of min-entropy $k$, the statistical distance between $(H, H(X, U_s))$ and $(H, U_m)$ is at most $\varepsilon$ (where $U_s$ and $U_m$ are uniformly random). The extractor family is {\em strong} if $s = 0$, i.e. $H$ does not take any additional randomness beyond $X$.
Without the sparsity restriction extractors and extractor families are essentially the same object, as the randomness used to choose an extractor from the family can be included in the seed. Once we take sparsity into consideration, however, extractor families allow for more flexibility. This advantage is especially pronounced in the case of strong extractors: Any single strong extractor in which some output bit depends only on $\ell$ input bits cannot extract from a source that fixes all those $\ell$ bits. In contrast, we show strong extractor families can achieve much better sparsity.
In this work we prove three results regarding sparse extractor families. First we give a simple construction of sparse extractor families for all the entropy. Then we show that the sparsity of our construction is optimal up to constant factors for a wide range of the min-entropy parameter. Finally, we show that an equally simple construction of weak extractor families achieves better sparsity. Thus when sparsity is required, weak extractors can provably outperform strong ones.
We also show our weak extractor family gives a somewhat improved nonuniform construction of local pseudorandom generators from local one-way functions, based on recent work of Vadhan and Zheng~\cite{VZ}. In general our results can be useful in randomized or nonuniform settings where hashing is used and obtaining or preserving small input-output dependencies (i.e., parallelism) is of interest.
\subsection{Our results}
Let $h\colon \{0,1\}^n \to \{0,1\}^m$ be a function. We say output $j$ of $h$ {\em depends} on input $i$ if there exists assignments $x, x' \in \{0,1\}^n$ that differ only in the $i$th coordinate such that $h(x)_j \neq h(x')_j$. We say $h$ is {\em $s$-sparse} if the number of input-output pairs $(i, j)$ such that output $j$ depends on input $i$ is at most $s$, and $h$ is {\em $\ell$-local} if every output $j$ depends on at most $\ell$ inputs $i$.
\begin{theorem}
\label{thm:upper}
Let $K$ be a sufficiently large constant and $n, k, m, \delta$ be parameters such that $1 \leq m\leq k \leq n$, and $0 < \delta < 1$. Let $H(x) = Mx$, where $M$ is an $m\times n$ matrix over $GF(2)$ where each entry equals $1$ independently with probability
\[ p = \min\Bigl\{\frac{1}{m} \cdot \log\frac{m}{\delta}\ln\frac{Kn}{m} , \frac12 \Bigr\}. \]
Then $H$ is a strong extractor family for min-entropy $k$ with error at most $\sfrac12\sqrt{\delta + K\cdot 2^{-k+m}}$.
\end{theorem}
By a large deviation bound, all but a $\delta$-fraction of $H$ are $O(nmp)$-sparse. The best error we can hope for~\cite{RT} is $\Omega(\sqrt{2^{-k+m}})$, which is achieved by a pairwise independent hash function family. Perhaps the simplest construction of such a family is to choose each entry of $M$ independently at random with probability $p = 1/2$. When $p<1/2$, Theorem~\ref{thm:upper} shows the sparsity can be reduced dramatically at the cost of increasing the error by a little. For example if we set $\delta = 2^{-k+m}$, we obtain a $O(n \log{m} \log{(n/m)})$ sparse strong extractor family whose error is within a constant factor of optimal.
Our main negative result shows that this sparsity is necessary for a large range of values of $k$ and when $\delta$ is constant.
\begin{theorem}
\label{thm:lower}
Suppose $n^{0.99} \leq m \leq n/6$. There exists a distribution $\mathcal{D}$ over distributions $\overline{X}$ on $\{0,1\}^n$ of min-entropy $1.5m$ each so that for every function $h\colon \{0,1\}^n \to \{0,1\}^m$ of sparsity $0.001 n \log m \log(2n/m)$, the expected statistical distance between $h(\overline{X})$ and the uniform distribution over $\{0,1\}^m$ is at least $1 - e^{-m^{\Omega(1)}}$.
\end{theorem}
Applying Yao's minimax principle, or simply taking the convex combination of the distributions $\overline{X}$ as the bad distribution, we conclude that the sparsity in Theorem~\ref{thm:upper} is optimal up to a constant factor for this range of parameters. The next result concerns weak extractor families.
\begin{theorem}
\label{thm:upper2}
Let $K$ be a sufficiently large constant and $n, k, c, m$ be parameters such that $1\leq k \leq n$, $1\leq s<m$ and $c > 1$. Let $H\colon \{0,1\}^n \times \{0,1\}^s \to \{0,1\}^m$ be given by $H(x, r) = Mx + Br$, where $M$ is an $m\times n$ random matrix in which each entry equals $1$ independently with probability
\[ p= \min \Bigl\{\frac{K}{m} \cdot \ln\frac{n}{\ln c},\frac12 \Bigr\}, \]
and $B$ is an $m\times s\ (m>s)$ matrix of full rank where every set of at most $m/2K$ rows is linearly independent. Then $H$ is an extractor family for min-entropy $k$ with error $\sfrac12\sqrt{c \cdot 2^{-k-s+m}}$.
\end{theorem}
The construction of a matrix $B$ with the desired properties and $O(m)$ sparsity is a well studied problem in the theory of low density parity check codes~\cite{Ga62,SS96}. Capalbo et al.~\cite{CRVW} give an explicit construction with $s = \alpha m$ for some constant $\alpha < 1$ and every $m$, which is optimal up to the choice of the constant $\alpha$. Instantiating Theorem~\ref{thm:upper2} with this matrix, and setting $c = 2$, we obtain a family of $O(n \log n)$ sparse extractors with error $O(\sqrt{2^{-k-s+m}})$, which is optimal up to constant factor. (If $m = k + s - O(1)$, the output contains almost all the entropy from the source plus all the entropy invested by the seed and the error is an arbitrarily small constant.) By using a larger value of $c$, we can reduce the sparsity at the cost of increasing the error.
We observe that using the randomized encoding of Applebaum et al.~\cite{AIK}, these extractors can be made to have constant locality at the cost of increasing the seed length $r$ to $O(n \log n)$ bits.
For certain parameters the weak extractor family from Theorem~\ref{thm:upper2} bypasses the limitation on strong extractor families from Theorem~\ref{thm:lower}. For example, for constant statistical distance and $m = n^{0.99}$ Theorem~\ref{thm:lower} implies that for a strong extractor to produce even a constant fraction of the entropy a sparsity of $\Omega(n (\log n)^2)$ is necessary, while Theorem~\ref{thm:upper2} says that an $O(n \log n)$-sparse weak extractor family can extract all but $O(1)$ bits of entropy.
\subsection{An application}
\paragraph{Pseudorandom generators from one-way functions} The construction of pseudorandom generators from one-way functions of H\aa\/stad et al.~\cite{HILL} does not in general preserve locality. Haitner, Reingold, and Vadhan~\cite{HRV} gave a construction that is more efficient and can be implemented in $\mathrm{NC}^1$. Recently Vadhan and Zheng~\cite{VZ} gave an even simpler variant of this construction. In combination with the ``compiler'' of Applebaum, Ishai, and Kushilevitz ~\cite{AIK04}, one obtains a generic locality-preserving transformation of one-way functions into pseudorandom generators.
Applying the transformation of Applebaum et al. may have an adverse effect on seed length, as it may grow quadratically. However, the construction of Vadhan and Zheng is extremely simple; it is obtained by applying extractor to a sequence of ``blocks'', each of which inherits the locality of the one-way function $f$. Instantiating the extractor by the construction from Theorem~\ref{thm:upper2}, we obtain a transformation of {\em nonuniform} one-way functions into nonuniform pseudorandom generators that preserves output locality logarithmic in the size of the adversary with the same seed length as the one obtained by Vadhan and Zheng. Using an additional idea of Applebaum et al., the transformation can be made to preserve constant output locality at the expense of increasing the seed length. We describe this application in Section~\ref{sec:prg} (see Proposition~\ref{prop:owfprg}).
\subsection{Related work}
\paragraph{Sparse extractors for restricted sources} Motivated by certain applications, Zhou and Bruck ~\cite{ZB11} show that low density random matrices can efficiently extract random bits from some restricted noisy sources, such as bit fixing sources and Markov sources. Our Theorem~\ref{thm:upper} shows that essentially the same construction extracts from arbitrary sources of given min-entropy.
\paragraph{Extractors in $\mathrm{NC}^0$} Applebaum, Ishai and Kushilevitz~\cite{AIK06} give a weak extractor in $\mathrm{NC}^0$ (thus sparsity $O(n)$) that works for min-entropy $k = (1 - O(1))n$, but suffers $\Omega(n)$ entropy loss. Our extractor family from Theorem~\ref{thm:upper2} matches these parameters. The construction from~\cite{AIK06} does not appear to extend to distributions of smaller min-entropy or allow for smaller entropy loss, while ours does. However, they provide a single extractor that works for all distributions, while we only give an extractor family.
\paragraph{Locally computable extractors} A {\em locally computable extractor}~\cite{Lu04, Vad03} is an extractor in which after the seed is fixed, the output as a whole depends on a small number of input bits. Such extractors are used to implement private-key encryption in the bounded storage model~\cite{Maurer}. We observe that the notions of locally computable extractors and sparse extractor families are fundamentally different. This is best illustrated in the regime in which we extract all the entropy, which is of main interest in this work. A lower bound of Vadhan~\cite{Vad03} shows that when the output length $m$ is linear in the min-entropy $k$, then even after the seed is fixed the output of the extractor as a whole must depend on at least a linear fraction of the input. Thus $o(n)$-locally computable extractors are not possible when $m = \Omega(k)$. Although this is inevitable, our results show that it is possible to make a small number of input-output dependencies.
We observe that the locally computable extractors of Lu~\cite{Lu04} and De and Trevisan~\cite{DT09} are also sparse, but they extract only a fixed root of the min-entropy $k$. This is sufficient for bounded storage cryptography, but not for the application we describe in Section~\ref{sec:applications}.
\iffalse
\begin{table}[position specifier]
\centering
\begin{tabular}{|l|l|l|l|l|l|}
\hline
& locality & entropy & entropy loss & error & remark\\
\hline
~\cite{AIK06} & $O(1)$ & $k=(1-O(1))n$ & $\Omega(n)$ & $2^{-\Omega(n)}$ & $m=n$ \\
Theorem ~\ref{thm:upper} & $O((n/m)\log{(n/m)}\log{(m/\delta)})$ & $1\leq k\leq n$ & $k-m$ & $1/2\sqrt{\delta+O(2^{-k+m})} $ &\\
Theorem ~\ref{thm:upper2} & $O((n/m)\log{(n/\ln(1/\delta))})$ & $1\leq k\leq n$ & $k+s-m$ & $1/2\sqrt{(1/\delta)\cdot 2^{-k+m}} $ & $m>s$\\
\hline
\end{tabular}
\caption{Upper bound}
\label{tab: upper bound}
\end{table}
\begin{table}[position specifier]
\centering
\begin{tabular}{|l|l|l|l}
\hline
& locality & Remark\\
\hline
~\cite{Vio05} & $\Omega(n/k)$ & \\
Theorem ~\ref{thm:lower} & $\Omega((n/k)\log(n/k)\log{k})$ & $n^{0.99}\leq m\leq n/6, $\\
\hline
\end{tabular}
\caption{Lower bound: $m=\Omega(k), \varepsilon=\Omega(1)$}
\label{tab:lower bound}
\end{table}
\fi
\subsection{Our proofs}
Bogdanov and Rosen~\cite{BR11} proved a quantitatively weaker version of Theorem~\ref{thm:upper} that achieves sparsity $O(n (\log n)^3)$ instead of the optimal $O(n\log(m)\log(n/m))$. (They did not attempt to determine the dependence on $m$ and they can achieve sparsity $O(n\log(m)\log(n/m)^2)$) In their proof, $H$ is viewed as a collection of boolean functions $(h_1, \dots, h_m), h_i\colon \{0,1\}^n \to \{0,1\}$. They show that for most choices of $h_1$, conditioning on $h_1(x)$ reduces the min-entropy $k$ of $x$ by at most $1 + 1/\mathrm{poly}(k)$ bits (unless $k$ is very small), and so this bit extraction can be applied iteratively for $m$ steps.
One drawback of this argument is that as $i$ gets larger and the min-entropy of $x$ conditioned on $h_1(x),\dots,h_{i-1}(x)$ becomes smaller, the density of the functions $h_i$ must keep increasing (as required by our lower bound). To achieve our optimal (up to constant) sparsity, we must analyze the effect of all the functions $h_1, \dots, h_m$ simultaneously.
To do this, we upper bound the probability that two samples $x, x'$ collide under $h$, that is the probability that $h(x + x') = 0$. For a fixed pair $(x, x')$, each entry $h_i(x + x')$ of $h(x + x')$ is biased towards zero. We can think of $h_i(x + x')$ as a random variable that takes value zero with some probability $p(x, x')$, and is unbiased otherwise. Intuitively, our analysis shows that the unbiased components of this distribution dominate in collisions. Several technical complications arise in the formal argument. One useful tool that allows us to analyze the case when most of the components of $h(x + x')$ are unbiased is H\"older's inequality.
To give an idea of our proof of Theorem~\ref{thm:lower}, let's make the simplifying assumption that $h$ is $\ell$-local, where $\ell = \gamma (n/m) \log m \log(n/m)$. We give a heuristic argument why we expect the output of $h$ to be far from uniform when $h$ is linear. Let $X$ be the $p$-biased distribution over $\{0,1\}^n$ (each bit takes value $1$ independently with probability $p$) and $p$ is chosen so that $H(p) = m/n$, where $H(p)$ is the binary entropy of $p$. Then the distribution $X$ has Shannon entropy $m$. However, every output bit of $h(X)$ is $(1 - 2p)^\ell$-biased, and we chose the parameters so that $(1 - 2p)^\ell = m^{-\Omega(\gamma)}$. By choosing $\gamma$ small enough, we can ensure that every output bit of $h$ has, say, $m^{-1/2}$ bits of {\em entropy deficiency}, so by the sub-additivity of Shannon entropy $h(X)$ has $m^{1/2}$ fewer bits of entropy than a uniformly random variable over $\{0,1\}^m$. So $h(X)$ does not ``look'' random in terms of Shannon entropy.
To turn this heuristic argument into a proof we need to handle several issues, the most interesting of which is replacing entropy deficiency by statistical distance from the uniform distribution. One advantage of measuring entropy deficiency is that entropy is subadditive, which allows us to ignore the dependencies between the various outputs of $h(X)$ in the above argument. In contrast, to obtain a good lower bound on statistical distance we must take into account these dependencies. Here we apply a tail bound for read-$t$ families~\cite{gavinsky2012tail}. To extend the analysis from linear functions to general ones we apply an elegant idea of Viola~\cite{Vio05} of shifting $X$ by a random offset.
We establish Theorem~\ref{thm:upper2} by a relatively straightforward probabilistic calculation.
\subsection{Open problems}
In terms of seed length our sparse extractors are quite poor. For example the size of the family $H$ in Theorem~\ref{thm:upper} is exponential in $n$. By a standard probabilistic argument it can be shown that a random sample of $H$ of size $O(n / \varepsilon^2)$ is as effective as the whole family while incurring an additional penalty of only $\varepsilon$ in statistical distance. Consequently there is no existential obstacle to sparse extractor families with short seed. It remains to see if such families can be found efficiently.
For weak sparse extractors, our work leaves open two possible improvements. First, we do not know if the sparsity of our weak extractors is the best possible. Second, we do not know what is the minimal size of a sparse weak extractor family. It could be that even a family of size 1, i.e. a single sparse weak extractor, is sufficient. Such an extractor could be used to obtain a uniform construction of local pseudorandom generators from local one-way functions. Could there be a single sparse weak extractor of sparsity linear in $n$ that extracts $k - k^{0.99}$ bits of min-entropy for every source over $\{0,1\}^n$ of min-entropy $k$?
\section{Proof of Theorem~\ref{thm:upper}}
\label{sec:upper}
To prove Theorem~\ref{thm:upper} it is sufficient to show that for every set $S$ of size $2^{k}$, the statistical distance between $(H, H(X))$ and $(H, U)$ is at most $\sfrac12\sqrt{\delta + O(2^{-k+m})}$, where $X$ is chosen at random from $S$ and $U$ is uniformly random.
In fact we will show for every $x_0 \in S$,
\[ \pr_{H, X}[H(X)=H(x_0)] \leq \frac{1+ \delta + O(2^{-k+m})}{2^m} \]
from where
\[ \pr_{H, X, X'}[H(X) = H(X')] \leq \max_{x_0 \in S} \pr_{H, X}[H(X) = H(x_0)] \leq \frac{1+ \delta + O(2^{-k+m})}{2^m} \]
where $X$ and $X'$ are independent samples from $S$. This is sufficient to establish Theorem~\ref{thm:upper} using the relation between collision probability and statistical distance from Claim~\ref{claim:colldist} in Appendix~\ref{app:colldist}.
\begin{proof}
When $p = 1/2$ the analysis is standard, so we will assume that $p =\frac{1}{m} \cdot \log(m/\delta)\ln(15 n/m) < 1/2$.
Since entries of $M$ are chosen independently from each other, for any $y \in \{0,1\}^n$, we have
\[
\pr_H[H(y) = 0] = \pr_a[\ip{a, y} = 0]^m = \Bigl(\frac{1+(1-2p)^{|y|}}{2}\Bigr)^m =\frac{1}{2^m}\sum_{i=0}^m\binom{m}{i}(1-2p)^{i|y|}
\]
Here $a \sim \{0,1\}^n$ is chosen from the $p$-biased distribution. Let $S_0$ be the set $\{x_0 + x\colon x\in S\}$. Then
\begin{align*}
\pr_{H, X}[H(X)=H(x_0)] &=\pr_{H, y \sim S_0}[H(y) = 0] \\
& = \E_{y\sim S_0}\Bigl[\frac{1}{2^m}\sum_{i=0}^m\binom{m}{i}(1-2p)^{i|y|}\Bigr] = \frac{1}{2^m}\sum_{i=0}^m\binom{m}{i}\E_{y\sim S_0}[(1-2p)^{i|y|}]
\end{align*}
Let $a_i= \E_{y\sim S_0}[(1-2p)^{i|y|}]$. We now upper bound the sum $\sum_{i=0}^m\binom{m}{i}a_i$ by $1+\delta+O(2^{-k+m})$. We will consider two cases: When $i$ is small -- specifically, $i \leq k/(2\log{(m/\delta)})$, we show that $a_i$ decreases at a rate faster than $(m/\delta)^{-i}$, so the sum is dominated by the term $i = 0$. When $i$ is large, we want to bound both $a_i$ and $\binom{m}{i}$ by its largest possible value. To achieve this, we have to further split the large $i$'s into ``mildly large'' and ``very large'' ones and apply the argument to each summation separately. The resulting contribution is $O(2^{-k+m})$. Notice that, in the case $m\leq k/(2\log{(m/\delta)})$, we need not consider the contribution of large $i$'s thus we can improve $p$ to be $\frac{1}{k} \cdot \log(m/\delta)\ln(15 n/k)$ by using the same analysis for small $i$'s.
\paragraph{The small $i$'s.} We show that if $i \leq k/(2\log{(m/\delta)})$, then $a_i \leq (m/\delta)^{-1.8i}$ and therefore
\[ \sum_{i=0}^{k/(2\log{(m/\delta)})} \binom{m}{i}a_i\leq (1+(m/\delta)^{-1.8})^m \leq e^{{\delta}^{1.8}/m^{0.8}}\leq 1+\delta. \]
To bound $a_i$ we apply H\"older's inequality, which says that for every $B \geq 1$:
\[ a_i = \E_{y\sim S_0}[(1-2p)^{i|y|}] = \frac{1}{\abs{S}} \sum_{y \in \{0,1\}^n} 1_{y \in S_0} \cdot (1 - 2p)^{i\abs{y}}
\leq \frac{1}{\abs{S}} \abs{S}^{1-1/B} \sum_{y \in \{0,1\}^n} \bigl((1 - 2p)^{Bi\abs{y}}\bigr)^{1/B} \]
The last expression can be simplified to give
\[
a_i \leq {\Bigl(\frac{(1+(1-2p)^{Bi})^n}{|S|}\Bigr)}^{1/B} \leq \Bigl(\frac{(1+e^{-2piB})^n}{|S|}\Bigr)^{1/B}.
\]
We choose $B = k/(2i\log{(m/\delta)})$, which is at least one because $i \leq k/(2 \log(m/\delta))$. By our choice of $p$, it follows that $2piB \geq \ln(15n/m)$ and so
\[ a_i \leq \Bigl(\frac{(1+ m/15n)^n}{|S|}\Bigr)^{1/B} \leq \Bigl(\frac{e^{m/15}}{2^{k}}\Bigr)^{1/B}
\leq \Bigl(\frac{e^{k/15}}{2^{k}}\Bigr)^{1/B} \leq (2^{-0.9})^{k/B} = (m/\delta)^{-1.8i}. \]
\paragraph{The large $i$'s.} We have that
\begin{equation}
\label{eqn:basicbound}
a_i = \E_{y\sim S_0}[(1-2p)^{i|y|}] \leq \frac{1}{\abs{S}} \sum_{y \in \{0,1\}^n} (1-2p)^{i|y|} = \frac{(1 + (1 - 2p)^i)^n}{\abs{S}}
\leq \frac{(1 + e^{-2pi})^n}{\abs{S}}.
\end{equation}
When $i \geq m/4$, the last expression is at most $(1 + e^{-pm/2})^n/\abs{S}$. By our choice of $p$, $pm/2 \geq \sfrac12 \log m \ln(15n/m)$. Optimizing for $\log m$, it can be calculated that this expression is at least $\ln n$ when $n$ is sufficiently large. It then follows that
\[ a_i \leq \frac{(1 + 1/n)^n}{\abs{S}} \leq e 2^{-k} \qquad \text{and so} \qquad \sum_{i=m/4}^m \binom{m}{i} a_i \leq 2^m \cdot e2^{-k} = e2^{-k+m}. \]
Finally, we handle the $i$'s in the range $k/(2\log{(m/\delta)}) < i < m/4$. Using (\ref{eqn:basicbound}) and the lower bound on $i$, we have that
\[ a_i \leq \frac{(1 + m/15n)^n}{\abs{S}} \leq \frac{e^{m/15}}{\abs{S}} \leq 2^{0.1m - k} \]
and so
\[ \sum_{k/(2\log{(m/\delta)}) < i < m/4} \binom{m}{i}a_i \leq 2^{0.1m - k} \sum_{i=0}^{m/4} \binom{m}{i} \leq 2^{0.1m - k} \cdot 2^{H(1/4)m + O(1)} \leq 2^{ - k+m+ O(1)} , \]
where $H$ is the binary entropy function, and $H(1/4) \leq 0.9$.
\end{proof}
\section{Proof of Theorem~\ref{thm:lower}}
\label{sec:lower}
Let distribution $\overline{X}$ be a truncated variant of the $p$-biased distribution $X$ where $p$ is chosen so that $H(p) = 2m/n$ and $p \leq 1/2$. The distribution $\mathcal{D}$ on distributions is defined as follows. Choose $y$ uniformly from $\{0,1\}^n$ and output $y+\overline{X}$. To prove Theorem~\ref{thm:lower}, we will show for most choices of $y$, there exists a statistical test $T_y$ that distinguishes $h(y+\overline{X})$ from the uniform distribution $U$ and then argue the expected statistical distance over the choice of $y$ between $h(y+\overline{X})$ and $U$ is large. We will define $T_y$ shortly for $y\in\{0,1\}^n$ and first argue that for most choices of $y$, $T_y$ distinguishes $h(y+X)$ from $U$. Then we will show how to define $\overline{X}$ of min-entropy $1.5m$ in a way that $X$ and $\overline{X}$ are statistically close. Finally, we conclude that the expected statistical distance over the choice of $y$ between $h(y+\overline{X})$ and $U$ is at least $1-e^{-m^{\Omega(1)}}$.
The following bounds on $p$ are obtained by plugging in $H(p) = 2m/n$ in Lemma~\ref{lemma:entropy} in Appendix~\ref{app:entropy}:
\begin{equation}
\label{eqn:pbounds}
\frac{m}{3n \log_2(n/m)} \leq p \leq \frac{2m}{n \log_2(n/2m)}.
\end{equation}
Now suppose $h$ has sparsity $(m/2p) \beta \log m$, where $\beta$ is a sufficiently small constant, say $\beta = 0.08$. Notice this $\beta$ also satisfies $n \leq m^{1 + 2\beta}$ (since by assumption $m \geq n^{0.99}$). Partition the inputs of $h$ into two sets $H$ and $L$, where $H$ contains those inputs that participate in at least $m^{2 - 6\beta}/pn$ outputs of $h$, and $L$ contains the rest. By Markov's inequality (using the assumption $n \leq m^{1 + 2\beta}$), $H$ has size at most $m^{8\beta} \beta \log m$. For $x \in \{0,1\}^n$, let $x_0$ and $x_1$ denote its projections onto $H$ and $L$, respectively. For every $y\in\{0,1\}^n$, we define the statistical test
\[T_y=\{z\in\{0,1\}^{m}: \text{$\Delta(h(x_0,y_1),z) \leq 1/2 - m^{-\beta}/4$ for some $x_0$}\}\]
where $\Delta(a, b)$ is relative Hamming distance between the strings $a$ and $b$, i.e. the fraction of positions in which they differ.
\begin{claim}
\label{claim:ellone}
For sufficiently large $m$, $\Pr_X[h(X + y)\in T_y]\geq 1-e^{-m^{3\beta}/2}$ for at least a $1 - e^{-m^{3\beta}/2}$ fraction of the values $y \in \{0,1\}^n$.
\end{claim}
In the proof we will need the following fact about Boolean functions $f\colon \{0,1\}^d \to \{0,1\}$
\begin{equation}
\label{eqn:noise}
\Pr_{X, Y}[f(X+Y)\neq f(Y)] \leq \tfrac12 - \tfrac12(1-2p)^d
\end{equation}
where $Y$ is uniformly distributed in $\{0,1\}^d$, and $X$ is chosen independently from the $p$-biased distribution on $\{0,1\}^d$. This fact follows easily by Fourier analysis~\cite{OD02} and was also used by Viola~\cite{Vio05} in a context related to ours.
We will also make use of the following tail inequality of Gavinsky et al.~\cite{gavinsky2012tail}, which shows that for most choices of the underlying randomness the outcome concentrates around its expectation. A collection of indicator random variables $Z_1, \dots, Z_m$ is called a {\em read $t$ family} if the $Z_i$'s can be written as functions of independent random variables $X_1, \dots, X_n$ such that each $X_i$
affects at most $t$ of the $Z_i$'s. Then for every $\varepsilon > 0$,
\begin{equation}
\label{eqn:tail}
\Pr[Z \geq \E[Z] + \varepsilon m] \leq e^{-2\varepsilon^2m/t}.
\end{equation}
where $Z = Z_1 + \dots + Z_m$.
\begin{proof}
We will show that for every choice of $x_0, y_0$, with probability $1 - e^{-\Omega(m^{1-\beta})}$ over the choice of $x_1, y_1$, $h(x+y) = h(x_0 + y_0, x_1 + y_1)$ is in $T_y$. Fix $x_0, y_0$ and consider the function $h^{x_0+y_0}(x_1) = h(x_0+y_0, x_1)$. Let $Z = Z_1 + \dots + Z_{m}$, where
\[ Z_i = \begin{cases}
1, &\text{if $h^{x_0+y_0}_i(x_1 + y_1)\neq h^{x_0+y_0}_i(y_1)$}, \\
0, &\text{otherwise}.
\end{cases} \]
Suppose $h^{x_0+y_0}_i$ depends on $d_i$ inputs for $1\leq i\leq m$, by (\ref{eqn:noise}) we have
\[\E[Z_i] \leq 1/2(1-(1-2p)^{d_i}).\]
By linearity of expectation and $\sum_{i=1}^m d_i\leq (m/2p)\beta\log{m}$, we get
\[\E[Z] \leq m/2(1-(1 -2p)^{\beta\log{m}/2p})\leq m(1/2-m^{-\beta}/2).\]
Now we apply tail bound (\ref{eqn:tail}) to $Z_1, \dots, Z_m$ with $t = m^{2 - 6\beta}/pn$ and $\varepsilon=m^{-\beta}/4$ to obtain
\[\Pr[Z\geq m(1/2-m^{-\beta}/4)]\leq e^{-2\varepsilon^2m/(m^{2 - 6\beta}/pn)}\leq e^{- m^{3\beta}}\]
where we used the estimate (\ref{eqn:pbounds}) to lower bound $pn$. In other words,
\[\pr_{x_1, y_1}[\Delta(h_{x_0+y_0}(y_1),h_{x_0+y_0} (x_1 + y_1)) \leq 1/2 - m^{ - \beta}/4] \geq 1 -e^{- m^{3\beta}}.\]
It follows that
\begin{align*}
\pr_{x, y}[h(x+y)\in T_{y}]
& \geq \E_{x_0,y_0}\bigl[\pr_{x_1, y_1}[ \Delta(h_{x_0+y_0}( y_1),h_{x_0+y_0}( x_1+y_1))\leq 1/2 - m^{ - \beta}/4]\bigr] \\
& \geq 1-e^{-m^{3\beta}}.
\end{align*}
Applying Markov's inequality, we conclude that for at least $1-e^{-m^{3\beta}/2}$ choices of $y$,
\[ \pr_X[h(X+y)\in T_y]\geq 1-e^{-m^{3\beta}/2}. \qedhere \]
\end{proof}
\begin{claim}
\label{claim:uniform}
For any fixed $y\in\{0,1\}^n$, with probability $1-2^{-\Omega(m^{1-2\beta})}$ over the choice of a uniform $U \sim \{0,1\}^{m}$, $U$ is not in $T_y$.
\end{claim}
\begin{proof}
Since $H$ has size at most $m^{8\beta}\beta \log m$, the range of $h'(x_0, y_1)$ has at most $2^{m^{8\beta}\beta \log m}$ elements. For every such element $h(x_0, y_1)$, the probability that $U$ is within distance $m/2 - m^{1-\beta}/4$ of $h(x_0, y_1)$ can be computed by Chernoff bounds to be at most $2^{-\Omega(m^{1 - 2\beta})}$. Taking a union bound over all such $h(x_0, y_1)$, we obtain
\[ \pr[U \in T_y] \leq 2^{m^{8\beta}\beta \log m} 2^{-\Omega(m^{1 - 2\beta})} = 2^{-\Omega(m^{1 - 2\beta})} \]
as long as $\beta < 1/10$ and $m$ is sufficiently large.
\end{proof}
From these two claims, it follows that for a $1-e^{-m^{3\beta}/2}$ fraction of the choices of $y$,
\begin{equation}
\label{eqn:statX}
\pr_X[h(X+y) \in T_y] - \pr_U[U \in T_y] \geq 1 - e^{-m^{3\beta}/2} - 2^{-\Omega(m^{1 - 2\beta})}.
\end{equation}
To finish the proof, we show how to replace $X$ with another variable $\overline{X}$ of min-entropy at least $1.5m$ that is statistically close to it. We define $\overline{X}$ as follows: First, choose $X$ from the $p$-biased distribution. If the Hamming weight of $X$ is at least $0.9pn$, set $\overline{X} = X$. Otherwise, let $\overline{X}$ be uniformly random in $\{0,1\}^n$. We prove the following claim in Appendix~\ref{app:minent}:
\begin{claim}
\label{claim:minent}
$\overline{X}$ has min-entropy at least $1.5m$.
\end{claim}
Clearly the same conclusion holds for the distribution $\overline{X} + y$. The statistical distance between $X$ and $\overline{X}$ is upper bounded by the probability that $X$ has Hamming weight less than $0.9pn$. By Chernoff bounds, the probability of this is at most $\exp(-\Omega(pn))$, which using the lower bound (\ref{eqn:pbounds}) is at most $\exp(-\Omega(m/\log(n/m))) = \exp(-m^{\Omega(1)})$ (since $m \geq n^{-0.99}$). Applying the triangle inequality, for all $y$ satisfying (\ref{eqn:statX}) we have
\[ \pr_{\overline{X}}[h(\overline{X}+y) \in T_y] - \pr_U[U \in T_y] \geq 1 - e^{-m^{\Omega(1)}}. \]
We conclude that the expected statistical distance between $h(\overline{X}+y)$ and $U_m$ for a random choice of $y$ is at least $(1- e^{-m^{\Omega(1)}})(1- e^{-m^{\Omega(1)}})=1- e^{-m^{\Omega(1)}}$.
\section{Proof of Theorem~\ref{thm:upper2}}
\label{sec:upper2}
As in the proof of Theorem~\ref{thm:upper}, it is sufficient to show for every set $S$ of size $2^k$ and every $x_0$ in $S$ and $r_0$ in $\{0,1\}^s$,
\[ \Pr_{M, X, R}[MX + BR = Mx_0 + Br_0] \leq \frac{1+(1/\delta)\cdot 2^{-k-s+m}}{2^m}\]
where the probability is taken over the random matrix $M$, $X$ chosen uniformly from $S$ and $R$ chosen uniformly from $\{0,1\}^s$.
Assume that $p \leq 1/2$. Let $S_0$ be the set $\{x + x_0: x \in S\}$. Then
\[
\Pr_{M, X, R}[MX + BR = Mx_0 + Br_0] = \Pr_{M, X, R}[M(X + x_0)= B(R+r_0)] =\Pr_{M, Y, R}[MY = BR]
\]
where $Y$ is a random element from $S_0$. Let $M_i,B_i$ denote the $i$th row of $M$ and $B$. Then
\begin{multline*}
\Pr[MY = BR] = \E_{M,Y,R} [\prod_{i=1}^{m}\frac{1+(-1)^{M_iY + B_iR}} {2}] \\
= \frac{1}{2^m} \sum_{T\subseteq [m]} \E_{M, Y, R}\bigl[(-1)^{\sum_{i\in T}M_iY + B_iR}\bigr]
= \frac{1}{2^m} \sum_{T\subseteq [m]} \E_{M, Y}[(-1)^{\sum_{i\in T}M_iY}]\E_R[(-1)^{\sum_{i\in T}B_iR}].
\end{multline*}
Since any $t=m/2K$ rows of $B$ are linearly independent, for every nonempty $T$ of size at most $t$, $\sum_{i\in T} B_i \neq 0$ and so $\E[(-1)^{\sum_{i\in T}B_iR}]=0$. On the other hand for every $T$ of size at least $t$ we have
\begin{multline*}
\E_{M, Y}[(-1)^{\sum_{i\in T}M_iY}]
= \frac{1}{2^k}\sum_{y\in S_0}\E_M[(-1)^{\sum_{i\in T}M_iy}]
= \frac{1}{2^k} \sum_{y\in S_0}(1-2p)^{|y|\cdot|T|} \\
\leq \frac{1}{2^k}\sum_{y\in \{0,1\}^n}(1-2p)^{t|y|}
\leq \frac{1}{2^k}(1+(1-2p)^t)^n
\leq \frac{e^{ne^{-2pt}}}{2^k}.
\end{multline*}
Since $B$ has full rank, the condition $\sum_{i \in T} B_i = 0$ is satisfied for at most $2^{m-s}$ sets $T$. Hence,
\begin{align*}
\sum_{T\subseteq [m]} \E_{M,Y}[(-1)^{\sum_{i\in T}M_iY}] \E_R[(-1)^{\sum_{i\in T}B_iR}]
&= 1+\sum_{T:|T|>t} \E_{M,Y}[(-1)^{\sum_{i\in T}M_iY}] \E_R[(-1)^{\sum_{i\in T}B_iR}] \\
& \leq 1+\sum_{T:|T|>t} \frac{e^{ne^{-2pt}}}{2^k} \abs{\E_R[(-1)^{\sum_{i\in T}B_iR}]} \\
& \leq 1+ 2^{m-s} \cdot \frac{e^{ne^{-2pt}}}{2^k} \\
&= 1+ \frac{e^{ne^{-2pt}}}{2^{s+k-m}}.
\end{align*}
Plugging in $t= \frac{m}{2K}$ and $p =\frac{K}{m} \cdot \ln\frac{n}{\ln{c}}$ we get the desired bound.
\section{Local pseudorandom generators from local one-way functions}
\label{sec:applications}
\label{sec:prg}
A sequence of correlated random variables $X_1, \dots, X_m$ taking values in $\{0,1\}^n$ has {\em $(s, \varepsilon)$ conditional pseudo-min-entropy} $r$ if for every $1 \leq i \leq m$, there exists a random variable $Y_i$ jointly distributed with $X_1, \dots, X_{i-1}$ such that the min-entropy of $Y$ conditioned on any choice of $X_1, \dots, X_{i-1}$ is at least $r$ and for every circuit $D$ of size $s$,
\[ \bigabs{\pr[D(X_i) \mid X_1, \dots, X_{i-1}] - \pr[D(Y_i) \mid X_1, \dots, X_{i-1}]} \leq \varepsilon. \]
Vadhan and Zheng~\cite{VZ} give the following construction of conditional pseudo-min-entropy sequences from a one-way function $f\colon \{0,1\}^n \to \{0,1\}^n$. Let $z_i, 1 \leq i \leq t$ be the random strings
\[ z_i = {}_{o_i}\lfloor f(x_{i1}) \circ x_{i1} \circ \dots \circ f(x_{ik}) \circ x_{ik}\rfloor_{n-o_i} \]
where $1 \leq o_i \leq n$ is an offset, $x_{i1}, \dots, x_{ik}$ are random strings, and ${}_{f}\lfloor y\rfloor_\ell$ denotes truncating the first $f$ and last $\ell$ bits of $y$ respectively. Let $X_j = z_{1j}z_{2j}\dots\/z_{tj}$, where $z_{ij}$ denotes the $j$th bit of $z_i$. Vadhan and Zheng prove the following theorem (we state it in the nonuniform setting).
\begin{theorem}[Vadhan and Zheng]
Suppose $f\colon \{0,1\}^n \to \{0,1\}^n$ is computable by a circuit of size $\mathrm{poly}(n)$ and is hard to invert on a $1/s$ fraction of inputs by circuits of size $s$.
There exists offsets $o_1, \dots, o_t$ such that for every $\varepsilon$, $X_1, \dots, X_m$ has $(s^{\Omega(1)}/\mathrm{poly}(n\varepsilon), \varepsilon)$ conditional pseudo-min-entropy at least $t(1/2 + \Omega((\log s)/n))$ where $k = O(n/\log{s})$, $t = O((n/\log{s})^2\log^2{n}\log{(1/\varepsilon)})$ and $m=2(k-1)n$.
\end{theorem}
\iffalse
\begin{theorem}[Haitner, Reingold, and Vadhan]
\label{theorem:entropytomin}
Suppose $G_{nb}\colon \{0,1\}^n \to \{0,1\}^m$ is computable by a circuit of size $\mathrm{poly}(n)$ such that $G_{nb}$ has $(T,\varepsilon)$ next block pseudoentropy at least $n+\Delta$. For every $\varepsilon$, set $u = O(n/\Delta)$ and $t = O((m/\Delta)^2\log^2{n}\log{(1/\varepsilon')})$. Then there exist offsets $o_1, \dots, o_t$ such that , $X_1, \dots, X_{(u-1)m}$ has $(T-O(umt), t^2(u\varepsilon+\varepsilon'+ 2^{-\Omega(t)}))$ next-block pseudo-min-entropy at least $t(n/m+\Omega(\Delta/m))$.
\end{theorem}
\fi
The following claim was proved in the uniform setting by Haitner, Reingold, and Vadhan. We need a nonuniform version of it, whose proof is analogous. We include it at the end of this section for completeness.
\begin{claim}
\label{claim:owfhash}
Suppose $X_1, \dots, X_m$ (where $X_i$ takes values in $\{0,1\}^t$) has $(T,\varepsilon_1)$ conditional pseudo-min-entropy $\alpha$. Let $H$ be an extractor family for min-entropy $\alpha$ with error $\varepsilon_2$ so that every function in $H$ is computable in size $T_0$. Then with probability at least $1/2$ over the choice of $H$ the distribution $(H(X_1, R_1), \dots, H(X_m, R_m))$ is $(T - mT_0, m\varepsilon_1+2m^2\varepsilon_2)$-pseudorandom where $R_1,\dots, R_m \sim \{0,1\}^r$.
\end{claim}
We instantiate Claim~\ref{claim:owfhash} with the function family from Theorem~\ref{thm:upper2}, where we set the output length of the function to be $t(1/2+\Omega((\log{s})/n))+r$ and let $d$ be the entropy loss. We obtain the following consequence for the function
\[ G(x_{11}, \dots, x_{tk}, r_1,\dots, r_{m}) = (H(X_1, r_1 ), \dots, H(X_{m}, r_{m})). \]
\begin{proposition}
\label{prop:owfprg}
Suppose $f\colon \{0,1\}^n \to \{0,1\}^n$ is an $\ell$-local function computable by a circuit of size $\mathrm{poly}(n)$ and is hard to invert on a $1/s$ fraction of inputs by circuits of size $s$. With probability at least $1/2$ over the choice of $H$, $G\colon \{0,1\}^{nkt+ mr} \to \{0,1\}^{nkt(1 + \Omega((\log s)/n))+mr}$ is an $O(\ell \cdot (nkt\ln{({t/\ln{c}})}+ mr))$-sparse,
$(s^{\Omega(1)}/\mathrm{poly}(n\varepsilon), \mathrm{poly}(n)(\varepsilon + \sqrt{c}2^{-d/2}))$ pseudorandom generator where $k = O(n/\log{s})$, $t = O((n/\log{s})^2\log^2{n}\log{(1/\varepsilon)})$ and $m=2(k-1)n$.
\end{proposition}
\iffalse
\begin{proposition}
\label{prop:owfprg}
Suppose $G_{nb}\colon \{0,1\}^n \to \{0,1\}^m$ is computable by a circuit of size $\mathrm{poly}(n)$ and $\ell$-local such that $G_{nb}$ has $(T,\varepsilon)$ next block pseudoentropy at least $n+\Delta$. For every $\varepsilon'$, set $u = O(n/\Delta)$ and $t = O((m/\Delta)^2\log^2{n}\log{(1/\varepsilon')})$. With probability at least $1/2$ over the choice of $H$,
\[G\colon \{0,1\}^{nut+(u-1)ns} \to \{0,1\}^{nut(1 + \Omega(\Delta/n))+(u-1)ns}\]
is an $O(\ell \log(n/\log{1/\varepsilon'}))$-local, $(T-n^{O(1)}, n^{O(1)}(\varepsilon+\varepsilon'))$ pseudorandom generator.
\end{proposition}
Vadhan and Zheng gave simple construction of next-bit pseudoentropy generator which is a next block pseudoentropy generator with block length $1$ bit from any one way function.
\begin{theorem} [Vadhan and Zheng]
Let $f\colon\{0,1\}^n\rightarrow\{0,1\}^n$ be $(T,\gamma)$ one-way. Then $(f(U_n),U_n)$ has $(t',\varepsilon)$ next-bit pseudoentropy at least $n+\log(1/\gamma)-\varepsilon$, for $t'=t^{\Omega(1)}/\mathrm{poly}(n, 1/\varepsilon)$.
\end{theorem}
Combining with Proposition~\ref{prop:owfprg}, we can get a local pseudorandom generator from a local one-way function. In particular, we can get a linear-stretch pseudorandom generator from an exponentially hard one-way function which can be computed in $\mathrm{NC}^0$.
\fi
We can improve the locality of $G$ at the expense of increasing its input and output length by the factor of $O(\ln(t/\ln{c}))$ via the following transformation of Applebaum, Ishai, and Kushilevitz. For every output of $G$, which is obtained by applying a sparse linear transformation to some $X_j$ and therefore has the form
\[ X_{jk_1} + \dots + X_{jk_t} \]
introduce auxiliary new inputs $r_{j3}, r_{j4}, \dots, r_{j(t-1)}$ for $G$ and replace its corresponding output by the tuple
\[ (X_{jk_1} + X_{jk_2} + r_{j3}, r_{j3} + X_{jk_3} + r_{j4}, \dots, r_{j(t-1)} + X_{jk_{t-1}} + X_{jk_t}). \]
Call this new function $G'$. Applebaum et al. show that if $G$ is $(s^{\Omega(1)}, s^{-\Omega(1)})$-pseudorandom, so is $G'$. Since every bit of $X_j$ comes either from some input $x_i$ or from some output $f(x_i)$, it follows that if $f$ has locality $\ell$, then $G'$ has locality $3\ell$.
\begin{proof}[Proof of Claim~\ref{claim:owfhash}]
Let $Y_i$ be the conditional min-entropy model for $X_i$. We consider the hybrid distributions
\begin{align*}
X^{(i)} &= (H(X_1, R_1), \dots, H(X_{i-1}, R_{i-1}), H(X_i, R_{i}), U_{i+1}, \dots, U_m) \quad\text{and} \\
Y^{(i)} &= (H(X_1, R_1), \dots, H(X_{i-1}, R_{i-1}), H(Y_i, R_{i}), U_{i+1}, \dots, U_m)
\end{align*}
where $U_1, \dots, U_m$ are uniformly random and independent. By the definition of conditional pseudo-min-entropy, for every $i$ the distributions $X^{(i)}$ and $Y^{(i)}$ are $(T - mT_0, \varepsilon_1)$-indistinguishable. Because $\mathcal{H}$ is an extractor family, the distributions $(H, H(Y_i, R_i) \mid X_1, \dots, X_{i-1})$ and $(H, U_i)$ are within statistical distance $\varepsilon_2$ for any choice of $X_1, \dots, X_{i-1}$. It follows that $(H, Y^{(i)})$ and $(H, X^{(i-1)})$ are within statistical distance at most $\varepsilon_2$, so by Markov's inequality $Y^{(i)}$ and $X^{(i-1)}$ are within statistical distance $2m\varepsilon_2$ with probability at least $1 - 1/2m$ over the choice of $H$. By a union bound, with probability at least $1/2$ over the choice of $H$, $Y^{(i)}$ and $X^{(i-1)}$ are $2m\varepsilon_2$-statistically close for all $i$. For such a choice of $H$, by the triangle inequality $X^{(m)}$ is $(T - mT_0, m\varepsilon_1+2m^2\varepsilon_2)$ indistinguishable from $X^{(0)}$. Since $X^{(m)} = (H(X_1,R_1), \dots, H(X_m, R_m))$ and $X^{(0)}$ is the uniform distribution, we obtain the desired conclusion.
\end{proof}
\medskip
\paragraph{Acknowledgments} We would like to thank Rafail Ostrovsky for asking about the existence of a local extraction alternative to hash functions, and Elchanan Mossel, Oded Regev, Alon Rosen, Yuval Ishai, and Sid Jaggi for helpful discussions.
\bibliographystyle{alpha}
|
1,314,259,995,039 | arxiv | \section{Introduction and outline of the method}
\label{intro}
Let $T>0$ and $\Omega\subset\subset\tilde{\Omega}$ be domains in $\mathbb R^n$ with smooth boundaries. We assume $n\geq 2$. Given any
$$ f\in L^2(\mathbb R^{1+n})\quad \text{with} \quad \supp f \subset (0,T)\times (\tilde{\Omega}\setminus\overline{\Omega}),$$
we consider the wave equation
\begin{equation}\label{pf}
\begin{aligned}
\begin{cases}
(\Box+V(t,x))u=f(t,x),
& (t,x) \in (0,T)\times \mathbb R^n,
\\
u(0,x)=\partial_tu(0,x)=0,\,
& x \in \mathbb R^n
\end{cases}
\end{aligned}
\end{equation}
where $\Box=\partial_t^2-\Delta_x$ is the wave operator and $V\in L^\infty((0,T)\times \Omega)$ is an a priori unknown function. This problem admits a unique solution $u$ in the energy space
\begin{equation}
\label{sobolev_space}
\mathcal C^1([0,T];L^2(\mathbb R^n)) \cap \mathcal C([0,T];H^1(\mathbb R^n)).
\end{equation}
Moreover, $u(t,\cdot)$ is compactly supported for each $t \in [0,T]$ and the following bounds hold:
\begin{equation}
\label{energy}
\norm{u}_{\mathcal C^1(0,T;L^2(\mathbb R^n))} + \|u\|_{\mathcal C(0,T;H^1(\mathbb R^n))}\leq C \|f\|_{L^2((0,T)\times (\tilde{\Omega}\setminus\overline{\Omega}))},
\end{equation}
where $C$ is a positive constant depending on the geometry and $\|V\|_{L^2((0,T)\times \Omega)}$.
In the present paper we consider the following natural inverse problem: does there exist a universal source function $f \in L^2((0,T)\times (\tilde{\Omega}\setminus\overline{\Omega}))$, only depending on $T$, $\Omega$ and $\tilde \Omega$, such that the knowledge of $u$ restricted to $(0,T)\times \mathcal O$, with $\mathcal O\subset \tilde{\Omega}\setminus\overline{\Omega}$ an open subset, determines uniquely the unknown potential $V$?
\subsection{Main results}
There is a natural obstruction to uniqueness for the potential $V$. Namely, due to finite speed of propagation for the wave equation, the knowledge of $u_{|(0,T)\times(\tilde{\Omega}\setminus\overline{\Omega})}$ contains no information about the potential on the set
$$ \{ (t,x) \in (0,T)\times \Omega\,|\, 0<t<\dist{(x,\partial \Omega)}\quad \text{or}\quad T-\dist{(x,\partial\Omega)}<t<T\}.$$
We refer the reader to \cite[Section 1.1]{Kian1} for more details. Thus, the optimal domain for recovering the potential function will be the complement of this set that is given by
$$\mathcal D= \{(t,x) \in (0,T)\times \Omega\,|\, \dist{(x,\partial \Omega)}<t<T-\dist{(x,\partial\Omega)}\}.$$
This paper is concerned with the resolution of the question posed above in the optimal set $\mathcal D$. We remark that the main complexity of this single source inverse problem stems from the fact that it is a formally determined inverse problem. Heuristically, given any fixed source function $f$, the solution $u$ to \eqref{pf} and the unknown potential $V$ are both functions of $1+n$ variables. Our main result can be stated as follows.
\bigskip
\begin{theorem}
\label{t0}
Let $\Omega \subset\subset \tilde\Omega$ be domains in $\mathbb R^n$ with smooth strictly convex boundaries and let $T>\textrm{Diam}(\Omega)$. Then there exists a function $f \in L^2(\mathbb R^{1+n})$, with $\supp f \subset (0,T)\times (\tilde{\Omega}\setminus\overline{\Omega})$, such that given any
\begin{equation}\label{V_space} V_j \in \mathcal C^4([0,T]\times\mathbb R^n)\cap \mathcal C([0,T];\mathcal C^4_0(\Omega)),\quad j=1,2,\end{equation}
the following injectivity result holds,
\begin{equation}\label{t0a}u_1=u_2,\quad \text{on $(0,T)\times(\tilde{\Omega}\setminus\overline{\Omega})$}\quad\Longrightarrow \quad V_1=V_2 \quad \text{on $\mathcal D$}.\end{equation}
Here, $u_j$, $j=1,2$, is the unique solution to the wave equation \eqref{pf} in energy space \eqref{sobolev_space} subject to $V=V_j$ and source term $f$.
\end{theorem}
Note that the result of Theorem \ref{t0} is stated with a single measurement on a neighborhood of the lateral boundary $(0,T)\times\partial \Omega$ of the solution of \eqref{pf} subjected to our universal source $f$. As a direct consequence of Theorem \ref{t0}, we can show that, when $T$ is large enough, it is possible to recover uniquely the coefficient $V$ on some subset of $\mathcal D$ from a single measurement on a neighborhood of the lateral boundary $(0,T)\times\gamma$ with $\gamma$ an arbitrary open subset of $\partial \Omega$. This result can be stated as follows.
\begin{corollary}\label{c1} Let the condition of Theorem \ref{t0} be fulfilled, fix $f \in L^2(\mathbb R^{1+n})$ the universal source introduced in Theorem \ref{t0} and assume that $\tilde{\Omega}\setminus \overline{\Omega}$ is connected. Consider $\mathcal O$ an arbitrary open subset of $\tilde\Omega\setminus\Omega$, $T_1>\textrm{Diam}(\Omega)$ and $\mathcal D_{T_1}$ a subset of $(0,T)\times\Omega$ given by
$$\mathcal D_{T_1}= \{(t,x) \in (0,T_1)\times \Omega\,|\, \dist{(x,\partial \Omega)}<t<T_1-\dist{(x,\partial\Omega)}\}.$$
Assume that the following condition is fulfilled
\begin{equation}\label{c1a} T>T_1+\sup_{x\in\overline{\tilde{\Omega}}\setminus \Omega}\textrm{\emph{dist}}(x,\mathcal O),\end{equation}
where \emph{dist} denotes the distance function on $\tilde{\Omega}\setminus \overline{\Omega}$. Then, for any $V_j$ in the class \eqref{V_space}, $j=1,2$, and for $u_j$ solving \eqref{pf} with $V=V_j$ and source term $f$, there holds,
\begin{equation}\label{c1b}u_1=u_2,\quad \text{on $(0,T)\times\mathcal O$}\quad\Longrightarrow \quad V_1=V_2 \quad \text{on $\mathcal D_{T_1}$}.\end{equation}
\end{corollary}
\subsection{Previous literature}
The recovery of coefficients appearing in hyperbolic equations from boundary measurements, or the so-called Dirichlet-to-Neumann map, is an inverse problem with a rich recent literature. It physically arises in the study of recovery of information about signal propagation, such as determining the evolving density of an in-homogeneous medium or determining the wave speed of sound propagating in different layers of earth. It is also related to the challenging inverse problem of determining non-linear terms in hyperbolic equations (see e.g. \cite{Kian3}). These non-linear questions are motivated in part by the study of vibrating systems or the detection of perturbations arising in electronics, such as the telegraph equation or the study of semi-conductors (see for instance \cite{CH}).
Broadly speaking, the literature of inverse problems for hyperbolic equations can be divided into two categories, namely that of recovering time-independent or time-dependent coefficients, and the majority of the literature in both cases uses infinite measurements. Here, by infinite measurements we mean that an infinite number of sources $f$ in \eqref{pf} are required to deduce uniqueness of the coefficient $V$.
We begin with reviewing the literature of uniqueness results with infinite measurements. In the time-independent category, the first class of uniqueness results were obtained in the works \cite{Bel, RS1,I1}. We mention also the subsequent works \cite{AS,BCY,Ki,SU1,SU2} that also provide stability estimates from full or partial knowledge of the hyperbolic Dirichlet-to-Neumann map. In particular, the approach of \cite{Bel} is based on the discovery of the powerful boundary control method. At its core, this method is based on combining controllability theory and unique continuation for the wave equation together with boundary integral identities. This approach even extends to the recovery of a Riemannian manifold, up to isometry, from boundary measurements for the wave equation with a variable coefficient principal part. We refer to \cite{BelK,KKL} for applications of the boundary control method to the recovery of a Riemannian manifold. This method also allows unique recovery of coefficients in the case where the sources and the receivers are located on disjoint sets, see for example \cite{KKLO,LO}.
In the case of time-dependent coefficients, the boundary control method is less successful, even when the principal part of the wave equation has constant coefficients, as in \eqref{pf} for example. Indeed, the method relies on the unique continuation result of Tataru \cite{Ta}, that fails to hold in general, unless the time-dependence of all the coefficients is real-analytic (see the general counter examples of \cite{AB}). In the case that the coefficients depend analytically on the time variable we mention the works \cite{E1,E2,E3} where the author extended the boundary control method to these class of coefficients.
For more general time-dependent coefficients, the approach of \cite{RS1,St} based on the construction of geometric optics solutions, has been successful in deriving uniqueness and stability results. These results and many of the subsequent works are based on the principle of propagation of singularities for the wave equation and extend to the case of variable coefficient wave equations, where the problem of recovering coefficients reduces to injectivity of certain geometrical data on Lorentzian manifolds, see for example \cite{FIKO,FIO,FK,KO,LOSG}. We remark that all of these works require strong geometrical assumptions and that in general recovering time-dependent coefficients for variable coefficient wave equations remains a daunting prospect.
All of the aforementioned results are stated with infinitely many measurements (or sources). As discussed above and specifically in the case of the wave equation with constant coefficient principal part as in \eqref{pf}, the recovery of coefficients has been well-understood both in the time-dependent or time-independent categories.
The story is vastly different when one considers a finite number of measurements, where there seems to be no result for recovering a time-dependent coefficient. In the time-independent category however, by applying the Bukhgeim-Klibanov approach of \cite{BK} that is based on Carleman estimates, some authors have considered the recovery of time-independent coefficients from a single measurement, see for example \cite{Klibanov0}. Since then this approach has been improved to include stability results by several authors. We refer the reader to the works of \cite{Bell,BY,IY,Ya,Ste2} for further results in this direction.
The Bukhgeim-Klibanov approach is based on linearizing the inverse problem and reformulating the problem into that of recovering a source term. In light of this, all the results obtained by this approach require a non-vanishing initial condition for the solution $u$. The presence of this non-vanishing initial condition corresponds to some a priori information on the inaccessible part (the part $x\in\Omega$) that makes these results more difficult to apply in reality.
As an alternative to the Bukhgeim-Klibanov approach, we also mention the works \cite{AS,CY,HLYZ,KLLY} where the authors considered an approach based on the construction of suitable input for proving recovery of time-independent coefficients appearing in diffusion equations. Note that the approach of \cite{AS,CY,KLLY} is based on the analyticity in time of the solution which does not hold for hyperbolic equations.
Within the time-independent category, a few authors have also considered approaches based on a single measurement of the solution to the wave equation subjected to a point source, represented by a Dirac delta distribution, on the boundary or inside the domain. In contrast to the natural energy space \eqref{sobolev_space} for \eqref{pf} that we consider in this paper, these works are based on extending the solution space to \eqref{pf} in a distributional sense to allow very singular sources. In this setting one of the first results that we can mention is the one of \cite{SS} where partial information about the coefficient of a hyperbolic equation can be recovered from a single measurement associated with a boundary point source. In \cite{RY} the authors proved that under an additional smallness assumption on the unknown coefficient, it is possible to stably recover it from a single boundary measurement of the solution subjected to an internal point source. In the same spirit, the works of \cite{Ra1,Ra2,RSacks} were devoted to the unique recovery of a special class of zeroth order time-independent coefficients.
Finally, we mention the work of \cite{HLO} where the recovery of a time-independent Riemannian metric is considered from a single measurement. There, the single measurement corresponds to a source term that is the sum of a countable number of Dirac delta distributions in time and space.
\subsection{A comparison with the previous literature}
Let us now discuss the novelties of our main result. Firstly, and to the best of our knowledge, Theorem \ref{t0} corresponds to the first result for unique recovery of a general time-dependent coefficient from a single measurement subject to the wave equation, or any other evolution PDE.
In fact even within the class of time-independent coefficients, Theorem \ref{t0} appears to be the first single measurement uniqueness result that does not require initial time excitation of solutions, and that also provides a source function that is compatible with the natural energy class \eqref{sobolev_space} for solutions of the wave equation with vanishing initial conditions. In view of these features, even for time-independent coefficients, the statement of our uniqueness result can make it more suitable for applications.
As a second novelty, we mention that Theorem \ref{t0} proves uniqueness of time-dependent zeroth order coefficients in the optimal region $\mathcal D$. Even in case of infinite measurements, most of the uniqueness results for time-dependent coefficients either require information at $t=0,T$, see for example \cite{Kian,Kian1,Kian2,KO}, or require the knowledge of the coefficient outside of $\mathcal D$, \cite{Ben,FIKO,FK,RR}. In the latter group, uniqueness results are usually provided on a sub-optimal region that is approximately equal to
\begin{equation}\label{MU}\left\{(t,x)\in\left(\frac{d}{2},T-\frac{d}{2}\right)\times\Omega:\ \textrm{dist}(x,\partial\Omega)<\min\left(t-\frac{d}{2},T-t-\frac{d}{2}\right)\right\},\end{equation}
with $d=\textrm{Diam}(\Omega)$ and for $T>d$.
Finally, we also mention that Corollary \ref{c1} provides a partial data version of our main result, where the measurements associated to the single source are restricted to a neighborhood of an arbitrary portion of the boundary $\partial\Omega$, provided that the time $T$ is large enough. In particular, for coefficients that are real analytic with respect to the time variable, the result of Corollary \ref{c1} corresponds to the full recovery of the coefficient on the full spacetime domain $(0,T)\times \Omega$. Thus, Corollary \ref{c1} can also be viewed as a single boundary measurement formulation, in terms of localization of the measurement, of the work of \cite{E1,E2} that is devoted to the recovery of time analytic coefficients from infinitely many measurement on an arbitrary portion of the boundary.
Our proof explicitly constructs the universal source function $f$ and also provides an algorithm for reconstructing $V$. We remark that the domain $\tilde \Omega$ in the statement of Theorem~\ref{t0} could be as small as one wishes, or in other words the source $f$ can be supported in a very small neighborhood of $(0,T)\times\partial \Omega$. We believe that the approach here could be pushed in principle to allow unique recovery of time-dependent coefficient by a single {\em boundary} measurement as well, instead of measurements that are associated to a source located near the boundary. We leave this, as well as the extension of our result to the setting of Lorentzian manifolds, as directions for future research.
\subsection{Outline of the paper}
Let us briefly sketch the methodology employed in proving Theorem~\ref{t0}. We recall first that the term {\em light ray} refers to a curve in spacetime that is a geodesic with respect to the Minkowski metric and whose tangent vector at each point along the curve is null. We will start with a countable collection of light rays that densely pack the domain $\mathcal D$. Precisely, this means that given any small positive $\epsilon$ and any light ray $\gamma$ in $\mathcal D$, there will be a light ray in the collection that stays within a distance $\epsilon$ of $\gamma$. Next, we will consider a universal source function that is constructed based on combining infinitely many source functions that each generates a geometric optic solution to \eqref{pf} concentrating along a light ray in the collection. We show that the solution to \eqref{pf} corresponding to this universal source determines the integrals of the unknown function $V$ along all the light rays in the collection (see Theorem~\ref{t1}). The main theorem then follows by using the density of the rays in the collection and injectivity of the light ray transform, see for example \cite{Be,Ste}.
The paper is organized as follows. In Section~\ref{prelim_sec}, we begin with introducing a few notations used in the paper and then define the admissible collection of light rays that tightly pack the spacetime domain. Section~\ref{geo_optics_sec} is concerned with a review of the classical geometric optics solutions to \eqref{pf} also known as wave packets. The construction of wave packets in this paper is modified to allow thinner supports for these solutions as the frequency increases. Next, we show that it is possible to construct explicit sources that are supported in the set $(0,T)\times (\tilde{\Omega}\setminus \overline{\Omega})$ and such that the solution to \eqref{pf} subject to these source functions generate the desired wave packets. In section~\ref{universal_source_sec} we construct the universal source function $f$ that combines the geometric optic solutions via a double infinite summation corresponding to the set of light rays and the set of frequencies of the geometric optic solutions associated to each light ray. Section~\ref{asymptotic_analysis_sec} is concerned with the proof of Theorem~\ref{t1}, showing that the knowledge of $u_{|(0,T)\times (\tilde{\Omega}\setminus \overline{\Omega})}$, with $u$ solving \eqref{pf}, uniquely determines the integrals of $V$ along all the light rays in the collection. The proof of the main theorem follows immediately from combining Theorem~\ref{t1} and injectivity of the light ray transform. This is sketched in Section~\ref{proof_section}, where we also prove Corollary \ref{c1}.
\section{Preliminaries}
\label{prelim_sec}
\subsection{Notation}
Let us introduce a few notations that will be used in the paper. As already discussed, we use $$(t,x)=(t,x^1,x^2,\ldots,x^n)$$ for the spacetime coordinate system with $t \in \mathbb R$ and $x \in \mathbb R^n$. Given two vectors $v,w \in \mathbb R^{n}$, their inner product and norm is defined respectively by the expressions
$$ v\cdot w = \sum_{j=1}^n v_j\,w_j\quad\text{and}\quad |v|=\sqrt{v\cdot v}.$$
Throughout the paper we use the notation $\chi$ to stand for a smooth non-negative cutoff function satisfying
\begin{equation}
\label{cutoff}
\|\chi\|_{L^2(\mathbb R)}=1\quad\text{and}\quad
\chi(t)= \begin{cases}
1 & \text{if}\,\, |t|\leq\frac{1}{8\sqrt{n}},\\
0 & \text{if}\,\,|t|\geq \frac{1}{4\sqrt{n}}.
\end{cases}
\end{equation}
We also denote by $\mathbb N$ the set $\{1,2,\ldots\}$.
As already discussed in the introduction, the construction of the universal source function $f$ in this paper involves the summation of a countable number of smooth sources each of which generates a wave packet near a light ray. For this reason it is important to use a consistent notation for convergence of infinite series. Since we require $f \in L^2(\mathbb R^{1+n})$ with $\supp f \subset (0,T)\times (\tilde{\Omega}\setminus\overline{\Omega})$, we will be working with convergence of source terms in the $L^2((0,T)\times (\tilde{\Omega}\setminus\overline{\Omega}))$ topology. We formally write
$$ f= \lim_{j\to \infty}f_j$$
to stand for convergence with respect to the $L^2((0,T)\times (\tilde{\Omega}\setminus\overline{\Omega}))$ topology of a sequence of functions $\{f_j\}_{j=1}^{\infty}\subset L^2((0,T)\times (\tilde{\Omega}\setminus\overline{\Omega}))$. For solutions to the wave equation \eqref{pf}, we will work with the natural Sobolev space \eqref{sobolev_space} and as such we formally write
$$ u =\lim_{j\to \infty}u_j$$
to stand for convergence with respect to the \eqref{sobolev_space} topology. We close this section by recording the following trivial lemma about convergence of solutions to the wave equation. We have included the proof for the sake of completeness.
\begin{lemma}
\label{conv_lem}
Let $\{f_j\}_{j=1}^{\infty} \subset L^2((0,T)\times (\tilde{\Omega}\setminus\overline{\Omega}))$ and assume that this sequence of sources converges to a source $f$ in this topology. Let $u_j$ denote the unique solution to \eqref{pf} with source $f_j$. Then, the sequence $\{u_j\}_{j=1}^{\infty}$ converges to a function $u$ with respect to the \eqref{sobolev_space} topology. Moreover, $u$ is the unique solution to \eqref{pf} subject to the source $f$.
\end{lemma}
\begin{proof}
Note that for each $j,k \in \mathbb N$, the function $u_j-u_k$ solves the wave equation with source function $f_j-f_k$. Therefore, the energy estimate \eqref{energy} applies to obtain
$$\|u_j-u_k\|_{\mathcal C([0,T];H^1(\mathbb R^n))\cap \mathcal C^1([0,T];L^2(\mathbb R^n))} \leq C \|f_j-f_k\|_{L^2((0,T)\times (\tilde{\Omega}\setminus\overline{\Omega}))}.$$
Therefore we deduce that the sequence $\{u_j\}_{j=1}^{\infty}$ is a Cauchy sequence with respect to the \eqref{sobolev_space} topology. We now define
$$ u = \lim_{j\to \infty} u_j$$
and proceed to prove that $u$ satisfies \eqref{pf} with source term $f$. The initial conditions are clearly satisfied. To prove $(\Box+V) u =f$, it suffices to show that
$$ \int_{(0,T)\times\mathbb R^{n}}f\,v\,dx\,dt=\int_{(0,T)\times\mathbb R^{n}} a(u,v)\,dx\,dt\quad \quad \forall\, v \in \mathcal C^{\infty}_0([0,T]\times\mathbb R^{n})$$
where $$a(u,v)=-\frac{\partial u}{\partial t}\,\frac{\partial v}{\partial t}+\sum_{k=1}^n\frac{\partial u}{\partial x^k}\,\frac{\partial v}{\partial x^k }+V\,u\,v.$$
We assume without loss of generality that $\|v\|_{H^1((0,T)\times\mathbb R^{n})}=1$ and note that given any $j \in \mathbb N$:
\begin{multline*}
\left|\int_{(0,T)\times\mathbb R^{n}}(f\,v-a(u,v))\,dx\,dt\right|= \left|\int_{(0,T)\times\mathbb R^{n}}((f-f_j)\,v+a(u_j-u,v))\,dx\,dt\right|\\
< C\,(\|f-f_j\|_{L^2((0,T)\times (\tilde{\Omega}\setminus\overline{\Omega}))} + \|u-u_j\|_{ \mathcal C(0,T;H^1(\mathbb R^n))}\\
+\|u-u_j\|_{\mathcal C^1(0,T;L^2(\mathbb R^n))}).
\end{multline*}
The proof is completed since $f_j\to f$ and $u_j\to u$ in their respective topologies.
\end{proof}
\subsection{Constructing a countable dense set of light rays}
\label{ray_sec}
The aim of this section is to construct a countable family of light rays that tightly pack the set $(0,T)\times\Omega$ and also introduce some notation that will be used later in the paper. In what follows, a future pointing light ray is a curve $\gamma:\mathbb R\to\mathbb R^{1+n}$ given by the parametrization
$$ \gamma(s)= \gamma(0) + s\,(1,\xi)\quad s\in \mathbb R$$
for some unit vector $\xi \in \mathbb R^n$.
\\
Let $\mathcal T=\{t_j:\ j\in\mathbb N\}$ denote an ordering of the rational numbers in the interval $(0,T)$ and let $\mathcal P=\{p_j\in\partial\Omega: \ j\in\mathbb N\}$
denote a dense set of points on $\partial\Omega$. We consider the countable set of all future pointing light rays $\gamma:\mathbb R \to \mathbb R^{1+n}$, parametrized as above, that satisfy the following three properties:
\begin{itemize}
\item[(i)]{The intersection of $\gamma$ with $(0,T)\times\Omega$ lies in the set $\mathcal D$.}
\item[(ii)]{The earliest intersection of $\gamma$ with $\mathbb R\times \partial \Omega$ is the point $\gamma(0)\in \mathcal T\times \mathcal P$.}
\item[(iii)]{The projection of $\gamma$ onto the spatial coordinates is a straight line that contains two distinct points in $\mathcal P$.}
\end{itemize}
We consider an ordering of this countable set of light rays and denote it by $$\mathbb V=\{\gamma_j\}_{j=1}^{\infty}.$$
Given any $j \in \mathbb N$, we choose a point $$q_{j}=(s_j,x_j)=\gamma_j(\hat{s}_j) \in (0,T)\times (\tilde{\Omega}\setminus\overline{\Omega})$$ for some $\hat{s}_j<0$. Here, $s_j$ and $x_j$ are the time and spatial coordinates of $q_j$ respectively. We also choose a constant $\delta_j>0$ such that
$$B_{\delta_j}(q_{j})\subset (0,T)\times (\tilde{\Omega}\setminus\overline{\Omega})$$
and additionally that the intersection of $(0,T)\times \Omega$ with the tubular neighborhood of the ray $\gamma_j$ of radius $\delta_j$ lies in the set $\mathcal D$. Here $B_{\delta_j}(q_{j})$ denotes the ball of radius $\delta_j$ centered at the point $q_{j}$. Since $\mathbb V$ is countable, we can always choose the sequence $\{\delta_j\}_{j=1}^{\infty}$ to be strictly decreasing, that is to say
$$ \delta_1>\delta_2>\delta_3>\ldots.$$
Next and for the purpose of later application, we define two smooth functions $\zeta_{j,{\pm}}:\mathbb R^{1+n}\to \mathbb R$ that satisfy
\[
\zeta_{j,-}(t)= \begin{cases}
0 & \text{if}\,\, t\leq s_j-\frac{\delta_j}{4\sqrt{n}},\\
1 & \text{if}\,\,t\geq s_j.
\end{cases}
\]
and
\[
\zeta_{j,+}(t) = \begin{cases}
0 & \text{if}\,\, t\geq s_j+\frac{\delta_j}{4\sqrt{n}},\\
\ &\ \\
1 & \text{if}\,\,t \leq s_j+\frac{\delta_j}{8\sqrt{n}}.
\end{cases}
\]
Observe that since $\hat{s}_j<0$, it follows that $\zeta_{j,-}=1$ on the segment of the light ray $\gamma_{j}$ that lies inside the set $(0,T)\times \Omega$.
\section{Geometric optics}
\label{geo_optics_sec}
In this section, we fix $j\in \mathbb N$ and recall the geometric optics construction, with some modifications, for the wave equation
$$ \Box u+V u=0$$
that gives solutions concentrating on the light ray $\gamma_{j} \in \mathbb V$. By definition of the set $\mathbb V$ there exist unique indices $k_j,l_j,m_j \in \mathbb N$ with $l_j\neq m_j$ such that the light ray $\gamma_j$ is given by the parametrization
$$ \gamma_{j}(s)=(t_{k_j}+s,p_{l_j}+s\,\xi_j)\quad \text{for all} \quad s\in \mathbb R$$
where
$$ \xi_j = \frac{p_{m_j}-p_{l_j}}{|p_{m_j}-p_{l_j}|}.$$
Note that, by strict convexity of $\partial \Omega$, the light ray $\gamma_j$ intersects the boundary $(0,T)\times \partial\Omega$ precisely two times at the points $\gamma_j(0)$ and $\gamma_j(|p_{m_j}-p_{l_j}|)$. Moreover, by property (i) in the definition of $\mathbb V$, the light ray does not intersect the set $\{0,T\}\times\Omega$. \\
The geometric optics construction here is based on the ansatz
\begin{equation}
\label{ansatz}
\mathcal U_{j,\tau}(t,x)= e^{{\rm i}\tau (-t+\xi_j\cdot x)}v_{j,\tau}(t,x)=e^{{\rm i}\tau (-t+\xi_j\cdot x)}\left(\sum_{k=0}^2 \frac{v_{j,\tau}^{(k)}(t,x)}{\tau^k}\right)
\end{equation}
where $\tau>e$ is a parameter.
We write
\begin{equation}\label{conjugatego}
\begin{aligned}
&(\Box+V) (e^{{\rm i}\tau(-t+\xi_j\cdot x)}v_{j,\tau})= \\
&e^{{\rm i}\tau (-t+\xi_j\cdot x)} \left( -2{\rm i}\tau(\partial_tv_{j,\tau}+ \xi_j\cdot\nabla_x v_{j,\tau}) + (\Box+V) \,v_{j,\tau}\right).
\end{aligned}
\end{equation}
The amplitudes $v_{j,\tau}^{(0)}$, $v_{j,\tau}^{(1)}$ and $v_{j,\tau}^{(2)}$ are determined iteratively, based on the requirement that the expression \eqref{conjugatego} vanishes in powers of $\tau$ up to second order. In particular, this imposes the transport equation
\begin{align}\label{transp_a0}
\partial_t v_{j,\tau}^{(0)}+ \xi_j\cdot\nabla_xv_{j,\tau}^{(0)}=0
\end{align}
on $v_{j,\tau}^{(0)}$. To solve this equation, we first choose the vectors $e_{j,1},\ldots,e_{j,n-1}\in\mathbb R^n$ such that
$$\{\xi_j,e_{j,1},\ldots,e_{j,n-1}\}$$ form an orthonormal basis for $\mathbb R^n$. Next, we set
\begin{multline}\label{a_0}v_{j,\tau}^{(0)}(t,x)= \left(\frac{\log\tau}{\delta_j}\right)^{\frac{n}{2}}\chi[(\log\tau)\,\delta_j^{-1}(s_j-t+(x-x_j)\cdot\xi_j )]\times \\
\prod_{k=1}^{n-1}\chi[(\log\tau)\,\delta_j^{-1}(x-x_j)\cdot e_{j,k} ],
\end{multline}
where $\delta_j$, $s_j$, $x_j$ are as defined in Section~\ref{ray_sec} and the function $\chi$ is given by \eqref{cutoff}. Then (\ref{transp_a0}) holds and the amplitude $v_{j,\tau}^{(0)}(t,x)$ is supported in a tubular neighborhood of radius $\frac{\delta_j}{2\,\log\tau}$ around $\gamma_j$. We emphasize here that our construction of the leading amplitude is different from that of the classical geometric optic constructions, as the support of the geometric optic solution around $\gamma_j$ also depends on the frequency parameter $\tau$. Indeed, as $\tau$ grows, the support of the geometric optics also gets more localized around the light ray $\gamma_j$. This will be important in our analysis. Moving on, the subsequent terms $v^{(k)}_{j,\tau}$ with $k=1,2$ are constructed iteratively by solving the transport equations
\begin{equation}\label{transmin}
-2{\rm i} ( \partial_tv_{j,\tau}^{(k)}+\xi_j\cdot\nabla_xv_{j,\tau}^{(k)}) + (\Box+V) v_{j,\tau}^{(k-1)} =0.
\end{equation}
These transport equations can be solved uniquely, by imposing zero initial conditions on the hyperplane
$$
\Sigma_j = \{(t,x) \in \mathbb R\times\mathbb R^{n} \,|\,
t-s_j+(x-x_j)\cdot\xi_j= 0 \}.
$$
This yields
\begin{align}\label{def_ak}
v_{j,\tau}^{(k)}(s+\tilde{\tau},s\xi_j+y)=\frac{1}{2{\rm i}}\int_0^s ((\Box+V)v_{j,\tau}^{(k-1)})(\tilde{s}+\tilde{\tau},\tilde{s}\xi_j+y)\,d\tilde{s},
\end{align}
where $s \in \mathbb R$ and $(\tilde{\tau},y) \in(\mathbb R\times\mathbb R^n)\cap \Sigma_{j}$. It follows from (\ref{a_0}), via an induction, that also the subsequent amplitude terms are supported in a $\frac{\delta_j}{2\log\tau}$ tubular neighborhood of $\gamma_{j}$.
\begin{remark}
We emphasize that while the principal amplitude $v_{j,\tau}^{(0)}$ does not depend on $V$, the subsequent terms $v_{j,\tau}^{(1)}$ and $v_{j,\tau}^{(2)}$ involve $V$ and its derivatives. In particular $v_{j,\tau}^{(1)}$ depends on $V$ while $v_{j,\tau}^{(2)}$ depends on $V$ and its first and second order derivatives.
\end{remark}
\noindent We have the following bounds that follow directly from the expressions \eqref{a_0}--\eqref{def_ak}:
\begin{equation}
\label{amp_bounds}
\begin{aligned}
\|v^{(k)}_{j,\tau}\|_{\mathcal C^\ell((0,T)\times \tilde\Omega)} &\leq \kappa_{0,j}\, \,(\log\tau)^{\frac{n}{2}+2k+\ell} \quad \text{for $k=0,1,2$ and $\ell=0,1,2$}\\
\|v^{(k)}_{j,\tau}\|_{H^\ell((0,T)\times \tilde\Omega)} &\leq \kappa_{0,j} \,(\log\tau)^{2k+\ell} \quad \text{for $k=0,1,2$ and $\ell=0,1,2$}
\end{aligned}
\end{equation}
where $\kappa_{0,j}$ is a positive constant that is independent of the parameter $\tau$. Next, we use the definition of the combined amplitude term $v_{j,\tau}$ and the bounds above to deduce that
\begin{equation}
\label{amp_bound}
\begin{aligned}
\|v_{j,\tau}\|_{\mathcal C^{k}((0,T)\times\tilde\Omega)}&\leq \kappa_{1,j}\,(\log\tau)^{\frac{n}{2}+k} \quad \text{for $k=0,1,2$},\\
\|v_{j,\tau}\|_{H^{k}((0,T)\times\tilde\Omega)}&\leq \kappa_{1,j}\,(\log\tau)^{k} \quad \text{for $k=0,1,2$}
\end{aligned}
\end{equation}
where $\kappa_{1,j}$ is a positive constant that is independent of the parameter $\tau$. Similarly using equation \eqref{ansatz} together with the latter bound we deduce that
\begin{equation}
\label{principal_bound}
(\log\tau)^{\frac{n}{2}}\|\mathcal U_{j,\tau}\|_{H^{k}((0,T)\times\tilde{\Omega})}+ \|\mathcal U_{j,\tau}\|_{\mathcal C^{k}((0,T)\times\tilde{\Omega})}\leq \kappa_{2,j}\,\tau^{k}(\log\tau)^{\frac{n}{2}}\quad \text{for $k=0,1,2,$}
\end{equation}
where $\kappa_{2,j}$ is a positive constant that is independent of the parameter $\tau$. Moreover, equations (\ref{transp_a0}) and (\ref{transmin}), together with (\ref{conjugatego}) imply that
$$ (\Box+V)\,\mathcal U_{j,\tau}= \tau^{-2}\,e^{{\rm i} \tau(-t+\xi_j\cdot x)}(\Box+V)v_{j,\tau}^{(2)}$$
and therefore
\begin{align}\label{go_remainder}
\|(\Box+V)\,\mathcal U_{j,\tau}\|_{H^{1}((0,T) \times \tilde\Omega)} \leq \kappa_{3,j}\, \tau^{-1}\,(\log\tau)^{6},
\end{align}
where $\kappa_{3,j}$ is a positive constant that is independent of the parameter $\tau$.
Let us now consider the source term $f_{j,\tau}$ defined through the expression
\begin{equation}
\label{packets}
f_{j,\tau}(t,x) = \zeta_{j,+}(t)\,\Box(\zeta_{j,-}(t)\,\mathcal U_{j,\tau}(t,x)),\quad (t,x)\in\mathbb R^{1+n}
\end{equation}
where $\zeta_{j,\pm}$ are as defined in Section~\ref{prelim_sec}. From the definition of $\zeta_{j,\pm}$, we deduce that
\begin{equation}\label{sup1}f_{j,\tau}(t,x)=0,\quad t\notin\left[s_j-\frac{\delta_j}{4\sqrt{n}},s_j+\frac{\delta_j}{4\sqrt{n}}\right],\ x\in\mathbb R^n.\end{equation}
Then, from the condition imposed to the cut-off function $\chi$, we get
$$f_{j,\tau}(t,x)=0,\quad t\in\left(s_j-\frac{\delta_j}{4\sqrt{n}},s_j+\frac{\delta_j}{4\sqrt{n}}\right),\ |s_j-t+(x-x_j)\cdot\xi_j|\geq \frac{\delta_j}{4\sqrt{n}}$$
which implies that
\begin{equation}\label{sup2}f_{j,\tau}(t,x)=0,\quad t\in\left(s_j-\frac{\delta_j}{4\sqrt{n}},s_j+\frac{\delta_j}{4\sqrt{n}}\right),\ |(x-x_j)\cdot\xi_j|\geq \frac{\delta_j}{2\sqrt{n}}.\end{equation}
In the same way, for all $k=1,\ldots,n-1$, using the fact that
$$\chi[(\log\tau)\,\delta_j^{-1}(x-x_j)\cdot e_{j,k} ]=0,\quad |(x-x_j)\cdot e_{j,k}|\geq \frac{\delta_j}{2\sqrt{n}},$$
we obtain
\begin{equation}\label{sup3}f_{j,\tau}(t,x)=0,\quad t\in\mathbb R,\ |(x-x_j)\cdot e_{j,k}|\geq \frac{\delta_j}{2\sqrt{n}}.\end{equation}
Combining \eqref{sup1}--\eqref{sup3} with the fact that $\{\xi_j,e_{j,1},\ldots,e_{j,n-1}\}$ form an orthonormal basis for $\mathbb R^n$, we deduce that
\begin{equation}\label{sup4} f_{j,\tau}(t,x)=0,\quad t\in\mathbb R,\ |x-x_j|\geq \frac{\delta_j}{2}.\end{equation}
Here we use the fact that the condition $|x-x_j|\geq \frac{\delta_j}{2}$ implies that either
$$|(x-x_j)\cdot\xi_j|\geq \frac{\delta_j}{2\sqrt{n}}$$ or there exists $k\in\{1,\ldots,n-1\}$ such that
$|(x-x_j)\cdot e_{j,k}|\geq \frac{\delta_j}{2\sqrt{n}}.$
The identity \eqref{sup1} and \eqref{sup4} imply that $$\supp (f_{j,\tau}) \subset B_{\delta_j}(q_j)\subset (0,T)\times (\tilde{\Omega}\setminus\overline{\Omega}).$$
\begin{remark}
We emphasize that the source function $f_{j,\tau}$ is explicitly known, independent of the potential $V$, since it is supported in $B_{\delta_j}(q_j)$ and the function $\mathcal U_{j,\tau}$ is explicitly known here since its construction is local around $q_j$ and $V$ vanishes there.
\end{remark}
\noindent We also record that
\begin{equation}
\label{f_bound0}
\|f_{j,\tau}\|_{H^k((0,T)\times\tilde{\Omega})}\leq \kappa_{4,j}\,\tau^{1+k} \quad \text{for}\quad k=0,1.
\end{equation}
where $\kappa_{4,j}$ is a positive constant that is independent of the parameter $\tau$.
Next we define $u_{j,\tau}$ as the unique solution to equation \eqref{pf} subject to the source function $f_{j,\tau}$. Recalling the fact that $V$ vanishes in a $\delta_j$ neighborhood of $q_j$, we write
$$\begin{aligned}(\Box + V)(u_{j,\tau}-\zeta_{j,-}(t)\,\mathcal U_{j,\tau}) &= f_{j,\tau}- (\Box+V)(\zeta_{j,-}(t)\, \mathcal U_{j,\tau})\\
\ &=(\zeta_{j,+}(t)-1)\,(\Box+V)\,\mathcal U_{j,\tau},\end{aligned}$$
where we used the fact that $\zeta_{j,-}=1$ on a neighborhood of the support of $1-\zeta_{j,+}$.
Writing
\begin{equation}
\label{correction_term}
u_{j,\tau}(t,x)= \zeta_{j,-}(t)\,\mathcal U_{j,\tau}(t,x)+R_{j,\tau}(t,x),\quad (t,x)\in (0,T)\times \mathbb R^{n}
\end{equation}
and applying the bound \eqref{go_remainder} together with classical energy estimates for the wave equation, we deduce the following bounds for the correction term $R_{j,\tau}$:
\begin{equation}
\label{correction_bound}
\|R_{j,\tau}\|_{H^2((0,T)\times \tilde\Omega)} \leq \kappa_{5,j}\,\tau^{-1}(\log\tau)^6
\end{equation}
where $\kappa_{5,j}$ is a positive constant that is independent of the parameter $\tau$. Let us also recall from Section~\ref{ray_sec} that $\zeta_{j,-}=1$ on the segment of the light ray $\gamma_j$ that lies inside $(0,T)\times \Omega$. Thus, the source term $f_{j,\tau}$ generates a solution that is approximately equal to the geometric optic ansatz $\mathcal U_{j,\tau}$ on $(0,T)\times \Omega$.
Finally and for the sake of brevity, we define for each $j \in \mathbb N$, the positive constant $\kappa_j$ through the expression
\begin{equation}
\label{kappa}
\kappa_j =\delta_j^{-2}\cdot\max{\{\kappa_{0,j},\ldots,\kappa_{5,j}\}}.
\end{equation}
We mention in passing that $\kappa_j$ can for example be chosen to be $C\,\delta_j^{-\frac{n}{2}-8}$ where $C$ is a sufficiently large constant depending only on $T$, $\tilde\Omega$ and an a priori bound on $\|V\|_{C^4((0,T)\times\Omega)}$.
\section{Construction of the universal source function}
\label{universal_source_sec}
Let $\tau_k=e^k$ for $k\in\mathbb N$ and define the sequence $\{c_k\}_{k=1}^{\infty}$ through
$$ c_k =k^{-3}\tau_k^{-1},\quad k\in\mathbb N.$$
We proceed to define for each $j \in \mathbb N$, a source term $f_{j}\in L^2((0,T)\times (\tilde{\Omega}\setminus\overline{\Omega}))$ through the expression
$$ f_{j} = \lim_{N \to \infty}\sum_{k=1}^{N} c_k\, f_{j,\tau_k} \quad \text{in $L^2((0,T)\times \tilde \Omega)$ topology.}$$
Here, the sources $f_{j,\tau_k}$ are given by expression \eqref{packets}. Observe that this definition is justified since by \eqref{f_bound0} we have
$$ \sum_{k=1}^{\infty}c_k\,\|f_{j,\tau_k}\|_{L^2((0,T)\times\tilde \Omega)} \leq \kappa_{j} \sum_{k=1}^{\infty} c_k\, \tau_k\leq C\kappa_{j},$$
with $C>0$ independent of $j$.
Since all the sources $f_{j,\tau_k}$ are supported in balls of radius $\delta_j$ centered at points $q_{j}$ we also have
$$\supp f_{j} \subset (0,T)\times (\tilde{\Omega}\setminus\overline{\Omega}).$$
Henceforth, we will use the formal notation
$$f_{j}= \sum_{k=1}^{\infty} c_k \,f_{j,\tau_k}$$
noting that the convergence is implicitly implied in the $L^2((0,T)\times \tilde\Omega)$ topology. Next, we define a sequence of positive real numbers $\{b_j\}_{j=1}^{\infty}$ such that
\begin{equation}
\label{b_sequence}
\sum_{j=1}^{\infty} b_j \,\kappa_j <\infty.
\end{equation}
We now define our universal source function through the expression
\begin{equation}
\label{universal_source}
f = \lim_{N \to \infty}\sum_{j=1}^{N} b_j\, f_{j} \quad \text{in $L^2((0,T)\times \tilde \Omega)$ topology.}
\end{equation}
Observe that $f \in L^2((0,T)\times\tilde{\Omega})$ and $\supp f\subset (0,T)\times (\tilde{\Omega}\setminus\overline{\Omega})$.
With the construction of the universal source function $f$ completed as above, we proceed to study \eqref{pf} subject to this source term. Let $u_{j,\tau_k}$ denote the solution to \eqref{pf} subject to the source $f_{j,\tau_k}$. Applying the energy estimate \eqref{energy}, it follows that
$$\sum_{k=1}^{\infty} c_k \,\|u_{j,\tau_k}\|_{\mathcal C^1([0,T];L^2(\tilde \Omega))\cap\mathcal C([0,T];H^1(\tilde \Omega))} \leq\,C\,\sum_{k=1}^{\infty}c_k\,\|f_{j,\tau_k}\|_{L^2((0,T)\times\tilde \Omega)}\leq C\kappa_j.$$
Therefore
\begin{equation}\label{conv_u}
\sum_{j=1}^{\infty}b_j\,\sum_{k=1}^{\infty} c_k \,\|u_{j,\tau_k}\|_{\mathcal C^1([0,T];L^2(\tilde \Omega))\cap\mathcal C([0,T];H^1(\tilde \Omega))} \leq C\sum_{j=1}^{\infty}b_j\,\kappa_j<\infty
\end{equation}
where we used \eqref{b_sequence} in the last step. Thus, we can define the function
\begin{equation}
\label{u_exp}
u = \sum_{j=1}^{\infty}b_j\,( \underbrace{\sum_{k=1}^{\infty} c_k\,u_{j,\tau_k}}_{u_j})
\end{equation}
where the convergence of the infinite series holds with respect to the $$\mathcal C^1([0,T];L^2(\tilde \Omega))\cap\mathcal C([0,T];H^1(\tilde \Omega))$$ topology. Applying Lemma~\ref{conv_lem}, we conclude that the function $u$ above is the unique solution to \eqref{pf} subject to the universal source function $f$ given by \eqref{universal_source}.
\section{A representation formula}
\label{asymptotic_analysis_sec}
Let us consider a fixed $j \in \mathbb N$ corresponding to a fixed $\gamma_j \in \mathbb V$ and define
\begin{equation}
\label{I_N}
I_{N}^{j}= \int_0^T\int_{\tilde{\Omega}\setminus\overline{\Omega}} \left(f\, \eta_j(x)\,\mathcal W_{j,\tau_N}-(\Box(\eta_j(x) \mathcal W_{j,\tau_N})-\eta_j(x) \,\Box \mathcal W_{j,\tau_N})\,u\right) \,dxdt,
\end{equation}
where $u$ solves \eqref{pf}.
Here,
$$ \mathcal W_{j,\tau_N}(t,x)= e^{-{\rm i} \tau_N(-t+\xi_j\cdot x)}\, w_{j,N}(t,x)$$
with
$$ w_{j,N} (t,x)=\left(\frac{N}{\delta_j}\right)^{\frac{n}{2}}\,\chi[N\,{\delta_j}^{-1}(s_j-t+\xi_j\cdot (x-x_j))] \prod_{k=1}^{n-1}\chi(N\,\delta_j^{-1}e_{j,k}\cdot (x-x_j))$$
and $\eta_j\in C^{\infty}_c(\tilde{\Omega})$ is chosen such that $\eta_j \equiv 1$ on $\Omega$ and $\eta_j(x)=0$ for all $x\in \tilde{\Omega}\setminus\overline{\Omega}$ such that $\dist{(x,\partial\Omega)}>\frac{\delta_j}{4}$. We also require that
$$ \|\eta_j \|_{C^2(\tilde\Omega)} \leq C\,\delta_j^{-2}$$
for some constant $C>0$ independent of $j$.
Let us emphasize that the dependency of $I_N^{j}$ with respect to the coefficient $V$ is given by $u_{|(0,T)\times\tilde{\Omega}\setminus\overline{\Omega}}$ with $u$ the solution of \eqref{pf}. Therefore, if $u_{|(0,T)\times\tilde{\Omega}\setminus\overline{\Omega}}$ is known, $I_N^{j}$ will be also known even if the coefficient $V$ is unknown. The definition of $I_N^{j}$ is motivated by the following computation:
\begin{align*}
\int_0^T\int_{\tilde{\Omega}\setminus\overline{\Omega}}f\, \eta_j(x)\,\mathcal W_{j,\tau_N}\,dx\,dt=& \int_0^T\int_{\tilde{\Omega}} \left((\Box+V)u\right)\eta_j(x)\,\mathcal W_{j,\tau_N}\,dx\,dt\\
=&\int_{(0,T)\times \tilde{\Omega}}u\,\Box(\eta_j(x)\,\mathcal W_{j,\tau_N})\,dx\,dt\\
&+\int_{(0,T)\times \tilde{\Omega}} V\,u\,\eta_j(x)\,\mathcal W_{j,\tau_N}\,dx\,dt,
\end{align*}
where $u$ solves \eqref{pf} and we have used integration by parts in the second step. There are no boundary terms on $(0,T)\times\partial\tilde\Omega$ since $\eta_j$ vanishes there. Moreover, no boundary terms appear at $t=0$ or $t=T$ since $u,\partial_t u$ vanish at $t=0$ while $(t,x)\mapsto\eta_j(x)\,\mathcal W_{j,\tau_N}(t,x)$ is supported away from $\{T\}\times\tilde\Omega$. This implies that
\begin{equation}
\label{I_exp}
I_N^{j}=\int_{(0,T)\times\tilde\Omega} e^{-{\rm i}\tau_N(-t+\xi_j\cdot x)}\, u\, \underbrace{\eta_j\,(\Box+V) w_{j,N}}_{\tilde{w}_{j,N}}\,dx\,dt.
\end{equation}
Let us record in passing that the function $\tilde{w}_{j,N}$ is compactly supported in $(0,T)\times \tilde{\Omega}$ and that
\begin{equation}
\label{w_bound}
\|w_{j,N}\|_{H^k((0,T)\times \tilde{\Omega})}+N^{-2}\|\tilde{w}_{j,N}\|_{H^k((0,T)\times \tilde{\Omega})}< C \kappa_j \, N^{k}\quad\text{for}\quad \, k=0,1,2
\end{equation}
for some $C>0$ independent of $j$ and $N$, where we recall that $\kappa_j$ is as defined in \eqref{kappa}.
For the remainder of this section, we aim to prove the following theorem.
\begin{theorem}
\label{t1}
Let $f$ be the universal source given by \eqref{universal_source} and let $$V \in \mathcal C^4([0,T]\times\Omega)\cap \mathcal C([0,T];\mathcal C^4_0(\Omega)).$$
For each $j \in \mathbb N$, there holds:
\begin{equation}
\label{t1a} \lim_{N \to \infty} \left(c_N^{-1}\,I_N^{j}-S^{j}_N\right) =\frac{\sqrt{2}}{2}\,b_j\,\int_{\mathbb R} V(\gamma_j(s))\,ds,\end{equation}
where $I_N^j$ is as defined in \eqref{I_N} and
$$ S_N^{j} =\sum_{k=1}^{\infty}b_{k} \int_{(0,T)\times \tilde{\Omega}}e^{{\rm i}\tau_N(\xi_k-\xi_j)\cdot x}\, \eta_j(x)\,\zeta_{k,-}(t)\,v_{k,\tau_N}^{(0)}(t,x)\,\Box w_{j,N}(t,x)\,dx\,dt$$
is an explicit constant depending only on $N$, $j$, $T$, $\tilde\Omega$ and $\Omega$.
\end{theorem}
We remark that for each fixed $j$ and $N$, the expression for $S_N^j$ is well-defined by \eqref{b_sequence} and \eqref{w_bound}. Let us make a preliminary computation to divide the analysis of the limit in Theorem~\ref{t1} into two components. To this end we use \eqref{u_exp} to write
\[
\begin{aligned}
I_N^{j}&=\int_{(0,T)\times\tilde\Omega} \left(\sum_{k=1}^{\infty}b_k\, e^{-{\rm i}\tau_N(-t+\xi_j\cdot x)}\, u_k\,\tilde{w}_{j,N}\right)\,dx\,dt\\
&=\sum_{k=1}^{\infty}b_k\,\int_{(0,T)\times\tilde\Omega} e^{-{\rm i}\tau_N(-t+\xi_j\cdot x)}\, u_{k}\,\tilde{w}_{j,N}\,dx\,dt\\
&= \sum_{k=1}^{\infty} b_k\,\int_{(0,T)\times \tilde\Omega} e^{-{\rm i}\tau_N(-t+\xi_j\cdot x)}\,\left(\sum_{\ell=1}^{\infty} c_\ell u_{k,\tau_\ell}\right)\,\tilde{w}_{j,N}\,dx\,dt\\
&= \sum_{k=1}^{\infty} b_k\,\sum_{\ell=1}^{\infty}c_\ell\,\int_{(0,T)\times \tilde\Omega} e^{-{\rm i}\tau_N(-t+\xi_j\cdot x)}\,u_{k,\tau_\ell}\,\tilde{w}_{j,N}\,dx\,dt
\end{aligned}
\]
The interchanging of the integration and the limits is justified by \eqref{conv_u}. The latter expression can be rewritten as
$$ I_N^{j}=b_j\,J_N^{j}+K_N^{j}$$
where
\begin{equation}
\label{JK_def}
\begin{aligned}
J_N^{j}&=\sum_{\ell=1}^{\infty}c_\ell\,\int_{(0,T)\times \tilde\Omega} e^{-{\rm i}\tau_N(-t+\xi_j\cdot x)}\,u_{j,\tau_\ell}\,\tilde{w}_{j,N}\,dx\,dt \\
K_N^{j}&= \sum_{k\neq j}b_{k}\, \sum_{\ell=1}^{\infty}c_\ell\,\int_{(0,T)\times \tilde\Omega} e^{-{\rm i}\tau_N(-t+\xi_j\cdot x)}\,u_{k,\tau_\ell}\,\tilde{w}_{j,N}\,dx\,dt.
\end{aligned}
\end{equation}
We proceed to study the asymptotic behavior of these two terms as $N$ approaches infinity.
\begin{remark}
\label{C_generic}
In what follows, we will use the symbol $C$ to denote a generic positive constant that is independent of the indices $j$ and $N$ in $I^j_N$ and that only depends on $T$, $\tilde\Omega$, $\Omega$ and $\|V\|_{C^4((0,T)\times\tilde\Omega)}$.
\end{remark}
\subsection{Asymptotic analysis of $J_N^{j}$}
The aim of this section is to prove the following lemma.
\begin{lemma}
\label{lem_J_bound}
Let $j\in\mathbb N$ and $J_N^{j}$ be defined through \eqref{JK_def}. Then
$$\lim_{N\to \infty} \left|c_N^{-1}\,J_N^{j}- \int_{(0,T)\times \tilde \Omega}\zeta_{j,-}(t)\,\tilde{w}_{j,N}\,v^{(0)}_{j,\tau_N}\,dx\, dt\right|=0.$$
\end{lemma}
Applying the definition \eqref{correction_term}, we split the expression for $J_N^{j}$ into two terms $J_N^{j}=J_{1,N}^{j}+J_{2,N}^{j}$ where
$$J_{1,N}^{j}=\sum_{\ell=1}^{\infty}c_\ell\,\int_{(0,T)\times \tilde\Omega} e^{-{\rm i}\tau_N(-t+\xi_j\cdot x)}\,\zeta_{j,-}(t)\,\mathcal U_{j,\tau_\ell}\,\tilde{w}_{j,N}\,dx\,dt$$
and
$$J_{2,N}^{j}=\sum_{\ell=1}^{\infty}c_\ell\,\int_{(0,T)\times \tilde\Omega} e^{-{\rm i}\tau_N(-t+\xi_j\cdot x)}\,R_{j,\tau_\ell}\,\tilde{w}_{j,N}\,dx\,dt.$$
Note that this breaking of the infinite series $J_N^{j}$ is justified again since each of the series $J_{1,N}^{j}$ and $J_{2,N}^{j}$ are absolutely convergent.
\subsubsection{Asymptotic analysis of $J_{1,N}^{j}$}
\label{subsection_1}
Observe that
\begin{multline*}
J_{1,N}^{j}=c_N \int_{(0,T)\times \tilde \Omega}\zeta_{j,-}\,\tilde{w}_{j,N}\,v_{j,\tau_N}\,dx\,dt\\
+\sum_{\ell\neq N}c_\ell\,(\tau_\ell-\tau_N)^{-2} \int_{(0,T)\times \tilde \Omega} \partial^2_t\left(\zeta_{j,-}\,\tilde{w}_{j,N}\,v_{j,\tau_\ell}\right)\, e^{{\rm i}(\tau_\ell-\tau_N)(-t+\xi_j\cdot x)}\,dx\,dt,
\end{multline*}
where we have isolated the summation index $\ell=N$ and performed integration by parts with respect to the time variable twice in the summation over indices $\ell \neq N$, also using the fact that the function $\tilde{w}_{j,N}$ is compactly supported in the set $(0,T)\times \tilde\Omega$. Next, recalling the definition \eqref{kappa} together with the estimates \eqref{amp_bound}, \eqref{w_bound} and $$\|\zeta_{j,\pm}\|_{\mathcal C^2((0,T)\times\tilde\Omega)}\leq C\delta_j^{-2},$$
we obtain that
\begin{equation}
\label{alt_w_bound}
\|\zeta_{j,-}\,\tilde{w}_{j,N}\,v_{j,\tau_\ell}\|_{H^2((0,T)\times\tilde\Omega)}< C\kappa_j^2 \,N^{4}\,\ell^2.
\end{equation}
Combining this with the bound
\begin{equation}
\label{tau_diff}
|\tau_\ell-\tau_N|=|e^\ell-e^N|\geq |e^{N-1}-e^N|=\frac{e-1}{e}e^N\geq \frac{\tau_N}{2}\quad \quad \ell\neq N,
\end{equation}
we deduce that
\begin{equation}
\label{J1_bound}\begin{aligned}
\left|c_N^{-1}J_{1,N}^{j}- \int_{(0,T)\times \tilde \Omega}\zeta_{j,-}\,\tilde{w}_{j,N}\,v_{j,\tau_N}\,dx\,dt\right| &\leq 4C\kappa_j^2 N^{4}c_N^{-1} \tau^{-2}_N (\sum_{\ell\neq N} c_\ell\,\ell^2) \\
&\leq 4C\kappa_j^2 N^{7} \tau^{-1}_N,\end{aligned}
\end{equation}
where we recall the notation from Remark~\ref{C_generic} that $C>0$ is a constant independent of $j$ and $N$.
\subsubsection{Asymptotic analysis of $J_{2,N}^{j}$}
\label{subsection_2}
We write
\begin{equation}
\label{J2_bound}
\begin{aligned}
|J_{2,N}^{j}|&=\left| \sum_{\ell=1}^{\infty}c_\ell\,\int_{(0,T)\times \tilde\Omega} e^{-{\rm i}\tau_N(-t+\xi_j\cdot x)}\,R_{j,\tau_\ell}\,\tilde{w}_{j,N}\,dx\,dt \right|\\
&=\left|\sum_{\ell=1}^{\infty} c_\ell\,\tau_N^{-2}\int_{(0,T)\times\tilde\Omega} \partial^2_t\left(\tilde{w}_{j,N}\,R_{j,\tau_\ell}\right)e^{-{\rm i}\tau_N(-t+\xi_j\cdot x)}\,dx\,dt\right|\\
&<\sum_{\ell=1}^{\infty}c_\ell\,\tau_N^{-2} \|R_{j,\tau_\ell}\|_{H^2((0,T)\times\tilde\Omega)}\|\tilde{w}_{j,N}\|_{H^2((0,T)\times\tilde\Omega)}\\
&< \kappa_j^2\,\tau_N^{-2}N^{4} (\sum_{\ell=1}^{\infty} c_\ell\,\tau_\ell^{-1}\,\ell^6)<C\kappa_j^2\,\tau_N^{-2}\,N^{4}
\end{aligned}
\end{equation}
where we have integrated by parts in time twice and used the bounds \eqref{correction_bound} and \eqref{w_bound}. Combining the bounds given by \eqref{J1_bound} and \eqref{J2_bound}, together with the fact that
\begin{equation}
\label{limit_iden}
\lim_{N\to\infty}c_N^{-1}\tau_N^{-2}N^{4}=\lim_{N\to\infty}\tau_N^{-1}N^{7}=0,
\end{equation}
we are now in a position to prove Lemma~\ref{lem_J_bound}.
\begin{proof}[Proof of Lemma~\ref{lem_J_bound}]
Note that by combining the estimates for $J_{1,N}^j$ and $J^j_{2,N}$ we have shown that
\begin{equation}\label{lem_J_almost}\lim_{N\to \infty} \left|c_N^{-1}\,J_N^{j}- \int_{(0,T)\times \tilde \Omega}\zeta_{j,-}(t)\,\tilde{w}_{j,N}\,v_{j,\tau_N}\,dx\, dt\right|=0.\end{equation}
Moreover, using \eqref{ansatz} and \eqref{amp_bounds} we obtain
\begin{equation}
\label{a_approx}
\|v_{j,\tau_N}-v_{j,\tau_N}^{(0)}\|_{L^2((0,T)\times\Omega)}\leq C\kappa_j\,N^2\,\tau_N^{-1} \quad \forall \, j\in \mathbb N.
\end{equation}
This bound, together with \eqref{w_bound}, implies that
$$\begin{aligned}&\lim_{N\to\infty} \left|\int_{(0,T)\times\tilde\Omega}\zeta_{j,-}\,\tilde{w}_{j,N}\,v_{j,\tau_N}^{(0)}\,dx\,dt-\int_{(0,T)\times\tilde\Omega}\zeta_{j,-}\,\tilde{w}_{j,N}v_{j,\tau_N}\,dx\,dt\right|\\
&\leq\lim_{N\to\infty}C\,\kappa_j^2\,N^{4}\,\tau_N^{-1}=0.\end{aligned}$$
The claim now follows immediately from this estimate and \eqref{lem_J_almost}.
\end{proof}
\subsection{Asymptotic analysis of $K_N^{j}$}
In this section we prove the following lemma.
\begin{lemma}
\label{lem_K_bound}
Let $j\in \mathbb N$ and $K_N^{j}$ be defined through \eqref{JK_def}. Then
$$\lim_{N\to \infty} \left|c_N^{-1}\,K_N^{j}-\sum_{k\neq j}b_{k} \int_{(0,T)\times \tilde{\Omega}}e^{{\rm i}\tau_N (\xi_k-\xi_j)\cdot x}\, \eta_j\,\zeta_{k,-}\,v^{(0)}_{k,\tau_N}\,\Box w_{j,N}\,dx\,dt\right|=0.$$
\end{lemma}
Applying the definition \eqref{correction_term}, we can split the expression for $K_N^{j}$ into three terms $$K_N^j=K_{1,N}^{j}+K_{2,N}^{j}+K_{3,N}^{j}$$ with
$$K_{1,N}^{j}=\sum_{k \neq j}b_{k}\,c_N\,\int_{(0,T)\times \tilde\Omega} e^{{\rm i}\tau_N(\xi_k-\xi_j)\cdot x}\,\zeta_{k,-}(t)\,v_{k,\tau_N}\,\tilde{w}_{j,N}\,dx\,dt,$$
$$K_{2,N}^{j}=\sum_{k \neq j}b_{k}\,\sum_{\ell\neq N}c_\ell\,\int_{(0,T)\times \tilde\Omega} e^{-{\rm i}\tau_N(-t+\xi_j\cdot x)}\,\zeta_{k,-}\,\mathcal U_{k,\tau_\ell}\,\tilde{w}_{j,N}\,dx\,dt,$$
$$K_{3,N}^{j}=\sum_{k\neq j}b_{k}\,\sum_{\ell=1}^{\infty}c_\ell\,\int_{(0,T)\times \tilde\Omega} e^{-{\rm i}\tau_N(-t+\xi_j\cdot x)}\,R_{k,\tau_\ell}\,\tilde{w}_{j,N}\,dx\,dt.$$
We emphasize that this step is justified since all three series converge absolutely. We proceed to bound each of the three terms above.
\subsubsection{Asymptotic analysis of $K_{1,N}^{j}$}
We show in this section that
\begin{equation}
\label{K1Nbound}
\lim_{N \to\infty} \left|c_N^{-1} K_{1,N}^{j}- \sum_{k\neq j}b_{k} \int_{(0,T)\times \tilde{\Omega}}e^{{\rm i}\tau_N(\xi_k-\xi_j)\cdot x}\,\zeta_{k,-}(t)\,v_{k,\tau_N}\,\Box w_{j,N}\,dx\,dt \right| =0.
\end{equation}
Note that it suffices to show that
\begin{equation}
\label{Kbound_rand}
\lim_{N\to \infty}\left| \sum_{k \neq j}b_{k}\,\int_{(0,T)\times \tilde\Omega}e^{{\rm i}\tau_N(\xi_k-\xi_j)\cdot x}\,\zeta_{k,-}(t)\,v_{k,\tau_N}\,\eta_j\,V\,w_{j,N}\,dx\,dt \right|=0.
\end{equation}
Before proving this limit, we need to make a definition. For each $j,k\in \mathbb N$, we set
$$\theta_{k,j}:=\inf_{s,\tilde{s}\in\mathbb R}\dist{(p_{l_k}+s\xi_k,p_{l_j}+\tilde{s}\xi_j)},$$
where we recall that
$$\gamma_j(s)=(t_{k_j}+s,p_{l_j}+s\xi_j),\quad s\in\mathbb R.$$
Then, for all $j\in\mathbb N$, we define the function $h_{j}:\mathbb N\to \mathbb N$ through
$$ h_{j}(r) = \min \{k \in \mathbb N\setminus \{j\}\,:\,|\xi_k-\xi_j|<\tau_r^{-\frac{1}{2}},\quad \theta_{k,j}<(\delta_j+\delta_k)\,r^{-\frac{1}{2}}\}.$$
This minimum always exists since $\mathcal T \times \mathcal P$ is dense in $(0,T)\times \partial\Omega$ and the sequence $\{\delta_k\}_{k\in\mathbb N}$ is a decreasing sequence. We claim that
\begin{equation}
\label{density}
\lim_{r\to\infty} h_j(r) = \infty,\quad\quad j \in \mathbb N.
\end{equation}
To show this, we suppose, for contradiction, that there exists an integer $j$, a strictly increasing sequence $\{r_k\}_{k=1}^{\infty}$ and an integer $N_0$, such that $h_j(r_k)\leq N_0$ for all $k\in\mathbb N$. Note first that
$$\limsup_{k\to\infty}|\xi_{h_j(r_k)}-\xi_j|\leq \limsup_{k\to\infty}\tau_{r_k}^{-\frac{1}{2}}=0,$$
$$\limsup_{k\to\infty}\theta_{h_j(r_k),j}\leq\limsup_{k\to\infty}(\delta_j+\delta_{h_j(r_k)})\,r_k^{-\frac{1}{2}}=0.$$
Combining this with the fact that the set $\{1,\ldots,N_0\}$ is finite, we deduce that there exists an index $k_0$ such that for $k=h_j(r_{k_0})$ we have
$$ \xi_k=\xi_j\quad \text{and}\quad\inf_{s,\tilde{s}\in\mathbb R}\dist{(p_{l_k}+s\xi_k,p_{l_j}+\tilde{s}\xi_j)}=0.$$
But then $h_j(r_{k_0})=j$ which contradicts the definition of $h_j$. Thus, \eqref{density} holds.
We return to the expression \eqref{Kbound_rand} and rewrite it as
\begin{multline}
\label{Kbound_rand_1}
\sum_{k< h_j(N),\,k\neq j} b_{k}\, \int_{(0,T)\times \tilde \Omega}\zeta_{k,-}\,\eta_j\,V\,w_{j,N}\,v_{k,\tau_N}\, e^{{\rm i} \tau_N(\xi_k-\xi_j)\cdot x}\,dx\,dt\\
+\sum_{k\geq h_j(N),\,k\neq j}b_{k}\, \int_{(0,T)\times \tilde \Omega}\zeta_{k,-}\,\eta_j\,V\,w_{j,N}\,v_{k,\tau_N}\, e^{{\rm i} \tau_N(\xi_k-\xi_j)\cdot x}\,dx\,dt.
\end{multline}
Let us begin by analyzing the first term in the expression \eqref{Kbound_rand_1}. We note that, from the definition of the map $h_j$, given any $k < h_j(N)$ with $k\neq j$, either \begin{equation}\label{tt1}|\xi_k-\xi_j|\geq \tau_N^{-\frac{1}{2}}\end{equation} or
\begin{equation}\label{tt2}\inf_{s,\tilde{s}\in\mathbb R}\dist{(p_{l_k}+s\xi_k,p_{l_j}+\tilde{s}\xi_j)}\geq (\delta_j+\delta_{k})\,N^{-\frac{1}{2}}\end{equation}
holds true. In the latter scenario, the terms in the summation vanish. To see this, note that the terms $w_{j,N}$ and $v_{k,\tau_N}$ are supported in tubular neighborhoods of $\gamma_j$ and $\gamma_{k}$ of radius $\frac{\delta_j}{2N}$ and $\frac{\delta_{k}}{2N}$ respectively. Therefore, the condition \eqref{tt2} implies that
$$ v_{k,\tau_N}\,\tilde{w}_{j,N}\equiv 0,\quad N\in \mathbb N.$$
In the former scenario, integrating by parts, we get
$$\begin{aligned}&\int_{(0,T)\times \tilde \Omega}\zeta_{k,-}(t)\,\eta_j\,V\,w_{j,N}\,v_{k,\tau_N}\, e^{{\rm i} \tau_N(\xi_k-\xi_j)\cdot x}\,dx\,dt\\
&=\int_{(0,T)\times \tilde \Omega}\zeta_{k,-}(t)\,\eta_j\,V\,w_{j,N}\,v_{k,\tau_N}\, \frac{(\xi_k-\xi_j)\cdot\nabla_xe^{{\rm i} \tau_N(\xi_k-\xi_j)\cdot x}}{{\rm i} \tau_N\left|\xi_k-\xi_j\right|^2}\,dx\,dt\\
&=\frac{{\rm i}\int_{(0,T)\times \tilde \Omega}e^{{\rm i} \tau_N(\xi_k-\xi_j)\cdot x}\,\zeta_{k,-}(t)\,(\xi_k-\xi_j)\cdot\nabla_x\left[\eta_j\,V\,w_{j,N}\,v_{k,\tau_N}\right]\,dx\,dt}{ \tau_N\left|\xi_k-\xi_j\right|^2}.\end{aligned}$$
Then, \eqref{tt1} implies
$$\begin{aligned}&\left|\int_{(0,T)\times \tilde \Omega}\zeta_{k,-}(t)\,\eta_j\,V\,w_{j,N}\,v_{k,\tau_N}\, e^{{\rm i} \tau_N(\xi_k-\xi_j)\cdot x}\,dx\,dt\right|\\
&\leq C\tau_N^{-\frac{1}{2}}\|\eta_j\,V\,w_{j,N}\,v_{k,\tau_N}\|_{L^1(0,T;W^{1,1}(\tilde \Omega))}\\
&\leq C\tau_N^{-\frac{1}{2}}(\|\eta_j\,w_{j,N}\|_{L^2(0,T;H^1(\tilde \Omega))}\|v_{k,\tau_N}\|_{L^2((0,T)\times\tilde \Omega)}\\
&\quad\quad \quad\quad\quad \quad+\|\eta_j\,w_{j,N}\|_{L^2((0,T)\times\tilde \Omega)}\|v_{k,\tau_N}\|_{L^2(0,T;H^1(\tilde \Omega))}).\end{aligned}$$
Combining this with \eqref{amp_bounds} and \eqref{w_bound}, we obtain
$$\left|\int_{(0,T)\times \tilde \Omega}\zeta_{k,-}(t)\,\eta_j\,V\,w_{j,N}\,v_{k,\tau_N}\, e^{{\rm i} \tau_N(\xi_k-\xi_j)\cdot x}\,dx\,dt\right|\leq C\kappa_j\kappa_kN\tau_N^{-\frac{1}{2}}.$$
According to the above discussion, this last estimate holds true for all $k < h_j(N)$ with $k\neq j$. Taking the sum, we deduce that
$$\begin{aligned}&\left|\sum_{k< h_j(N),\,k\neq j} b_{k}\, \int_{(0,T)\times \tilde \Omega}\zeta_{k,-}\,\eta_j\,V\,w_{j,N}\,v_{k,\tau_N}\, e^{{\rm i} \tau_N(\xi_k-\xi_j)\cdot x}\,dx\,dt\right|\\
&\leq C\kappa_jN\tau_N^{-\frac{1}{2}}\sum_{k< h_j(N),\,k\neq j} b_{k}\kappa_k\leq C\kappa_jN\tau_N^{-\frac{1}{2}}.\end{aligned}$$
Therefore, we have
\begin{equation}\label{tt3}\lim_{N\to\infty}\sum_{k< h_j(N),\,k\neq j} b_{k}\, \int_{(0,T)\times \tilde \Omega}\zeta_{k,-}\,\eta_j\,V\,w_{j,N}\,v_{k,\tau_N}\, e^{{\rm i} \tau_N(\xi_k-\xi_j)\cdot x}\,dx\,dt=0.\end{equation}
We now consider the second term in \eqref{Kbound_rand_1}. We write
\begin{multline*}
\left|\sum_{k\geq h_j(N),\,k\neq j}b_{k}\,\int_{(0,T)\times \tilde \Omega}\zeta_{k,-}(t)\,\eta_j(x)\,V\,w_{j,N}\,v_{k,\tau_N}\, e^{{\rm i} \tau_N(\xi_k-\xi_j)\cdot x}\,dx\,dt\right|\\
\leq\sum_{k\geq h_j(N)}b_{k}\,\|v_{k,\tau_N}\,w_{j,N}\|_{L^1((0,T)\times\tilde\Omega)}\|\zeta_{k,-}\,\eta_j\,V\|_{L^{\infty}((0,T)\times\tilde\Omega)}\leq C\,\kappa_j\,\sum_{k\geq h_j(N)}\kappa_{k}b_{k}.
\end{multline*}
Here, in the last step we used the Cauchy-Schwarz inequality together with the bounds \eqref{amp_bounds} and \eqref{w_bound} to write
$$\|v_{k,\tau_N}\,w_{j,N}\|_{L^1((0,T)\times\tilde\Omega)}\leq \|v_{k,\tau_N}\|_{L^2((0,T)\times \tilde\Omega)}\|\,w_{j,N}\|_{L^2((0,T)\times \tilde\Omega)}\leq C \kappa_k\kappa_j.$$
Now, applying \eqref{b_sequence} and \eqref{density}, we conclude that
$$ \lim_{N\to\infty} \sum_{k\geq h_j(N)}\kappa_{k}b_{k}=0.$$
Combining this with \eqref{Kbound_rand_1} and \eqref{tt3}, we deduce that \eqref{Kbound_rand} holds true. This concludes our asymptotic analysis of $K_{1,N}^{j}$ showing that \eqref{K1Nbound} is fulfilled.
\subsubsection{Asymptotic analysis of $K_{2,N}^{j}$}
We write
$$ |K_{2,N}^j|=\left|\sum_{k\neq j}b_{k}\sum_{\ell \neq N}c_\ell \int_{(0,T)\times \tilde \Omega}\zeta_{k,-}\,\tilde{w}_{j,N}\,v_{k,\tau_\ell}\, e^{{\rm i} S(t,x)}\,dx\,dt\right|,$$
where we are using the shorthand notation $$S(t,x)=-(\tau_\ell-\tau_N)t+(\tau_\ell\xi_k-\tau_N\xi_j)\cdot x.$$
Using integration by parts with respect to the time variable, we reduce this as follows.
$$\begin{aligned}
|K_{2,N}^j|&=\left|\sum_{k\neq j}b_{k}\sum_{\ell\neq N}c_\ell\, (\tau_\ell-\tau_N)^{-2}\int_{(0,T)\times \tilde \Omega} \partial^2_t\left(\zeta_{k,-}\,\tilde{w}_{j,N}\,v_{k,\tau_\ell}\right)\, e^{{\rm i} S(t,x)}\,dx\,dt\right|\\
&\leq 4\sum_{k\neq j}b_{k}\sum_{\ell\neq N} c_\ell\, \tau_N^{-2}\| \zeta_{k,-}\,v_{k,\tau_\ell}\,\tilde{w}_{j,N}\|_{H^2((0,T)\times\tilde\Omega)}\\
&\leq\sum_{k\neq j}4\kappa_{j}\,b_{k}\kappa_{k} \tau_N^{-2} N^{4}(\sum_{\ell=1}^{\infty}c_\ell\,\ell^2)\leq C\kappa_j\,\tau_N^{-2} N^{4},
\end{aligned}$$
where we have used estimates \eqref{alt_w_bound}--\eqref{tau_diff}. Thus, we obtain that
$$|c_N^{-1}\,K_{2,N}^{j}|\leq C\,\kappa_j\,c_N^{-1}\tau_N^{-2}N^{4}\leq C\,\kappa_j\,\tau_N^{-1}N^{7}$$
which implies that
\begin{equation}
\label{K2Nbound}
\lim_{N\to\infty}|c_N^{-1}\,K_{2,N}^{j}|=0.\end{equation}
\subsubsection{Asymptotic analysis of $K_{3,N}^{j}$}
To bound $K_{3,N}^{j}$ we write
\[
\begin{aligned}
\label{Kbound3}
&\left|\sum_{k\neq j}b_{k}\sum_{\ell=1}^{\infty}c_\ell \,\left(\int_{(0,T)\times \tilde\Omega}e^{-{\rm i}\tau_N(-t+\xi_j\cdot x)} \, R_{k,\tau_\ell}\,\tilde{w}_{j,N}\,dx\,dt\right)\right|\\
&=\left|\sum_{k\neq j}b_{k}\,\sum_{\ell=1}^{\infty} c_\ell\,\tau_N^{-2}\int_{(0,T)\times\tilde\Omega} \partial^2_t\left(\tilde{w}_{j,N}\,R_{k,\tau_\ell}\right)e^{-{\rm i}\tau_N(-t+\xi_j\cdot x)}\,dx\,dt\right|\\
&\leq\sum_{k\neq j}b_{k}\,\sum_{\ell=1}^{\infty}c_\ell\,\tau_N^{-2} \|R_{k,\tau_\ell}\|_{H^2((0,T)\times\tilde\Omega)}\|\tilde{w}_{j,N}\|_{H^2((0,T)\times\tilde\Omega)}\\
&\leq \sum_{k\neq j}\kappa_j\,\kappa_{k}\,b_{k}\tau_N^{-2}N^{4} (\sum_{\ell=1}^{\infty} c_\ell\,\tau_\ell^{-1}\ell^6)\leq C\kappa_j\,\tau_N^{-2}N^{4}
\end{aligned}
\]
where we have used the bounds \eqref{correction_bound} and \eqref{w_bound}. Thus we obtain
\begin{equation}\label{K3Nbound}|c_N^{-1}K_{3,N}^{j}|\leq C\,\kappa_j\, c_N^{-1}\tau^{-2}_N\,N^{4}\leq C\,\kappa_j\,\tau_N^{-1}N^{7}.\end{equation}
We are now ready to prove Lemma~\ref{lem_K_bound} as follows.
\begin{proof}[Proof of Lemma~\ref{lem_K_bound}]
Note that by combining the estimate \eqref{K3Nbound} with \eqref{K1Nbound} and \eqref{K2Nbound}, we have shown that
$$\lim_{N\to \infty} \left|c_N^{-1}\,K_N^{j}-\sum_{k\neq j}b_{k} \int_{(0,T)\times \tilde{\Omega}}e^{{\rm i}\tau_N (\xi_k-\xi_j)\cdot x}\, \eta_j\,\zeta_{k,-}\,v_{k,\tau_N}\,\Box w_{j,N}\,dx\,dt\right|=0.$$
Applying the estimates \eqref{w_bound} and \eqref{a_approx} together with the convergence of the series \eqref{b_sequence}, we observe that
\begin{multline*}\left|\sum_{k\neq j}b_{k} \int_{(0,T)\times \tilde{\Omega}}e^{{\rm i}\tau_N (\xi_k-\xi_j)\cdot x}\, \eta_j\,\zeta_{k,-}\,(v_{k,\tau_N}-v_{k,\tau_N}^{(0)})\,\Box w_{j,N}\,dx\,dt\right| \\
\leq C\kappa_j\,N^4\tau_N^{-1} (\sum_{k=1}^{\infty}\kappa_k b_k), \end{multline*}
which converges to zero as $N$ approaches infinity. The claim follows immediately.
\end{proof}
With the proof of Lemmas \ref{lem_J_bound}--\ref{lem_K_bound} completed, we are ready to state the proof of Theorem~\ref{t1}.
\begin{proof}[Proof of Theorem~\ref{t1}]
Let $j \in \mathbb N$ be an index corresponding to some $\gamma_j \in \mathbb V$. Combining the definition of $I^j_N$ in terms of $J^j_N$ and $K^j_N$ as given by \eqref{JK_def} together with Lemmas~\ref{lem_J_bound}--\ref{lem_K_bound}, we obtain that
$$ \lim_{N\to \infty} \left|c_N^{-1}\,I_N^{j}-S^{j}_N- b_j\,\int_{(0,T)\times \tilde \Omega}\zeta_{j,-}\,V\,\eta_{j}\,w_{j,N}v_{j,\tau_N}^{(0)}\,dx\,dt \right|=0.$$
Note that $v^{(0)}_{j,\tau_N}=w_{j,N}$. We proceed to study the expression
\begin{equation}
\label{weak_conv}
\int_{(0,T)\times \tilde \Omega}\eta_j\,\zeta_{j,-}\,V\,w^2_{j,N}\,dx\,dt.
\end{equation}
To simplify this expression, we introduce the new coordinate system $$(t,x^1,\ldots,x^n) \mapsto y=(y^0,\ldots,y^n)$$ on $\mathbb R^{1+n}$ that is defined by
$$(t,x^1,\ldots,x^n)= q_{j}+y^0\,\alpha_j^*+y^1\,\alpha_{j}+\sum_{k=2}^{n}y^{k}\,e_{j,k-1}.$$
Here, $\alpha_j,\alpha_j^*\in\mathbb S^n=\{z\in\mathbb R^{1+n}:\ |z|=1\}$ are given by
$$\alpha_j=\frac{\sqrt{2}}{2}\,(-1,\xi_j),\quad \alpha_j^*=\frac{\sqrt{2}}{2}\,(1,\xi_j).$$
Note that in the $y$-coordinate system the points on the light ray $\gamma_{j}$ are given by $y^1=\ldots=y^n=0.$
Using this coordinate system together with the definitions of $v_{j,0}$ and $w_{j,N}$, the expression \eqref{weak_conv} reduces to
$$\int_{\mathbb R^{1+n}}\left(\frac{N}{\delta_j}\right)^{n}\,\eta_j(y)\,\zeta_{j,-}(y)\,V(y)\chi^2(N\,\delta_j^{-1}\sqrt{2}y^1)\left(\prod_{k=2}^{n}\chi^2(N\,\delta_j^{-1}y^k)\right)\,dy.$$
Taking the limit as $N\to \infty$ and noting that both $\eta_j$ and $\zeta_{j,-}$ are identical to one on the segment of $\gamma_j$ that lies inside $(0,T)\times\Omega$, we obtain:
$$\begin{aligned}&\lim_{N\to\infty}\int_{\mathbb R^{1+n}}\left(\frac{N}{\delta_j}\right)^{n}\eta_j(y)\,\zeta_{j,-}(y)\,V(y)\chi^2(N\,\delta_j^{-1}\sqrt{2}y^1)\,\prod_{k=2}^{n}\chi^2(N\,\delta_j^{-1}\,y^k)\,dy\\
&= \frac{\sqrt{2}}{2}\int_\mathbb R V(y^0,0,\ldots,0)\,dy^0\end{aligned}$$
where we used \eqref{cutoff} in the last step. This completes the proof of the theorem.
\end{proof}
\section{Proofs of main results}
\label{proof_section}
This section is devoted to the proof of the main results stated in Theorem \ref{t0} and Corollary \ref{c1}. For this purpose, we will combine all the arguments of the previous sections. We start with Theorem~\ref{t0}.
\begin{proof}[Proof of Theorem~\ref{t0}] Let $f\in L^2(\mathbb R^{1+n})$ be the source term given by \eqref{universal_source} and let $V_j \in \mathcal C^4([0,T]\times\mathbb R^n)\cap \mathcal C([0,T];\mathcal C^4_0(\Omega))$, $j=1,2$. We consider also $u_j\in \mathcal C^1([0,T];L^2(\mathbb R^n)) \cap \mathcal C([0,T];H^1(\mathbb R^n))$ solving \eqref{pf} with $V=V_j$, $j=1,2$. Assuming that the condition
\begin{equation}\label{t0b}u_1(t,x)=u_2(t,x),\quad (t,x)\in(0,T)\times(\tilde{\Omega}\setminus\overline{\Omega})\end{equation}
is fulfilled, we will prove that $V_1=V_2$ on $\mathcal D$.
We start by observing that \eqref{t0b} combined with Theorem~\ref{t1} imply
\begin{equation}\label{V_gamma} \int_{\mathbb R} (V_1-V_2)(\gamma_j(s))\,ds=0\quad \forall\, \gamma_j \in \mathbb V.
\end{equation}
Let $\gamma:\mathbb R\to\mathbb R^{1+n}$ be any future pointing light ray such that its intersection with $(0,T)\times \Omega$ lies inside $\mathcal D$. We write
$$\gamma(s) = \gamma(0)+ s\,(1,\xi)$$
for some $\gamma(0) \in (0,T)\times \partial \Omega$ and some unit vector $\xi \in \mathbb R^n$. Recall that all light rays $\gamma_j \in \mathbb V$ can be written in the form
$$ \gamma_j(s)= (t_{k_j}+s,s\,\xi_j+p_{l_j})$$
for some sequences $\{k_j\}_{j=1}^{\infty}$, $\{l_j\}_{j=1}^{\infty}$, $\{m_j\}_{j=1}^{\infty}$ and where $\xi_j=\frac{p_{m_j}-p_{l_j}}{|p_{l_j}-p_{m_j}|}$. Applying the density of $\mathcal T \times \mathcal P$ in $(0,T)\times \partial \Omega$, it follows that there exists a sub-sequence $\{j_\ell\}_{\ell=1}^{\infty}\subset \mathbb N$ such that
$$\lim_{\ell\to\infty}(t_{k_{j_\ell}},p_{l_{j_\ell}})= \gamma(0)$$
and such that
$$ \lim_{\ell\to\infty} \xi_{j_\ell}=\xi.$$
Thus, using continuity of $V_1-V_2$ together with \eqref{V_gamma}, it follows that
$$ \int_{\mathbb R} (V_1-V_2)(\gamma(s))\,ds=0$$
for all light rays $\gamma$ in $\mathcal D$. Finally, applying the injectivity of the light ray transform (see for example \cite[Theorem 2.1]{Ste}) we deduce that
$$ V_1 = V_2 \quad \text{on $\mathcal D$}.$$
This completes the proof of the theorem.
\end{proof}
\begin{proof}[Proof of Corollary~\ref{c1}] Let $f\in L^2(\mathbb R^{1+n})$ be the source term given by \eqref{universal_source} and let $V_j \in \mathcal C^4([0,T]\times\mathbb R^n)\cap \mathcal C([0,T];\mathcal C^4_0(\Omega))$, $j=1,2$. We consider also $u_j\in \mathcal C^1([0,T];L^2(\mathbb R^n)) \cap \mathcal C([0,T];H^1(\mathbb R^n))$ solving \eqref{pf} with $V=V_j$, $j=1,2$. Assuming that the condition
$$u_1(t,x)=u_2(t,x),\quad (t,x)\in(0,T)\times\mathcal O$$
is fulfilled, we will prove that $V_1=V_2$ on $\mathcal D_{T_1}$. Consider $u=u_1-u_2$ and notice that $u$ satisfies
\begin{equation}\label{c1a}\left\{\begin{array}{ll}\partial_t^2u-\Delta_x u=0,\quad &\textrm{in}\ (0,T)\times(\tilde{\Omega}\setminus\overline{\Omega}),\\ u(0,\cdot)=\partial_tu(0,\cdot)=0,\quad &\textrm{in}\ \mathbb R^n,\\ u=0,\quad &\textrm{on}\ (0,T)\times\mathcal O.\end{array}\right.\end{equation}
Now let us consider $\tilde{u}$ defined on $[-T,T]\times \mathbb R^n$ by $\tilde{u}=u$ on $[0,T]\times \mathbb R^n$ and
$$\tilde{u}(-t,x)=u(t,x),\quad (t,x)\in[0,T]\times \mathbb R^n.$$
Since $u(0,\cdot)=\partial_tu(0,\cdot)=0$, we deduce that $\tilde{u}\in H^1(\mathbb R^{1+n})$ and \eqref{c1a} implies
\begin{equation}\label{c1b}\left\{\begin{array}{ll}\partial_t^2\tilde{u}-\Delta_x \tilde{u}=0,\quad &\textrm{in}\ (-T,T)\times(\tilde{\Omega}\setminus\overline{\Omega}),\\ \tilde{u}=0,\quad &\textrm{on}\ (0,T)\times\mathcal O.\end{array}\right.\end{equation}
Applying the global Holmgren uniqueness theorem for hyperbolic equations (see e.g. \cite[Theorem 3.11]{KKL} or \cite[Theorem 2.2]{KMO} ), which is a consequence of the well known local unique continuation result of \cite[Theorem 1]{Ta}, we deduce that
$$u(t,x)=\tilde{u}(t,x)=0,\quad t\in(0,T),\ x\in\{y\in\tilde{\Omega}\setminus\overline{\Omega}:\ \textrm{\emph{dist}}(y,\mathcal O)<T-t\}.$$
In particular, \eqref{c1a} implies that $u=0$ on $(0,T_1)\times (\tilde{\Omega}\setminus\overline{\Omega})$. Therefore, we have
$$u_1(t,x)=u_2(t,x),\quad (t,x)\in(0,T_1)\times(\tilde{\Omega}\setminus\overline{\Omega})$$
and, repeating the arguments of Theorem \ref{t0} with $T$ replaced by $T_1$, we deduce that $V_1=V_2$ on $\mathcal D_{T_1}$. This completes the proof of the corollary.\end{proof}
------------------------------------------------------
\section*{Acknowledgments}
A.F acknowledges the support from the EPSRC grant EP/P01593X/1. Y.K. acknowledges the support from the Agence Nationale de la Recherche (project MultiOnde) grant ANR-17-CE40-0029.
\bibliographystyle{abbrv}
|
1,314,259,995,040 | arxiv | \section{Introduction}
The field of fractional quantum Hall effect~\cite{Tsui82} (FQHE) has been the birthplace for a web of spectacular phenomena, exotic emergent particles, and nontrivial states, all arising as a result of the interaction between electrons. The FQHE is a rare example of a strongly correlated state for which we not only have a qualitative understanding of a large part of the prominent phenomenology but have achieved a detailed microscopic description that is quantitatively accurate~\cite{Jain07, Halperin20}. Nonetheless, the origin of a few experimentally observed states remains unsettled. This article aims to report on our theoretical investigations of one such state, namely the FQHE state at filling factor $\nu=1/2$ observed in wide quantum wells (WQWs)~\cite{Suen92, Suen92b, Suen94b, Luhman08, Shabani09a, Shabani09b, Shabani13, Liu14d, Hasdemir15, Mueed16, Drichko19}, the origin of which has been a topic of debate ever since its discovery. There are two motivations for our study. First, this observation is in stark contrast to the state at half-filling in narrow quantum wells, which is established to be a Fermi sea of composite fermions (CFs)~\cite{Halperin93, Jain07, Halperin20}. The FQHE thus arises due to changes in the interaction arising from finite quantum well width, and thus constitutes an important challenge for our quantitative understanding of the FQHE. Second, the physical origin of the observed state can be potentially very interesting.
A promising two-component state is the Halperin $(3,3,1)$ state~\cite{Halperin83}, which can be relevant because a very WQW behaves as a two-component system. [There is little doubt that the $1/2$ FQHE observed in {\it real} double-layer systems~\cite{Eisenstein92}, observed at the same time as the $1/2$ state in WQWs, is the two-component Halperin $(3,3,1)$ state~\cite{Faugno20}. Our focus in this article is on WQWs, not double layer systems.] However, another promising candidate is the one-component Pfaffian state, which is a paired state of composite fermions~\cite{Moore91, Read00}. This state is believed to be responsible for the FQHE at $\nu=5/2$~\cite{Willett87}, and one can ask if the changes in the inter-electron interaction due to finite width may stabilize this state at $\nu=1/2$ as well. The Halperin $(3,3,1)$ state supports Abelian quasiparticles, whereas the Pfaffian is believed to support non-Abelian quasiparticles. The latter has motivated many interesting theoretical
and experimental studies of the $5/2$ FQHE. If the 1/2 state in WQWs turns out to be the Pfaffian state, that would provide another venue where non-Abelian quasiparticles may be investigated.
While the $1/2$ FQHE in WQWs has often been interpreted in terms of the $(3,3,1)$ state, arguments can also be given in favor of a one-component state. We provide here a summary of experimental results and their implications for the nature of the state:
\begin{itemize}
\item In a double layer system, which consists of two layers separated by a distance $d$, the situation is relatively clear~\cite{Park98,Papic10,Faugno20,Scarola01b, Scarola02b,Chakraborty87,Yoshioka89,He91,He93}. For zero layer separation, the two-component system of spin polarized electrons is formally equivalent to a single layer system of spinful electrons with zero Zeeman splitting. Here the state is a layer singlet Fermi sea of composite fermions~\cite{Park98,Balram15c,Balram17}. In variational calculations~\cite{Scarola01b, Scarola02b,Faugno20} this state survives in the range $d/l_B\lesssim1$. The $(3,3,1)$ state is predicted to occur for layer separations $1\lesssim d/l_B \lesssim 3$\cite{Scarola01b,Faugno20}, in general agreement with experiments. For layer separations $d/l_B \gtrsim 3$ two uncoupled CF Fermi seas (CFFSs) are formed in each layer, with composite fermions now binding four vortices~\cite{Scarola01b,Faugno20}. In contrast, the 1/2 FQHE in WQWs is seen when the width is approximately $2.6 - 8$ $l_B$\cite{Suen92,Suen92b,Suen92c,Suen94b, Shayegan96,Manoharan96,Shabani09a,Shabani09b,Shabani13,Yang14b,Hasdemir15, Mueed16,Liu14d}. Although not conclusive, this points against the two-component $(3,3,1)$ state.
\item For quantum well widths and densities where the 1/2 FQHE is observed in WQWs, the behavior of FQHE states surrounding it is often consistent with single layer physics. In particular, the standard Jain sequences $n/(2n\pm 1)$~\cite{Jain89} are observed. Recently, Mueed {\it et al.}~\cite{Mueed15} have directly measured, from commensurability oscillations, the Fermi wave vector of composite fermions in the vicinity of filling factor $1/2$ and found that the Fermi sea is a one-component state. The fact that the states in the immediate vicinity of $\nu=1/2$ are one-component states makes it plausible that the $\nu=1/2$ FQHE also has a one-component origin. If not, it would be important to understand what is special about $\nu=1/2$ that makes a two-component state favorable.
\item A phase diagram has been constructed as a function of the filling factor and $\Delta_{\rm SAS}$, the gap between the symmetric and antisymmetric subbands~\cite{Manoharan96}. The island of the 1/2 FQHE state straddles the boundary where many nearby FQHE states make a transition from a one-component state to an insulator, presumably a double layer crystal. However, the 1/2 FQHE island is contiguous, i.e. it is either all one component or two-component.
\item The effect of asymmetry in the charge distribution is complex but worth mentioning here. An early work on 80 nm wide QW by Suen {\it et al.}~\cite{Suen92b, Suen94b} reported a monotonic decrease in the strength of the FQHE at $\nu=1/2$ as the charge distribution is made asymmetric, with the FQHE state disappearing at approximately 10\% imbalance. This may arise from either two-component nature or complicated changes in the effective interaction. Subsequently, Shabani {\it et al.}~\cite{Shabani09a, Shabani09b} found that in a 55 nm quantum well an asymmetry of the charge distribution favors FQHE at $1/2$. This suggests a one-component nature of the FQHE here. Numerical studies also show that in such asymmetric quantum wells around certain widths the one-component Pfaffian wave function has a large overlap with the ground state, although the $(3,3,1)$ state is also competitive~\cite{Thiebaut14, Liu14d, Peterson10}.
\end{itemize}
We next briefly review the theoretical studies of the 1/2 FQHE in WQWs and also provide a summary of the main results arising from the present study. In particular, we indicate how the theoretical phase diagram is sensitive to the various assumptions that go into the calculation.
The problem has been addressed by exact diagonalization (ED)~\cite{Papic10,He93,Storni10,Peterson10,Balram20}. ED can often deal with only very small systems and is thus not likely to capture the thermodynamic behavior. This is especially the case for WQWs, for which the width may become comparable to the available lateral dimension of the system. The energy orderings of states are often seen to change as the system size increases.
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./torus_VMC_Phase_boundary_all.pdf}
\caption{The phase diagram of states at $\nu=1/2$ obtained by the VMC method as a function of the quantum well width $W$ and the carrier density. The transverse wave function is assumed to have the form obtained from LDA at zero magnetic field. Both one-component and two-component states are included. The following states are seen to occur: the one-component CFFS state (red), the $(3,3,1)$ state (green), and the state with two uncoupled $1/4$ CFFSs, labeled $1/4+1/4$ CFFS (yellow). The region where experiments find an incompressible state~\cite{Shabani09b} is indicated by light dashed lines. For a given width, the uncertainty of the calculated transition densities is approximately $1\times 10^{10} \text{cm}^{-2}$. The overall phase boundary is obtained by smoothly joining the transition points at $W=50,60,70,80$ nm. The subband gap determined by LDA is used to determine the total energies of the two-component states.
}
\label{VMC_PHASE_DIAGRAM_2}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./2dDMC_Phase_boundary_all.pdf}
\caption{The phase diagram of states as a function of the quantum well width $W$ and the carrier density obtained from a 2D DMC calculation, which incorporates finite width corrections by using an LDA interaction derived at zero magnetic field. This figure shows how the phase diagram in Fig.~\ref{VMC_PHASE_DIAGRAM_2} changes upon Landau level mixing. The region where experiments find an incompressible state~\cite{Shabani09b} is indicated by light dashed grey lines. For a given width, the uncertainty of the calculated transition densities is about $2\times 10^{10} \text{cm}^{-2}$.}
\label{2D_DMC_BOUNDARY_ALL}
\end{figure}
This issue has also been investigated by variational Monte Carlo (VMC)\cite{Biddle13,Papic09, Scarola10,Thiebaut14,Thiebaut15,Faugno20}. During the course of this work, we have determined the phase diagram of $\nu=1/2$ in a WQW using the VMC method, shown in Fig.~\ref{VMC_PHASE_DIAGRAM_2}. The $(3,3,1)$ state is stabilized in a part of the phase diagram that qualitatively agrees with experiments. The phase boundary between one component CFFS and the $(3,3,1)$ state is consistent with earlier calculations~\cite{Thiebaut15}.
However, the VMC calculations make the following assumptions. (i) The effect of finite width is incorporated through a transverse wave function for electrons, which modifies the interactions between them (see Eq.\,\ref{V_eff}). The transverse wave function is evaluated in local density approximation (LDA) at zero magnetic field~\cite{Park99b}, and it is assumed that it remains unaltered at a strong perpendicular magnetic field. Given that the nature of the transverse wave function depends on the state that the electrons form in two dimensions (for example, at zero magnetic field LDA assumes a Fermi sea state of electrons), one may wonder to what extent this assumption is valid. (ii) The phase boundary between the one- and two-component states depends sensitively on $\Delta_{\rm SAS}$, i.e. the gap between the symmetric and antisymmetric subbands. One uncritically uses its value obtained at zero magnetic field. However, this gap is typically very large compared to the Coulomb energy differences between the competing states, and even a few percent change in $\Delta_{\rm SAS}$ can substantially shift the phase boundaries.
The VMC calculation also does not incorporate the effect of Landau level mixing (LLM) directly. We have further investigated the role of LLM within the VMC method through a two-dimensional (2D) fixed-phase diffusion Monte Carlo (DMC) method developed by Ortiz, Ceperley and Martin ~\cite{Ortiz93, Melik-Alaverdian97}, which itself is a generalization of the standard DMC method~\cite{Foulkes01} to find ground states in the presence of broken time-reversal symmetry. In this method, we allow for LLM for electrons interacting with the effective interaction derived from LDA at zero magnetic field. We refer to this as ``2D-DMC." We find that, at this level of approximation, the phase diagram is substantially altered and neither the (3,3,1) nor the Pfaffian state is stabilized for a significant range of parameters (see Fig.~\ref{2D_DMC_BOUNDARY_ALL}). However, a conceptual difficulty with this method is an uncontrolled double-counting, because mixing with higher bands has already been incorporated through the modification of the transverse wave function, which, in a sense, is akin to LLM at a finite magnetic field. (At finite magnetic fields, it is LLM that leads to a modification of the form of the transverse wave function.) This study nonetheless shows the importance of LLM, indicating that the results from neither VMC nor 2D-DMC are fully reliable.
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./3dDMC_Phase_boundary_all_strict2L.pdf}
\caption{The phase diagram of states determined by 3D-DMC as a function of the quantum well width $W$ and the carrier density. Here both finite width and Landau level mixing are included in a DMC calculation directly in the presence of a magnetic field. The red region is the single-component CFFS state and the yellow region marks the $1/4+1/4$ CFFS state. In the purple region, the energies of the single-component CFFS and the single-component Pfaffian states are equal within numerical uncertainty. The uncertainty of the transition density from one-component state to the $1/4+1/4$ CFFS at each width is approximately $5\times 10^{10}\text{cm}^{-2}$. The region where experiments find an incompressible state~\cite{Shabani09b} is indicated by light dashed grey lines. }
\label{3D_PHASE_2}
\end{figure}
The primary motivation of our work is to develop a technique that circumvents some of the above issues and treats finite width and LLM effects directly at a large magnetic field. Specifically, we use a three-dimensional (3D) version of the fixed phase DMC method, referred to below as ``3D-DMC," or simply as ``DMC." The most important advantage of the 3D-DMC method is that it directly gives the ground state energy (as well as the form of the transverse wave function) at a high magnetic field, automatically including the effects of finite width and LLM. No reference is made to zero magnetic field in our calculation. Of course, this method also makes an approximation, namely the choice of fixed phase, and all of our conclusions are subject to the validity of our choice of the phase. (We use the accurate lowest Landau level wave functions to fix the phase, which has been found to give good agreement with experiments in the past~\cite{Zhang16, Zhao18, Ma20}.) There are other practical difficulties with our method. One is that the required computation time does not allow treatment of very large systems; we have studied systems with up to about 25 particles. The second is that it does not allow treatment of two-component states with non-zero $\Delta_{\rm SAS}$. For two-component states, we assume that $\Delta_{\rm SAS}=0$, i.e. the wave function strictly vanishes at the center. This should be a decent approximation for sufficiently large widths and densities where $\Delta_{\rm SAS}$ is small.
The phase diagram obtained from 3D-DMC calculations is shown in Fig.~\ref{3D_PHASE_2}. The light purple region shows the part of the phase diagram where the energies of the one-component CFFS and the one-component Pfaffian states are so close that we cannot distinguish between them within numerical uncertainty [although both of these energies are lower than the energy of the two-component $(3,3,1)$ state]. Given that experiments show an incompressible state here, we believe that the one-component Pfaffian state is the most likely possibility. Nonetheless, in light of the approximations made in the calculation, a definitive confirmation can come only from experiments, and we hope that our study will motivate further experimental studies of this state.
We have also studied several other candidate wave functions at $\nu=1/2$ but found them not to be relevant for the issue at hand.
Additionally, our 3D-DMC study yields the form of the transverse wave function directly in the presence of a high perpendicular magnetic field. Here, the double layer nature of the ground state for large widths or densities arises due to LLM. We find that, surprisingly, the form of the transverse wave function of the lowest symmetric band is not particularly sensitive to the nature of the 2D state; we find very similar forms for $\nu=1$, $1/3$, and $1/5$, as discussed later. Furthermore, also surprisingly, we find that the transverse wave function obtained from our 3D-DMC is also close to that obtained from LDA at zero magnetic field. Nonetheless, our phase diagram with 3D-DMC method is very different from that obtained from VMC.
A recent work~\cite{Zhu16} has concluded that switching on tunneling in a bilayer favors the Pfaffian state. The model for quantum well considered in Ref.~\cite{Zhu16} is different from ours.
The plan of our paper is as follows. In Sec.\,\ref{sec_cf_states}, we briefly review the fundamentals of the FQHE on a torus and give explicit forms of wave functions that are involved in our calculation. In Sec.\,\ref{sec_vmc} we report our VMC studies on the topic. We next introduce the general principles of the DMC method in Sec.\,\ref{sec_dmc_basics}. After that, we present our 2D-DMC and 3D-DMC investigations individually. We discuss our results in the end and more technical details can be found in the appendices.
All calculations are performed in the torus geometry, except those presented in Appendix~\ref{VMC_SPHERE_SEC}. Throughout this work, we assume parameters appropriate for GaAs, with dielectric constant $\epsilon=12.6$ and band mass $m=0.067 m_e$, where $m_e$ is the electron mass in vacuum. The magnetic length is denoted $l_B=\sqrt{\hbar c/eB}$ where $B$ is the magnetic field.
\section{Relevant states at half filling}\label{sec_cf_states}
We shall include in our study several different states at filling factor $\nu=1/2$, which we now list. We primarily use the torus geometry for our study, because the CFFS can be constructed on a torus with explicit wave vector configuration. (On the sphere one must approach the CFFS by taking the limit $n\to \infty$ for Jain states at $\nu=\frac{n}{2n+1}$~\cite{Rezayi94,Balram15b,Balram17}, which requires going to very large systems that are not accessible to DMC.) We also give the VMC results in the spherical geometry in Appendix~\ref{VMC_SPHERE_SEC} for comparison.
We start this section by reviewing some basics of FQHE on a torus\cite{Pu17, Bernevig12, Haldane85b, Haldane85, Greiter16}.
\subsection{Basics of FQHE on a torus}
We start by formulating the single-particle orbitals and we use them to construct the many-body wave functions. We map a torus to a parallelogram with quasi-periodic boundary conditions in the complex plane. The two edges of the parallelogram are given by $L$ and $L\tau$ in the complex plane, where $\tau$ is a complex number representing the modular parameter of the torus. We will take $L$ to be real. (We also use the symbol $\uptau$, with a different font, for the imaginary time in the introduction of the DMC algorithm; this should not cause any confusion, given that the two appear in very different contexts.) The location $\vec{z}=(x,y)$ of a particle in the complex plane is represented by the complex number $z=x+i y$. Later when we include the transverse dimension, the displacement vector in 3D space is labeled by $\vec{r}=(x, y, w)$. To make the quasi-periodic boundary conditions in $L$ and $L\tau$ directions compatible, the number of flux quanta through the torus, $N_\phi=BL^2{\rm Im}[\tau]/\phi_0$, must be an integer, where $\phi_0=hc/e$ is a single flux quantum. We will work with the symmetric gauge $\vec{A}=(B/2)(y, -x, 0)$, which corresponds to a uniform magnetic field $\vec{B}=-B\hat{z}$ perpendicular to the surface of the torus. For simplicity, we choose a square torus with $\tau=i$. The magnetic translation operator is given by
\begin{align}
t\left(\vec{\xi}\right)=e^{-\frac{i}{2l_B^2} \hat{z}\cdot (\vec{\xi}\times \vec z)}T\left(\vec{\xi} \right)
\label{}
\end{align}
where $T\left(\vec{\xi}\right)$ is the usual translation operator.
The single-particle orbitals are imposed with the quasi-periodic boundary conditions: \begin{equation}
\begin{aligned}
&t\left(L\right) \psi\left(z\right)=e^{i\phi_1}\psi\left(z\right)\\
&t\left(L\tau\right) \psi\left(z\right)=e^{i\phi_\tau}\psi\left(z\right)
\end{aligned}
\label{PBC_single}
\end{equation}
where the phases $\phi_1$ and $\phi_\tau$ are the periodic boundary phases which define the Hilbert space. We have chosen $\phi_1=\phi_\tau=0$ because for our purpose, the calculation of the energy is independent of the choice of these phases.
In general, the single-particle orbitals in the Lowest Landau level (LLL) in symmetric gauge can be written as:\cite{Greiter16, Pu17}
\begin{align}
\psi^{(n)}\left(z\right)&=e^{\frac{z^2-|z|^2}{4 l_B^2}} f^{(n)}(z)
\end{align}
where $f\left(z\right)$ satisfies
\begin{equation}
\begin{aligned}
\frac{T\left(L\right)f\left(z\right)}{f\left(z\right)}=\frac{f\left(z+L\right)}{f\left(z\right)}&=1\\
\frac{T\left(L\tau\right)f\left(z\right)}{f\left(z\right)}=\frac{f\left(z+L\tau\right)}{f\left(z\right)}
&=e^{-i \pi N_\phi(2z/L+\tau)}
\end{aligned}
\label{PBC_eigenstate}
\end{equation}
The solutions to Eq.~\ref{PBC_eigenstate} are given by\cite{Greiter16}
\begin{equation}
\begin{aligned}
f^{(n)}\left(z\right) &= e^{i k^{(n)} z} \prod_{s=1}^{N_\phi}\theta\left( z/L-w_s^{(n)}|\tau\right)\\
k^{(n)} &=\frac{-\pi N_\phi+2\pi n}{L}\\
w_s^{(n)} &=\frac{1}{2\pi N_\phi}\left[-\pi N_\phi(2-\tau)-2\pi n \tau+\pi+2\pi(s-1)\right]
\end{aligned}
\end{equation}
where $\theta\left( z|\tau\right)$ is the odd Jacobi theta function\cite{Mumford07} (see Appendix~\ref{theta_function_definition} for its definition and properties). Here we have $n=0,1,2,\cdots, N_\phi-1$; $w_s^{(n)} L$ give the positions of zeros; and $k^{(n)}$ is a real number labeling the eigenvalues of magnetic translation $t(L/N_\phi)$:
\begin{equation}
\label{T1}
t\left(L/N_\phi\right)\psi^{(k)}(z,\bar{z}) =e^{i\frac{2\pi k}{N_\phi}}\psi^{(k)}(z,\bar{z}).
\end{equation}
Starting from single-particle wave functions, one can construct many-body wave functions that preserve the quasi-periodic boundary conditions. In general, the many-body wave function at filling $p/q$, where $p$ and $q$ are co-primes, has a $q$ fold center-of-mass (CM) degeneracy\cite{Haldane85b}. The Laughlin wave function at $\nu=1/m$ is given by\cite{Haldane85, Greiter16, Haldane85b}
\begin{equation}
\begin{aligned}
\label{Laughlin_wf}
\Psi_{1/m}^{(n)}(\{z_i\})=e^{\sum_i\frac{z_i^2-|z_i|^2 }{4l_B^2}} F_\frac{1}{m}^{(n)}\left( Z\right)\prod_{i<j}\left[\theta\left(\frac{z_i-z_j}{L}|\tau\right)\right]^{m}
\end{aligned}
\end{equation}
where $F_\frac{1}{m}^{(n)}(Z)$ describes the CM part with $Z=\sum_{i=1}^N z_i$:
\begin{equation}
\begin{aligned}
F_\frac{1}{m}^{(n)}(Z)=&e^{i K^{(n)}Z}\prod_{s=1}^{m} \theta\left(Z/L-W_s^{(n)}|\tau\right),\\
K^{(n)}
=&(-\pi N_\phi+2 \pi n)/L\\
W_s^{(n)}
=&\frac{ N_\phi \tau-N_\phi-2n \tau-(m-1)+2(s-1)}{2m}
\end{aligned}
\end{equation}
where $n=0, 1, 2,\dots,m-1$ labels the $m$-fold CM degeneracy\cite{Haldane85,Greiter16}. In the special case $m=1$, Eq.~\ref{Laughlin_wf} gives the wave function $\Psi_1$ for filled LLL. For the filled LLL wave function, we drop the superscript $n$ for $F_1(Z)$, since $n$ can take only one value $n=0$.
The Jain state at $\nu=\frac{s}{2ps+1}$ is constructed as
\begin{equation}
\begin{aligned}
\Psi_{\frac{s}{2ps+1}}=\mathcal{P}_\text{LLL}\Psi_s\Psi_1^{2p}
\end{aligned} \label{Jainwf}
\end{equation}
where $\Psi_s$ stands for the wave function of electrons filling the lowest $s$ LLs, $\Psi_1^{2p}$ attaches $2p$ vortices to each electron to composite-fermionize it, and $\mathcal{P}_\text{LLL}$ projects the wave function into the LLL. This form is valid for both the spherical and the torus geometries. On the torus, the wave function in Eq.~\ref{Jainwf} does not have a well-defined CM momentum, but $2ps+1$ degenerate CM eigenstates can be constructed as discussed by Pu \textit{et al.}\cite{Pu17} Ref.~\onlinecite{Pu17} also shows how LLL projection can be conveniently accomplished for the Jain states in the torus geometry.
\subsection{One-component CFFS state}
An important state for our purposes is the one-component CFFS. As mentioned above, this state thrives in narrow quantum wells.
The construction of the CFFS wave function at $\nu=1/2p$ in the torus geometry is accomplished by attaching $2p$ flux quanta to an electron Fermi sea state and projecting it into the LLL \cite{Rezayi94,Shao15,Geraedts18,Wang19,Pu18}:
\begin{equation}
\Psi_\text{CFFS, 1/2p}\left(\left\{ z_i\right\}\right)=\mathcal{P}_\text{LLL} \Psi_{FS} \Psi_1^{2p}
\end{equation}
where $\Psi_\text{FS}=\det[e^{i\vec{k}_n\cdot\vec{r}_i}]$ stands for the Fermi sea wave function. It can be projected into the LLL to produce
\begin{equation}
\begin{aligned}
& \Psi_\text{CFFS, 1/2p}\left(\left\{ z_i\right\}\right)
=e^{\frac{\sum_i z_i^2-|z_i|^2}{4l_B^2}} F_1\left( Z+i\ell_B^2K\right)^{2p} \\
&\times \det{\left[G_{k_n}\left(z_m\right)\right]} \left[\prod_{i<j}\theta\left( \frac{z_i-z_j}{L}|i\right)\right]^{2p-2}
\end{aligned}
\label{CFFS WF}
\end{equation}
where
\begin{equation}
\begin{aligned}
G_{k_n}\left(z_m\right)&=e^{-\frac{k_n l_B^2}{4}(k_n+2\bar{k}_n)}e^{\frac{i}{2}(\bar{k}_n+k_n)z_m}\\
&\times\prod_{j, j\neq m}\theta\left(\frac{z_m+2pik_n l_B^2-z_j}{L}|i\right).
\end{aligned}
\end{equation}
Here $k_n$ stand for the magnetic momenta occupied by the CFFS, with the CM momentum given by $K=\sum_n k_n$. The empirical rule is that the configuration of $k_n$'s that produces the ground state is as compact as possible, i.e. minimizes $\sum_n\left(k_n-K/N\right)^2$. More details can be found in References \onlinecite{Rezayi94,Pu18,Fremling18,Pu20b}.
\subsection{Pfaffian state}
Three distinct Pfaffian wave functions on the torus are given by \cite{Greiter91,Greiter92a}
\begin{equation}
\label{Pfaffian_wfn}
\begin{aligned}
&\Psi_\text{Pf, 1/2}\left( \left\{ z_i\right\}\right)\\
=&Pf\left( M_{ij} \right)F_1^2\left(Z\right) \prod_{i<j}\theta^2\left(\frac{z_i-z_j}{L}|i\right) e^{\frac{\sum_i z_i^2-|z_i|^2}{4l_B^2}}.
\end{aligned}
\end{equation}
Here $Pf\left(M_{ij}\right)$ is the Pfaffian of the matrix $M_{ij}=\frac{\theta_a\left(\frac{z_i-z_j}{L}|i\right)}{\theta_1\left(\frac{z_i-z_j}{L}|i\right)}$, and the choices $a=2, 3, 4$ produce three distinct Pfaffian wave functions.
The definition of $\theta_a\left(z|\tau\right)$ can be found in Appendix\,\ref{theta_function_definition}. These three states are degenerate for a three-body Hamiltonian for which the Pfaffian state is exact and are believed to become degenerate for Coulomb interaction in the thermodynamic limit \cite{Peterson08}. Our calculations also show that the energy difference between them is negligible because: (1) for VMC calculation the difference is much smaller than the difference between the Pfaffian state and the CFFS; and (2) for DMC calculation the energy differences are smaller than the statistical uncertainty (see Appendix\,\ref{PF_DEGENERACY}). For these reasons, and given the limitations of our computational resources, we choose $a=2$ below.
\subsection{Uncoupled $1/4+1/4$ two-component CFFS state}
In the limit of very wide quantum wells, we expect the system to form two uncoupled $1/4$ CFFSs, which is referred to as $1/4+1/4$ CFFS. The wave function of this two-component state is the product of the two $1/4$ CFFSs defined in Eq.\,\ref{CFFS WF}:
\begin{equation}
\Psi_\text{CFFS, 1/4+1/4}=
\Psi_\text{CFFS, 1/4}\left(\left\{ z_i\right\}\right) \Psi_\text{CFFS, 1/4}\left(\left\{ z_{[j]}\right\}\right)
\end{equation}
where $i=1, 2, \dots, N_e/2$ denote the electrons belonging to the first layer and $[j]\equiv N_e/2+j=N_e/2+1, N_e/2+2,\dots,N_e$ denote the electrons belonging to the second layer.
\subsection{The pseudo-spin singlet CFFS states}
We also consider the pseudo-spin singlet CFFS state, which is compressible and is constructed by attaching flux quanta to the pseudo-spin-singlet Fermi sea wave function. Here the term ``pseudo-spin'' refers to the layer index. The pseudo-spin singlet CFFS state has interlayer correlations, in contrast to the $1/4+1/4$ CFFS state. One can write its wave function by simply replacing in Eq.\,\ref{CFFS WF} the determinant in the wave function of the pseudo-spin polarized 1/2 CFFS by the product of determinants of the two pseudo-spins \cite{Hossain20a}:
\begin{equation}
\begin{aligned}
\det \left[G_{k_n}\left( z_m \right)\right] \to \det \left[G_{k_n}\left( z_i \right)\right] \det \left[ G_{k_l}\left( z_{[j]} \right)\right]
\end{aligned}
\end{equation}
where $i=1, 2, \dots, N_e/2$ and $[j]=N_e/2+1, N_e/2+2,\dots,N_e$ denote the electrons belonging to two pseudo-spin components. The Jastrow factor remains the same as in Eq.\,\ref{CFFS WF} which includes both intra-layer and inter-layer correlations. To make sure that the state is a singlet, one also needs to make the momentum distribution identical for both pseudo-spins.
\subsection{The Halperin $(3,3,1)$ state}
The Halperin $(3,3,1)$ state reads
\begin{equation}
\begin{aligned}
&\Psi_\text{$(3,3,1)$}\left( \left\{ z_i\right\} \right)=\\
&e^{\frac{\sum_i z_i^2-|z_i|^2}{4l_B^2}} F_{(3,3,1)}\left(Z\right)\prod_{1\leq i<j\leq N_e/2}\theta^3\left( \frac{z_i-z_j}{L}|i\right)\cdot\\
\cdot&\prod_{N_e/2<[i]<[j]\leq N_e}\theta^3\left( \frac{z_{[i]}-z_{[j]}}{L}|i\right) \prod_{\substack{1\leq i\leq N_e/2,\\N_e/2<[j]\leq N_e}}\theta\left( \frac{z_i-z_{[j]}}{L}|i\right).
\end{aligned}
\end{equation}
Here
\begin{equation}
\begin{aligned}
F_{(3,3,1)}(Z)&=F^{(0)}_\frac{1}{2}\left(Z_L\right)F^{(0)}_\frac{1}{2}\left(Z_R\right)F_1\left(Z\right)
\end{aligned}
\end{equation}
where $Z_L=\sum_{i=1}^{N_e/2}z_i$, $Z_R=\sum_{[j]=N_e/2+1}^{N_e}z_{[j]}$, and $Z= Z_L+Z_R$ (here $L$ and $R$ denote the left and right layers).
\section{VMC calculation of the phase diagram}\label{sec_vmc}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./LDA_n_1_5_10_30_w80.pdf}
\caption{The transverse density profile for the lowest (red) and the first excited (blue) subbands in the quantum well of width $W=80$ nm calculated by LDA.}
\label{LDA_band}
\end{figure}
We shall model the confinement potential as an infinitely deep quantum well of width $W$. In some circumstances a finite depth is also considered, but in general this does not cause any significant difference because the GaAs quantum wells we discuss in this article (and also those in experiments) are generally very deep. The problem is modeled via a VMC calculation which includes an effective two-dimensional interaction, defined as follows:
\begin{equation}
V_{\text{eff}}\left( \vec{r}\right)=\frac{e^2}{\epsilon}\int dw_1\int dw_2 \frac{|\psi(w_1)|^2|\psi(w_2)|^2}{\sqrt{|\vec{r}|^2+(w_1-w_2)^2}}.
\label{V_eff}
\end{equation}
Here $\psi(w)$ is the transverse wave function, $w$ is the transverse coordinate, and $\vec{r}$ is a two-dimensional vector. In the simplest approximation, the subband wave functions are taken as the single-particle solutions of a quantum well problem $\psi_S(w)=\sqrt{\frac{2}{W}}\cos\left(\frac{\pi w}{W}\right)$ and $\psi_A(w)=\sqrt{\frac{2}{W}}\sin\left(\frac{2\pi w}{W}\right)$, where $S$ and $A$ refer to symmetric and antisymmetric. In this approximation the subband gap is $\Delta_{\rm SAS}=\frac{3\pi^2}{2}\frac{\hbar^2}{mW^2}$, where $m$ is the band mass of the electron. A better approximation for $\psi(w)$ is obtained by LDA at zero magnetic field, where one assumes a Fermi liquid state in the 2D plane~\cite{Park99b,Faugno19}. In this and the next section that introduces the 2D fixed-phase DMC, we use the LDA form for $\psi(w)$. We denote the lowest two subbands as $\psi_S$ and $\psi_A$, in which $S$ represents the symmetric subband and $A$ represents the anti-symmetric subband. The typical LDA density profiles of the lowest two subbands are shown in Fig.~\ref{LDA_band}. Before going further, let us discuss how the occupation of the subbands changes as one tunes the subband gap. When $\Delta_{\rm SAS}$ is much larger than the Fermi energy, as is the case for either very small $W$ or small densities, only the lowest subband is occupied. In the limit when the lowest two bands are approximately degenerate ($\Delta_{\rm SAS}\approx 0$), which happens at large $W$ or at large densities, two-component states are possible, where the two components are linear combinations of the two subbands. Because the system tends to form two layers at large widths, we choose the left-right bases as (Fig.~\ref{LDA_LR_COMPONENT}):
\begin{equation}
\begin{aligned}
\psi_L=\frac{1}{\sqrt{2}}(\psi_S +\psi_A)\\
\psi_R=\frac{1}{\sqrt{2}}(\psi_S -\psi_A)\\
\end{aligned}
\label{vmc_bases}
\end{equation}
More generally, we can choose $\psi_\theta=\frac{1}{\sqrt{2}}(\psi_S +e^{i \theta}\psi_A)$ and $\psi'_\theta=\frac{1}{\sqrt{2}}(\psi_S -e^{-i \theta}\psi_A)$. However, because the system becomes a bilayer for sufficiently wide quantum wells or large densities, we expect that $\theta=0$ will produce the lowest energy.
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./LDA_n_1_5_10_30_w80_LR.pdf}
\caption{The density profiles of the left (blue) and right (red) bases in the quantum well of $W=80$ nm calculated by LDA.}
\label{LDA_LR_COMPONENT}
\end{figure}
Similarly to Eq.\,\ref{V_eff}, we define the effective interactions as follows. For one-component states, only the lowest symmetric subband is used for defining the effective interactions whereas for two-component states, both intra-component interaction and inter-component interaction are needed:
\begin{equation}
\begin{aligned}
V_{\text{SS}}\left( \vec r\right)=\frac{e^2}{\epsilon}\int dw_1\int dw_2 \frac{\rho_\text{S}(w_1)\rho_\text{S}(w_2)}{\sqrt{|\vec{r}|^2+(w_1-w_2)^2}}\\
V_{\text{LL}}\left( \vec r\right)=\frac{e^2}{\epsilon}\int dw_1\int dw_2 \frac{\rho_\text{L}(w_1)\rho_\text{L}(w_2)}{\sqrt{|\vec{r}|^2+(w_1-w_2)^2}}\\
V_{\text{RR}}\left( \vec r\right)=\frac{e^2}{\epsilon}\int dw_1\int dw_2 \frac{\rho_\text{R}(w_1)\rho_\text{R}(w_2)}{\sqrt{|\vec{r}|^2+(w_1-w_2)^2}}\\
V_{\text{LR}}\left( \vec r\right)=\frac{e^2}{\epsilon}\int dw_1\int dw_2 \frac{\rho_\text{L}(w_1)\rho_\text{R}(w_2)}{\sqrt{|\vec{r}|^2+(w_1-w_2)^2}}\\
\end{aligned}
\label{V_eff_explicit}
\end{equation}
The densities are defined as $\rho_\text{S}=\left|\psi_S\right|^2$ and $\rho_\text{L,R}=\left|\psi_\text{L,R}\right|^2$.
We mention here two caveats. First of all, we consider states for which either only the lowest subband is occupied, or the two lowest subbands are equally occupied. All of our trial wave functions, namely the single-component CFFS, the single component-Pfaffian, the pseudo-spin singlet CFFS, the uncoupled $1/4+1/4$ CFFS, and the $(3,3,1)$ satisfy this requirement. In principle, we can also consider a partially polarized CFFS, which will have an unequal occupation of two subbands, but we have not done so (because it significantly enhances the calculational difficulty). All other states considered here cannot be partially polarized. Second, the value of $\Delta_{\rm SAS}$ is relevant for transitions from a single-component to a two-component state. $\Delta_{\rm SAS}$ is typically very large compared to the Coulomb energy differences between the relevant states. We determine the value of $\Delta_{\rm SAS}$ from the LDA calculation (Fig.~\ref{Delta_SAS}).
The energies of one-component states relative to the CFFS are shown in Fig.~\ref{VMC_E_1}.
As one can see, the CFFS remains the lowest energy state for all parameters, although the Pfaffian comes as close as $0.001 \frac{e^2}{\epsilon l_B}$ at densities greater than $2\times 10^{11} \text{cm}^{-2}$. This conclusion is also supported by exact diagonalization studies of finite systems in the spherical geometry. In Appendix~\ref{ED_Ajit}, we show the overlap between the exact ground state of the LDA interaction with the one-component CFFS and the Pfaffian states, and find that in the entire region of parameter space that we considered, the one-component CFFS always has a very high overlap with the exact ground state and thus is superior to the Pfaffian.
[Note: We have also performed the energy comparison in the spherical geometry, where we see a different result, namely that the Pfaffian state has lower energy in the thermodynamic limit for some parts of the phase diagram. We believe that the torus results are more reliable because the thermodynamic extrapolation on the sphere is less accurate for finite widths. See Appendix\,\ref{VMC_SPHERE_SEC} for further discussion.]
The energies of the two-component states, namely the Pseudo-spin singlet CFFS, $(3,3,1)$ and the uncoupled $1/4+1/4$ CFFS, relative to the $(3,3,1)$ are shown in Fig.~\ref{VMC_E_2}.
A transition from the singlet CFFS to the Halperin $(3,3,1)$ occurs at very low densities, followed by a second transition into the uncoupled $1/4+1/4$ CFFS (Fig.~\ref{VMC_E_2}). This behavior is similar to that found in earlier VMC calculations on the zero-width bilayer systems\cite{Scarola01b}. The phase diagrams for one and two-component states separately are shown in Fig.~\ref{VMC_PHASE_DIAGRAM_1}.
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./LDA_Subband_Ec.pdf}
\caption{Subband gap $\Delta_{\rm SAS}$ calculated in LDA for various quantum well widths as a function of the density.}
\label{Delta_SAS}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./Torus_VMC_FW_onelayers.pdf}
\caption{The VMC calculation of the energy difference per particle between the Pfaffian and the one-component CFFS state in the thermodynamic limit. The well widths are shown on the plots.}
\label{VMC_E_1}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./Torus_VMC_FW_bilayers.pdf}
\caption{The VMC calculation of the energy per particle of the $1/4+1/4$ CFFS state and the singlet CFFS state relative to the $(3,3,1)$ state in the thermodynamic limit. The well widths are shown on the plots. The statistical errors are smaller than the symbol sizes.}
\label{VMC_E_2}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./Torus_VMC_FW_all_states.pdf}
\caption{The VMC calculation of the energy per particle of the one-component CFFS state, the Pfaffian state, the $1/4+1/4$ CFFS state, and the singlet CFFS state in the thermodynamic limit. All energies are measured relative to the energy of the $(3, 3, 1)$ state. The well widths are shown on the plots. The energies of the one-component states change rapidly relative to the $(3, 3, 1)$ state due to the $\Delta_{\rm SAS}$ component. The statistical errors are smaller than the symbol sizes.}
\label{VMC_E_3}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./torus_VMC_Phase_boundary_1L.pdf}
\includegraphics[width=\columnwidth]{./torus_VMC_Phase_boundary_2L.pdf}
\caption{(a) The phase diagram of one component states, including CFFS (red) and the Pfaffian state (purple). The Pfaffian state is not stabilized for the parameters considered. (b) The phase diagram of two component states, including the $(3, 3, 1)$ state (green), pseudo-spin singlet (blue), and $1/4+1/4$ CFFS state (yellow). The region where experiments find an incompressible state~\cite{Shabani09b} is indicated by light dashed grey lines. At each width, the uncertainty of the transition density is about $1\times 10^{10} \text{cm}^{-2}$. The overall phase boundary is obtained by smoothly joining the transition points at $W=50,60,70,80$ nm.}
\label{VMC_PHASE_DIAGRAM_1}
\end{figure}
Fig.~\ref{VMC_E_3} shows the energies of all states. We add $\frac{1}{2} \Delta_\text{SAS}$ to the energy per particle for each two-component state, because half of all particles occupy the second subband. We quote all energies relative to the $(3,3,1)$ state in Fig.~\ref{VMC_E_3}, which yields the phase diagrams in Fig.~\ref{VMC_PHASE_DIAGRAM_2}. In general, one can see the ground state of the system is in a one-component state when the carrier density is low and the system makes a transition into a two-component state at high density. This result is qualitatively consistent with the experiments~\cite{Shabani09b, Suen92, Luhman08}, and favors the possibility that the observed incompressible state is the $(3,3,1)$ state.
There are significant differences, however. As noted earlier, the lower phase boundary is very sensitive to $\Delta_{\rm SAS}$. However, the upper theoretical phase boundary ought to be more reliable, and its deviation from the experimental phase boundary is thus significant. We note, however, that the calculation, so far, does not include LLM or disorder.
\section{Fixed-phase Diffusion Monte Carlo Method}\label{sec_dmc_basics}
In the following sections, we will use the fixed-phase DMC method to evaluate the phase diagram. The general DMC is a standard Monte Carlo method designed to obtain the ground state of the many-body Schr\"{o}dinger equation\cite{Reynolds82, Foulkes01} by a stochastic method.
By setting time to an imaginary variable $(t\to t=-i \uptau)$, the Schr\"odinger equation takes the form
\begin{equation}
\begin{aligned}
-\hbar \partial_\uptau \Psi\left(\vec{R}, \uptau\right)= \left( H\left(\vec{R}\right)-E_T\right)\Psi\left(\vec{R}, \uptau\right)
\end{aligned}
\end{equation}
where $\vec{R}=\left( \vec{r}_1, \vec{r}_2, \dots, \vec{r}_{N_e}\right)$ is the collective coordinate of the system and $E_T$ is a constant energy offset. When $\Psi\left(\vec{R}, \uptau\right)$ is real and non-negative, one can interpret the above equation as a diffusion equation, with $\Psi\left(\vec{R}, \uptau\right)$ interpreted as the density distribution of randomly moving walkers. The energy offset $E_T$
controls the population of random walkers. Starting from an initial trial wave function $\Psi_T$, as the walkers diffuse stochastically, the distribution gradually converges to a stable distribution that represents the ground state (provided $\Psi_T$ has a non-zero overlap with the ground state).
More details can be found in Refs.~\onlinecite{Foulkes01} and \onlinecite{Mitas98}.
The applicability of the DMC method relies on the assumption that the ground state is real and non-negative. However, this condition is not satisfied in a system with broken time-reversal symmetry, which is the case in the presence of a magnetic field. To overcome this difficulty, the fixed-phase DMC method has been proposed\cite{Ortiz93, Melik-Alaverdian97}. The key idea is to write the wave function as
\begin{equation}
\Psi(\vec{R})= \left| \Psi(\vec{R}) \right | \exp \left[ i \phi(\vec{R})\right]
\label{amp_phase}
\end{equation}
and determine the $\left| \Psi(\vec{R})\right |$ that gives the lowest energy for a fixed phase $\phi(\vec{R})$ by DMC method. This amounts to solving the Schr\"odinger equation
\begin{equation}
\begin{aligned}
&H_\text{DMC} \left| \Psi\left(\vec{R}, \uptau\right)\right|=\\
& \left( -\sum_{i=1}^N \frac{\hbar^2\nabla_i^2}{2m} +V_{\text{DMC}}\left(\vec{R}\right)-E_T\right) \left|\Psi\left(\vec{R}, \uptau\right)\right|=E \left|\Psi\left(\vec{R}, \uptau\right)\right|\\
\end{aligned}
\label{eqn_amp}
\end{equation}
with
\begin{equation}
V_{\text{DMC}}\left( \vec{R} \right)=V\left( \vec{R} \right)+\frac{1}{2 m}\sum_{i=1}^N
\left[ \hbar \nabla_i \phi \left(\vec{R}\right)+\frac{e}{c} {\mathbf A}\left({\mathbf r }_i\right) \right]^2 \label{Vdmc}.
\end{equation}
The diffusion equation is often efficiently solved by an importance sampling method. The so-called guiding function is defined as
\begin{equation}
f\left( \vec{R}, \uptau \right)=\left|\Psi_T \left( \vec{R}\right) \right| \left|\Psi \left( \vec{R}, \uptau\right) \right|
\end{equation}
where $\Psi_T$ is the trial wave function.
Instead of solving Eq.\,\ref{eqn_amp} directly, we solve the equivalent equation:
\begin{equation}
\begin{aligned}
-\hbar\partial_{\uptau}f({\bf R},\uptau)
&=-\frac{\hbar^2}{2m}\nabla^2 f({\bf R},\uptau)+\frac{\hbar^2}{m}\nabla \cdot \left({\bf v}_D f({\bf R},\uptau)\right)\\
&+\left(E_L({\bf R})-E_T\right) f({\bf R},\uptau)
\end{aligned}
\label{equiv_schrodingerr}
\end{equation}
where $\nabla=\left(\nabla_1, \nabla_2, \dots, \nabla_N\right)$ is the $dN$-dimensional (in $d$ space dimensions) gradient operator, $\vec{v}_D\left(\vec{R}\right)$ is the $dN$-dimensional drift velocity defined by
\begin{equation}
\vec{v}_D\left(\vec{R}\right)=\nabla \ln \left|\Psi_T \left(\vec{R}\right)\right|,
\end{equation}
and
\begin{equation}
E_L\left(\vec{R}\right)=|\Psi_T|^{-1}H_\text{DMC} |\Psi_T|
\end{equation}
is the local energy. We give their explicit forms in Appendix~\ref{DMC_algorithm} based on Ref.~\onlinecite{Ortiz93}.
The accuracy of the DMC energy depends on the choice of the phase $\phi(\vec{R})$. In this paper, our initial DMC trial wave functions will be our candidate trial wave functions described earlier. (In the case of 3D-DMC, these will also include the transverse wave function.)
Each trial wave function identifies a specific phase $\phi_T$. The DMC algorithm then produces the lowest energy state for each choice of the trial wave function.
We stress that the DMC calculation automatically includes LLM. In fact, it is a non-perturbative method for treating LLM, which has been shown in past studies to give rather accurate results\cite{Guclu05a,Ortiz93,Melik-Alaverdian95, Bolton96, Melik-Alaverdian97, Melik-Alaverdian99, Zhao18, Zhang16, Hossain20}.
\section{2D fixed-phase DMC study with effective interaction}\label{sec_2d_dmc}
We implement a 2D fixed-phase DMC study of the problem where we obtain the lowest energy using DMC while setting $V(\vec{R})$ in Eq.~\ref{Vdmc} to $V_{\rm eff}(\vec{R})$ introduced in Eq.~\ref{V_eff}. This allows for LLM in a model where electrons confined to 2D are interacting via $V_{\rm eff}(\vec{R})$. As noted above, the phase is fixed by the trial wave functions described above.
As shown in Fig.~\ref{DMC_2d_1}, the comparison between the one-component CFFS and the Pfaffian state is very similar to that from VMC calculation and no transition occurs into the Pfaffian state (Fig.~\ref{2D_DMC_BOUNDARY}(a)). Meanwhile, the result for two-component states is quite different from the VMC result (Fig.~\ref{DMC_2d_2}). We find that the uncoupled $1/4+1/4$ CFFS is very efficient in lowering its energy in the presence of the LLM. In contrast to the VMC result, the system makes a transition from the pseudo-spin singlet CFFS directly into the $1/4+1/4$ CFFS state for most parameters (Fig.~\ref{2D_DMC_BOUNDARY}(b)). For very large widths and low densities, we find a small region of $(3,3,1)$ state.
When both one-component and two-component states are considered, the resulting phase diagram is shown in Fig.~\ref{2D_DMC_BOUNDARY_ALL}. The one-component CFFS makes a transition into the uncoupled two-component $1/4+1/4$ without going through an incompressible state, except in a small region where the well-width is large. We note here that the extrapolation of the 2D-DMC is less linear for the two-component states, which leads to a larger statistical error of about $2\times 10^{10} \text{cm}^{-2}$ for the density where the phase transition occurs.
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./Torus_2d_DMC_FW_onelayers.pdf}
\caption{2D-DMC calculation of the energy difference per particle between the one-component CFFS and the Pfaffian state in the thermodynamic limit.}
\label{DMC_2d_1}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./Torus_2d_DMC_FW_bilayers.pdf}
\caption{2D-DMC calculation of the energy per particle of the $1/4+1/4$ CFFS and the singlet CFFS state in the thermodynamic limit relative to the $(3,3,1)$ state. The well widths are indicated on the plots.}
\label{DMC_2d_2}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./Torus_2d_DMC_FW_all.pdf}
\caption{2D-DMC calculation of the energy per particle of the one-component CFFS state, the Pfaffian state, the $1/4+1/4$ CFFS and the singlet CFFS state relative to the $(3, 3, 1)$ state in the thermodynamic limit. The well widths are labeled on the plots. The energies include contribution from $\Delta_{\rm SAS}$.}
\label{DMC_2d_3}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./2dDMC_Phase_boundary_1L.pdf}
\includegraphics[width=\columnwidth]{./2dDMC_Phase_boundary_2L.pdf}
\caption{(a) The phase diagrams of one component states obtained by 2D-DMC on the torus. (b) The phase diagrams of two component states. The states considered are the CFFS state (red), the $(3, 3, 1)$ state (green), the singlet CFFS state (blue), the $1/4+1/4$ CFFS state (yellow), and the Pfaffian state (purple). In the lower panel, the uncertainty of the transition density from the singlet CFFS state to $(3,3,1)$ is about $1\times 10^{10} \text{cm}^{-2}$, while the transition density from $(3, 3, 1)$ or singlet CFFS to $1/4+1/4$ CFFS has an uncertainty of about $2\times 10^{10} \text{cm}^{-2}$. The region where experiments find an incompressible state~\cite{Shabani09b} is indicated by light dashed grey lines. The overall phase boundary is obtained by smoothly joining the transition points at $W=50,60,70,80$ nm.}
\label{2D_DMC_BOUNDARY}
\end{figure}
\section{3D fixed-phase DMC study of the $1/2$ FQHE}\label{sec_dmc_3d}
The transverse trial wave function for the one-component states are chosen to be:
\begin{equation}
\begin{aligned}
\Psi_\text{trans}\left( \left\{ w_i\right\}\right)&=\prod_{i=1}^{N_e}\psi_S(w_i)\\
&=\prod_{i=1}^{N_e} \left[\cos\left( \frac{\pi w_i}{W} \right)-\alpha \cos\left( \frac{3 \pi w_i}{W} \right)\right],
\end{aligned}
\label{trans_wf_sing}
\end{equation}
where $W$ is the width of the quantum well and $\alpha$ is a parameter introduced to improve the convergence speed. Empirically we find the program to be most efficient and stable when $\alpha$ is tuned from $0.2$ to $0.8$ as the well width ranges from $2 l_B$ to $10 l_B$. However, one should keep in mind that the choice of $\alpha$ is a technical matter; as long as the number of iterations is large enough, any choice of $\alpha$ leads to the same result because the fixed-phase DMC solves for the lowest energy state within a given phase sector independent of the initial wave function.
For two-component states, before coming to 3D-DMC,
it is necessary to address a significant difficulty. In general, one needs to evaluate the energy expectation of a given two-component state [e.g. Halperin $(3,3,1)$ state] by fully anti-symmetrizing the wave function. For two-component states in a single layer with real spin, the Coulomb interaction does not depend on the spin index, and all the cross-terms produced by anti-symmetrization vanish, and one can treat the two components as two sets of distinguishable particles, which greatly simplifies the calculation. This, however, is not true for the present case since the Coulomb interaction explicitly depends on the transverse coordinates and the cross-terms are nonzero. Here, one must include all the permutation-terms to fully anti-symmetrize the wave function. This is impractical for systems with greater than 10 particles because there are $\frac{N_e!}{(N_e/2)! (N_e/2)!}$ inter-component permutations. A special case is when the two transverse bases have no overlap. In this case, all cross-terms vanish and one can calculate the energy expectation as if the two components were two distinguishable sets of particles. We, therefore, use a transverse trial wave function for the two components to be strictly spatially separated, i.e. one basis function is strictly confined in the left half of the quantum well while the other component in the other half. In other words, our basis is given by:
\begin{equation}
\Psi_\text{trans} \left( \left\{ w_i, w_{[j]}\right\}\right)=\prod_{i=1}^{N_e/2} \prod_{\left[j\right]=N_e/2+1}^{N_e} \psi_L\left(w_i \right) \psi_R \left( w_{[j]}\right)
\label{trans_wf_left}
\end{equation}
where
\begin{equation}
\begin{aligned}
\psi_L\left( w_i \right)&=\left\{\begin{array}{ll}
-\sin(\frac{2\pi w_i}{W}), & \text{if } -W/2<w_i<0\\
0, & \text{if } 0\leqslant w_i<W/2
\end{array}
\right.\\
\psi_R\left( w_{[j]} \right)&=\left\{\begin{array}{ll}
0, & \text{if } -W/2<w_{[j]}<0\\
\sin(\frac{2\pi w_{[j]}}{W}), & \text{if } 0\leqslant w_{[j]}< W/2
\end{array}
\right.
\end{aligned}
\end{equation}
represent the left and right components, respectively.
The 3D trial wave function for the $(3,3,1)$ state is constructed as:
\begin{equation}
\Psi_{(3,3,1)}^{\text{3D}}\left({\mathbf{R}}\right) =\Psi_{(3,3,1)}\left( \left\{ z_i, z_{[j]}\right\}\right) \Psi_\text{trans} \left( \left\{ w_i, w_{[j]}\right\}\right).
\end{equation}
The other two-component states are constructed similarly, with the in-plane part replaced by the corresponding wave functions.
In Appendix~\ref{Full_Antisymmtrized} we test the regime of validity of our approximation (that the right and left components are non-overlapping) for a system of four particles, for which we can implement full antisymmetrization. We find that our approximation becomes excellent near the upper phase boundary in Fig.~\ref{3D_PHASE_2}.
\subsection{Transverse density profile evaluated by 3D-DMC}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./LDA_W50_W80.pdf}
\caption{The transverse density calculated by LDA, which assumes a finite depth quantum well with the realistic parameters of the GaAs. The carrier densities are shown in units of $10^{10}\text{cm}^{-2}$. The area under each profile is normalized to unity.}
\label{LDA_density}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./3d_dmc_density_other_W80_Ne16.pdf}
\caption{3D-DMC calculation of transverse densities for different FQHE states. The calculations are performed for $N_e=16$ and quantum well width $W=80 \text{nm}$. The legend shows the carrier densities corresponding to each color, measured in units of $10^{10}\text{cm}^{-2}$.}
\label{DMC_density_others}
\end{figure}
It is essential to quantitatively understand how the transverse distribution of electrons evolves as the well-width increases. We first show the transverse density calculated by LDA (Fig.~\ref{LDA_density}). The LDA package \cite{Martin20} is for realistic parameters with finite well-depth, and the transverse density extends outside the well by about $3\sim 4$ nm or less on each side.
This justifies our infinite-depth approximation. (In principle our method can also deal with finite depth quantum well, but technically that makes the form of the transverse trial wave function and the local energy more complicated.) In our approach, we implement the 3D-fixed-phase DMC and explicitly calculate the transverse density profiles of different candidate states.
Let us first consider one component states. Fig.~\ref{FINITE_SIZE_DEN} in Appendix~\ref{transverse density} shows that the transverse density for the CFFS is insensitive to the system size. We have found similar behavior at other filling factors. Hence, we believe that the density profiles shown in our work represent the thermodynamic limit.
We have also studied the transverse profile for several filling factors, e.g. for $\nu=1$, 1/3, 1/5 FQHE states. As shown in Fig.~\ref{DMC_density_others}, we find that the transverse densities are not sensitive to the filling factor.
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./3d_dmc_density_W50_1L_Ne16.pdf}
\includegraphics[width=\columnwidth]{./3d_dmc_density_W80_1L_Ne16.pdf}
\caption{Transverse densities for the one-component CFFS and the Pfaffian state calculated by 3D-DMC. The legend shows the carrier density in units of $\mathrm{10^{10} cm^{-2}}$. The results are shown for quantum well widths $W=50$ nm (a) and $W=80$ nm (b). At each width, the density profiles of the one-component CFFS and the Pfaffian state are shown individually in the upper two panels of (a) and (b). The lowest panel shows the differences between the two densities $\rho_\text{CFFS}-\rho_\text{Pfaf}$; the scale on the left corresponds to the lowest plot, and the rest are shifted up by 0.0025 units; also only 1 out of every 5 data points in the calculation is shown for clarity. The system size is $N_e=16$.}
\label{Density_1L}
\end{figure}
In Figure\,\ref{Density_1L} we show the transverse density for the one-component CFFS and Pfaffian states at $\nu=1/2$ in a $\mathrm{50 nm}$ and an $\mathrm{80 nm}$ well width, with the areal density ranging from $\mathrm{n=1\times10^{10} cm^{-2}}$ to $\mathrm{3\times 10^{11} cm^{-2}}$. Other widths we consider in this article are between $\mathrm{50 nm}$ and $\mathrm{80 nm}$ and the profiles of the transverse density are similar (not shown). As one can see, the system becomes more and more two-component-like with increasing carrier density.
If one compares the 3D-DMC results with the LDA results, one can see that the two methods give very similar predictions, although the two ``humps," which indicate the onset of bilayer-like physics, appear at somewhat smaller densities in the LDA results.
We next show in Fig.~\ref{Density_2L} the transverse density profiles for two-component states, assuming that the density vanishes at the center point (for reasons discussed above). The transverse wave function is insensitive to the state in 2D, and, as expected, the system becomes more bilayer-like with increasing carrier density.
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./3d_dmc_density_strict2L_Ne8.pdf}
\caption{Transverse density of the left-component obtained by 3D-DMC for two-component states. The right component is analogous. The legend shows the carrier density in units of $\mathrm{10^{10} cm^{-2}}$. The system size is $N_e=8$.}
\label{Density_2L}
\end{figure}
\subsection{Energy calculation and phase diagram by 3D-DMC}
In this section, we show our calculation of the energy expectations of different states considered in this article. We first show in Fig.~\ref{3D_1L_energy} the energy comparison between the one-component CFFS and the Pfaffian state. The energy of the Pfaffian state gets closer and closer to the CFFS as the carrier density increases for each well-width. In fact, their difference becomes so small that it is comparable to the statistical error and we are not able to determine which one is lower. Within the two-component states, the energies are shown in Fig.~\ref{3D_strict2L_energy} and the theoretical phase diagram is shown in Fig.~\ref{3D_PHASE_1} (b). As the density increases, the system first makes a transition from the pseudo-spin singlet CFFS state into the Halperin $(3,3,1)$ state, and finally into the uncoupled $1/4+1/4$ state. This is qualitatively similar to the behavior found in the VMC calculation. The resulting phase diagrams for one- and two-component states (separately) are shown in Fig.~\ref{3D_PHASE_1}.
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./3d_DMC_1L_e.pdf}
\caption{The energy difference between the Pfaffian state and the one-component CFFS state in the thermodynamic limit as a function of the carrier density at different well widths calculated by 3D-DMC in the torus geometry.}
\label{3D_1L_energy}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./3d_DMC_2L_strict2_e.pdf}
\caption{The energy of several two-component states relative to the Halperin $(3, 3, 1)$ state in the thermodynamic limit as functions of the carrier density at different well widths calculated by 3D-DMC in the torus geometry.}
\label{3D_strict2L_energy}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./3dDMC_Phase_boundary_1L.pdf}
\includegraphics[width=\columnwidth]{./3dDMC_Phase_boundary_2L_strict.pdf}
\caption{
(a) The phase diagrams of one component states obtained by 3D-DMC on the toroidal geometry. Above the dashed boundary, the uncertainty is greater than the energy difference between the one-component CFFS and the Pfaffian state so we suggest it is either the one-component CFFS or the Pfaffian state (purple). (b) The phase diagrams of two-component states. The states considered are the CFFS state (red), the $(3, 3, 1)$ state (green), the singlet CFFS state (blue), and the $1/4+1/4$ CFFS state (yellow). In the lower panel, the uncertainty of the transition density at each width is about $2\times10^{10}\text{cm}^{-2}$. The region where experiments find an incompressible state~\cite{Shabani09b} is indicated by light dashed grey lines. The overall phase boundary is obtained by smoothly joining the transition points at $W=50,60,70,80$ nm.}
\label{3D_PHASE_1}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./3d_DMC_all_e.pdf}
\caption{The energy of all the states calculated by 3D-DMC on the torus in the thermodynamic limit. The energy of each state is measured relative to that of the $(3,3,1)$ state.}
\label{3D_1L_strict2L_energy}
\end{figure}
Because the 3D-DMC automatically includes $\Delta_{\rm SAS}$, we can directly compare all the states. Their energies are given in Fig.~\ref{3D_1L_strict2L_energy}, and the resulting phase diagram is shown in Fig.~\ref{3D_PHASE_2}. This phase diagram is different from that found by VMC or 2D-DMC and suggests that the experimentally observed incompressible state is likely to be the one-component Pfaffian state.
\section{Discussion}
This work concerns the nature of the FQHE at $\nu=1/2$ in wide quantum wells. We have evaluated the phase diagram of states at $\nu=1/2$ as a function of the quantum well width and the carrier density at three different levels of approximation.
Figure~\ref{VMC_PHASE_DIAGRAM_2} shows the phase diagram obtained by a variational Monte Carlo calculation. In this calculation, we evaluate an effective 2D interaction with the help of a transverse wave function calculated by LDA at zero magnetic field. A shortcoming of this method is the assumption that the transverse wave function and $\Delta_{\rm SAS}$ evaluated at zero magnetic field remain valid at finite magnetic fields as well. This is of particular concern for the phase boundaries separating one- and two-component states, because these phase boundaries depend sensitively on $\Delta_{\rm SAS}$, which is a relatively large energy, and also a rapidly varying function of the quantum well width and density. For that reason, in Fig.~\ref{VMC_PHASE_DIAGRAM_2} the phase boundary separating the $(3,3,1)$ and $1/4+1/4$ CFFS states is more reliable than that separating the one-component CFFS and the $(3,3,1)$ states.
The VMC calculation also does not include the effect of LLM directly. Figure~\ref{2D_DMC_BOUNDARY_ALL} includes LLM for electrons interacting with an effective 2D interaction within a fixed phase DMC calculation.
The principal result of the present work is given in Fig.~\ref{3D_PHASE_2}, which is the phase diagram obtained from a 3D fixed phase DMC. This method produces the ground state energy directly at a finite magnetic field, including, in principle, the effect of finite width and LLM. This suggests, although does not prove, that the incompressible state observed in experiments is the one-component Pfaffian state.
A technical difficulty of the 3D-DMC method is that for two-component states we must assume that the transverse wave function vanishes at the center of the quantum well. One may question if this affects comparisons between one- and two-component states. Fortunately, this is an excellent approximation near the upper phase boundary of Fig.~\ref{3D_PHASE_2}, which separates the single component ``CFFS/Pfaffian" state from the two-component ``$1/4+1/4$ CFFS" state. That gives us some degree of confidence that the transition from the two-component $1/4+1/4$ CFFS occurs into the one-component Pfaffian state.
Nonetheless, a definitive confirmation must await further experimental studies. In particular, thermal Hall measurements, which have shown half-quantized value at $5/2$~\cite{Banerjee18}, can convincingly reveal whether the FQHE state here has a non-Abelian origin.
We also note that we do not consider the anti-Pfaffian state, which is the hole partner of the Pfaffian state~\cite{Levin07, Lee07, Balram18}. These two are degenerate in energy in the absence of LLM, but LLM is expected to select one of them. We have not investigated this issue here, both because the anti-Pfaffian is harder to deal with numerically, and because the energy differences are expected to be small compared to the Monte Carlo uncertainty.
Before ending we list other assumptions made in our study. We do not consider the crystal phase. Previous theoretical studies of possible states in an ideal bilayer~\cite{Faugno18, Faugno20} (i.e. two 2D layers separated by a distance $d$) did not find any crystal states, but a crystal may occur in wide quantum wells~\cite{Thiebaut15}. Such a crystal might be responsible for the fact that the experiments see an insulator on either side of the FQHE state, rather than the compressible $1/4+1/4$ CFFS state. Of course, an alternative possibility is that disorder, omitted in our study, may turn the $1/4+1/4$ CFFS into an insulator. Experimental studies in better quality samples can clarify the situation.
\textit{Acknowledgement}: We thank Mansour Shayegan for many insightful discussions. The work was made possible by financial support from the U.S. Department of Energy under Award No. DE-SC0005042. The VMC and DMC calculations were performed using Advanced CyberInfrastructure computational resources provided by The Institute for CyberScience at The Pennsylvania State University. A. C. B. acknowledges the Science and Engineering Research Board (SERB) of the Department of Science and Technology (DST) for funding support via the Start-up Grant SRG/2020/000154. Computational portions of exact diagonalization research work were conducted using the Nandadevi supercomputer, which is maintained and supported by the Institute of Mathematical Science's High-Performance Computing Center. Some of the numerical diagonalizations were performed using the DiagHam package, for which we are grateful to its authors.
\begin{appendix}
\counterwithin{figure}{section}
\section{VMC results from the spherical geometry}
\label{VMC_SPHERE_SEC}
All the above calculations have been performed in the torus geometry. In this section, we present results from our VMC calculations in the spherical geometry. The energy extrapolations are shown in Fig.~\ref{VMC_extrap_CFFS_sphere} for the one-component CFFS, Fig.~\ref{VMC_extrap_Pfaffian_sphere} for the Pfaffian state, Fig.~\ref{VMC_extrap_331_sphere} for the $(3,3,1)$ state, Fig.~\ref{VMC_extrap_2CFFS_sphere} for the $1/4+1/4$ CFFS, and Fig.~\ref{VMC_extrap_singlet_sphere} for the pseudo-spin singlet CFFS. Figs.~\ref{sphere_VMC_FW_1} and \ref{sphere_VMC_FW_2} depict the energies as a function of density for several quantum well widths. The resulting phase diagrams within the one-component and the two-component regimes are shown in Fig.~\ref{sphere_VMC_PT_1}. While the phase diagram of two-component states is almost identical to that on the torus, the phase diagram of one-component states is different: in particular, a phase transition occurs from the one-component CFFS to the Pfaffian state at sufficiently large densities. The final phase diagram shown in Fig.~\ref{sphere_VMC_PT_2} is similar to, but slightly different from, the VMC phase diagram obtained from the torus geometry, shown in the main text.
We believe that the results from the torus geometry are more reliable for the following reasons. (i) As one can see, the thermodynamic extrapolations in the spherical geometry are not as linear as in the torus geometry, and thus entail greater uncertainty in the thermodynamic limit. This is because the finite width effect is only considered in the calculation of the electron-electron repulsion, whereas the electron-background and background-background interactions are chosen to be the same as those for zero-width well, for the simplicity of the calculation. (The form of the background-background interactions in the spherical geometry can be found in the appendix of Ref.~[\onlinecite{Jain07}], while in the torus geometry, the electron-background and background-background interactions are included through Ewald summation which assumes the same form for all interactions.) (ii) The torus geometry is better for the CFFS states. While one can directly construct the CFFS on the torus by attaching flux quanta to the electron Fermi sea for any particle number, in the spherical geometry one must work with the Jain states of filling factor $\nu=\frac{n}{2n+1}$ and take the limit $n\to \infty$ to obtain the energy of the CFFS. Alternatively, one can consider systems with zero effective flux and take the thermodynamic limit~\cite{Rezayi94,Balram15b,Balram17}. The filled shell systems occur at particle numbers $N_e=4,9,16,25,36, ...$. However, due to the complexity of the wave functions, we cannot go beyond $N_e=36$ in VMC. This size limitation makes the energy comparisons less reliable. (iii) Finally, when it comes to DMC, very few CFFS systems are accessible in the spherical geometry, making thermodynamic extrapolations even more unreliable. For that reason, we have not performed DMC calculations in the spherical geometry.
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./2d_sphere_VMC_extrap_1L_CFFS.pdf}
\caption{Finite-size extrapolation of the energy for the one-component CFFS state for different widths and carrier densities. The calculation is done by VMC on the sphere. The well widths are shown on the plots.}
\label{VMC_extrap_CFFS_sphere}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./2d_sphere_VMC_extrap_1L_Pfaf.pdf}
\caption{Finite-size extrapolation of the energy for the Pfaffian state for different widths and carrier densities. The calculation is done by VMC on the sphere. The well widths are shown on the plots.}
\label{VMC_extrap_Pfaffian_sphere}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./2d_sphere_VMC_extrap_2L_hp331.pdf}
\caption{Finite-size extrapolation of the energy for the $(3,3,1)$ state for different widths and carrier densities. The calculation is done by VMC on the sphere. The well widths are shown on the plots.}
\label{VMC_extrap_331_sphere}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./2d_sphere_VMC_extrap_2L_biCFFS.pdf}
\caption{Finite-size extrapolation of the energy for the $1/4+1/4$ CFFS state for different widths and carrier densities. The calculation is done by VMC on the sphere. The well widths are shown on the plots.}
\label{VMC_extrap_2CFFS_sphere}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./2d_sphere_VMC_extrap_2L_singlet.pdf}
\caption{Finite-size extrapolation of the energy for the pseudo-spin singlet CFFS state for different widths and carrier densities. The calculation is done by VMC on the sphere. The well widths are shown on the plots.}
\label{VMC_extrap_singlet_sphere}
\end{figure}
\begin{figure}[H]
\includegraphics[width=0.95\columnwidth]{./2d_VMC_Sphere_1L_E.pdf}
\includegraphics[width=0.95\columnwidth]{./2d_VMC_Sphere_2L_E.pdf}
\caption{The VMC energies of different states relative to either the CFFS state or the $(3,3,1)$ state, as labeled in each figure. All energies are thermodynamic limits evaluated on the sphere. The statistical errors are smaller than the symbol sizes.}
\label{sphere_VMC_FW_1}
\end{figure}
\begin{figure}[H]
\includegraphics[width=0.95\columnwidth]{./2d_VMC_Sphere_ALL_E.pdf}
\caption{The VMC energies of all states relative to the $(3,3,1)$ state. All energies are evaluated on the sphere, and represent the thermodynamic limit. An offset of $\frac{1}{2}\Delta_{\rm SAS}$ per particle is included for the two-component states. The statistical errors are smaller than the symbol sizes.}
\label{sphere_VMC_FW_2}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./VMC_Sphere_PB_1L.pdf}
\includegraphics[width=\columnwidth]{./VMC_Sphere_PB_2L.pdf}
\caption{(a) The phase diagram of one-component states. (b) The phase diagram of two component states. The phase boundaries are obtained from VMC calculation in the spherical geometry.
The region where experiments find an incompressible state~\cite{Shabani09b} is indicated by light dashed grey lines. The uncertainty in the density of the transition point is approximately $1\times 10^{10} \text{cm}^{-2}$.}
\label{sphere_VMC_PT_1}
\end{figure}
\begin{figure}[H]
\includegraphics[width=0.95\columnwidth]{./VMC_phase_diagram.pdf}
\caption{The full phase diagram of states at half filling, calculated by VMC method on the sphere. The uncertainty in the density at the transition point is approximately $1\times 10^{10} \text{cm}^{-2}$. The region where experiments find an incompressible state~\cite{Shabani09b} is indicated by light dashed grey lines.}
\label{sphere_VMC_PT_2}
\end{figure}
\section{Jacobi $\theta$ function and its periodicity}\label{theta_function_definition}
Here we list the definition and properties of the Jacobi $\theta$ function, following the conventions in the text book by David Mumford\cite{Mumford07}. In general, the $\theta$ function is defined as
\begin{equation}
\begin{aligned}
&\theta_{a,b}(z|\tau)\\
&=\sum_{n=-\infty}^{+\infty}\exp\left[\pi i(n+a)^2 \tau+2\pi i(n+a)(z+b)\right],
\end{aligned}
\end{equation}
which satisfies the periodicity properties:
\begin{equation}
\begin{aligned}
&\theta_{a,b}(z+1|\tau)=e^{2\pi a i}\theta_{a,b}(z|\tau)\\
\end{aligned}
\end{equation}
and
\begin{equation}
\begin{aligned}
&\theta_{a,b}(z+\tau|\tau)\\
&=\exp\left[-\pi i \tau-2\pi i(z+b)\right]\theta_{a,b}(z|\tau)
\end{aligned}
\end{equation}
For simplicity of notation, we have dropped the subscripts and defined $\theta_{1/2, 1/2}\left(z|\tau\right)=\theta\left(z|\tau\right)$ in the main text.
The other three Jacobi theta functions for the Pfaffian states on a torus are defined as follows:
\begin{equation}
\begin{aligned}
\theta_2\left(z|\tau\right)&=\theta\left(z+1/2|\tau\right)\\
\theta_3\left(z|\tau\right)&=e^{i\pi \tau/4}e^{i\pi z}\theta\left(z+1/2+i/2|\tau\right)\\
\theta_4\left(z|\tau\right)&=e^{i\pi \tau/4}e^{i\pi z}\theta\left(z+i/2|\tau\right)\\
\end{aligned}
\end{equation}
\section{Quasi-degeneracy of the Pfaffian state on the torus}
\label{PF_DEGENERACY}
We have given in Eq.\,\ref{Pfaffian_wfn} the explicit form for three Pfaffian wave functions, called Pfaffian (1), Pfaffian (2), and Pfaffian (3), which correspond to the choices $a=2$, 3 and 4, respectively. These are not related by CM translation, and as a result, have different Coulomb energy expectation values for finite systems. We have calculated the thermodynamic limits for the energies of these three states by the VMC method. We present the extrapolations of the VMC energies of the Pfaffian (2) and Pfaffian (3) in Figs.~\ref{Pf_degeneracy_1} and \ref{Pf_degeneracy_2} for various quantum well widths and densities. We compare these energies with the energy of the Pfaffian (1) (Fig.~\ref{VMC_EXTRAP_PFAF}) in Fig.~\ref{Pf_degeneracy_3}. At the lowest density of $n=10^{10} \text{cm}^{-2}$, the energy differences can be on the order of $\sim 0.008\pm0.003 e^2/\epsilon l_B$, which is approximately $1/4$ of the energy difference between the one-component CFFS and Pfaffian (1). As the carrier density increases, the differences between the various Pfaffian wave functions quickly drop to $\sim 0.0001 e^2/\epsilon l_B$. (Peterson \textit{et al.}\cite{Peterson08} have also found similar behavior as a function of the well-width in their ED studies.) Around transition densities in experiments, the difference is smaller than the uncertainty of either 2D-DMC or 3D-DMC, which is generally of the order of $0.001 e^2/\epsilon l_B$. Due to this fact, we conclude that at least in this work the choice of $\theta_a(z)$ in the Pfaffian does not affect our result, and we have used Pfaffian (1) with $a=2$ in our calculations.
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./Pf2_VMC.pdf}
\caption{The energy of the Pfaffian (2) state [in which $\theta_a(z)$ is chosen to be $\theta_3(z)$] as a function of $1/N_e$. Each energy is obtained by VMC method with the effective interaction defined in Eq.\,\ref{V_eff_explicit}.}
\label{Pf_degeneracy_1}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./Pf3_VMC.pdf}
\caption{The energy of the Pfaffian (3) state [in which $\theta_a(z)$ is chosen to be $\theta_4(z)$] as a function of $1/N_e$. Each energy is obtained by VMC method with the effective interaction defined in Eq.\,\ref{V_eff_explicit}.}
\label{Pf_degeneracy_2}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./Pf_degenracy.pdf}
\caption{Comparison of the VMC thermodynamic energies of the three Pfaffian states.}
\label{Pf_degeneracy_3}
\end{figure}
\section{Exact diagonalization studies for the LDA interaction}
\label{ED_Ajit}
In the main article, we found that, within the single component states, VMC with the LDA interaction (without LLM) supports the CFFS state in the entire parameter range considered. In this section, we present results obtained from exact diagonalization for the LDA interaction. Before we go on to the states at $\nu=1/2$, we first show that the $1/3$ Laughlin, and the $2/5$ and $3/7$ Jain states are robust to the effects of finite-width and density changes in the LLL. Using the pseudopotentials of the interaction obtained from the finite-width LDA discussed above [with parameters $W=18-70 \text{nm}$ and $n=1\times 10^{10}-30\times 10^{10}$ cm$^{-2}$], we obtain the exact ground states at $1/3$, $2/5$, and $3/7$ in the LLL at the $1/3$ Laughlin, $2/5$ Jain, and $3/7$ Jain fluxes, respectively. All our calculations are carried out for a system of $N_e=12$ electrons which is the largest system for which the $2/5$ and $3/7$ Jain states (obtained by a brute-force projection to the LLL) have been constructed in the Fock space~\cite{Yang19a}.
We also evaluate the charge and neutral gaps for the same system of $N_e=12$ electrons using exact diagonalization. The neutral gap is defined as the difference in energies of the two lowest-lying states of the system of $N$ electrons at the flux $2Q_{gs}$ corresponding to the incompressible ground state. The charge gap is defined as $\Delta_{c} = [E(2Q_{gs}+1)+E(2Q_{gs}-1)-2E(2Q_{gs})]/n_{q}$, where $E(2Q)$ is the background-subtracted~\cite{Balram20} ground-state energy of $N_e$ electrons at flux $2Q$, and $n_{q}$ is the number of quasiparticles (quasiholes) created by the removal (insertion) of a single flux quantum in the ground state. The charge gap measures the energy required to create a far-separated quasiparticle-quasihole pair in the ground-state. The value of $n_{q}$ is one, two, and three, for the $1/3$ Laughlin, $2/5$, and $3/7$ Jain states respectively.
The results for the overlaps and gaps obtained from exact diagonalization using the LDA pseudopotentials at $\nu=1/3$, $2/5$ and $3/7$ are shown in Fig.~\ref{fig: Laughlin_Jain_overlaps_finite_width_LDA} (note that the scales on different plots are different). We find that the $1/3$ Laughlin, $2/5$ and $3/7$ Jain states provide a near-perfect representation of the exact ground state at all widths and densities considered. Furthermore, these states support robust charge and neutral gaps, which indicates that they are stable to perturbations in the interaction arising from finite-width corrections and density variations. These results are consistent with the experimental observation of incompressible states at $1/3$, $2/5$, and $3/7$ in wide quantum wells~\cite{Shabani09b}.
\begin{figure*}[htpb]
\begin{center}
\includegraphics[width=0.32\textwidth]{./CF13_overlap.pdf}
\includegraphics[width=0.32\textwidth]{./CF25_overlap.pdf}
\includegraphics[width=0.32\textwidth]{./CF37_overlap.pdf}\\
\vspace{0.3cm}
\includegraphics[width=0.32\textwidth]{./CF13_neutral_gap.pdf}
\includegraphics[width=0.32\textwidth]{./CF25_neutral_gap.pdf}
\includegraphics[width=0.32\textwidth]{./CF37_neutral_gap.pdf} \\
\vspace{0.3cm}
\includegraphics[width=0.32\textwidth]{./CF13_charge_gap.pdf}
\includegraphics[width=0.32\textwidth]{./CF25_charge_gap.pdf}
\includegraphics[width=0.32\textwidth]{./CF37_charge_gap.pdf}
\end{center}
\caption{
Overlaps with the exact lowest Landau level ground state [top panels (a), (b), and (c)], neutral gaps [middle panels (d), (e), and (f)] and charge gaps [bottom panels (g), (h), and (i)] in the spherical geometry for the $\nu=1/3$ Laughlin [left panels (a), (d), and (g)], $\nu=2/5$ Jain [center panels (b), (e), and (h)], and $\nu=3/7$ Jain [right panels (c), (f), and (i)] states evaluated using the pseudopotentials of the finite-width interaction obtained using a local density approximation (LDA). All the panels are for $N_{e}=12$ electrons.}
\label{fig: Laughlin_Jain_overlaps_finite_width_LDA}
\end{figure*}
We next consider the 1/2 state and evaluate its charge and neutral gaps as well as its overlaps with the Moore-Read Pfaffian wave function as a function of the width and density. Here we consider the three systems of $N_e=14,~16$ and $18$ electrons that do not alias with any of the Jain states~\cite{Scarola02b}. The overlap maps shown in Fig.~\ref{fig: MR_Pfaffian_overlaps_finite_width_LDA} indicate that the overlap of the Pfaffian state with the exact ground state increases with increasing width and density and reaches a value comparable to the overlap of the Pfaffian wave function with the 5/2 Coulomb ground state~\cite{Balram20}. We next look at the neutral and charge gaps ($n_{q}=2$) of the 1/2 Moore-Read Pfaffian state. These results, shown in Fig.~\ref{fig: MR_Pfaffian_overlaps_finite_width_LDA}, suggest that the 1/2 Moore-Read Pfaffian state does not consistently, i.e. for all values of $N_e$, support a robust charge / neutral gap. For the systems of $N_e=14$ and $N_e=16$ electrons, we find that the charge gap is negative for most widths and densities, which indicates that the 1/2 Moore-Read Pfaffian state is not stabilized for these interactions. Even for the system of $N_e=18$ electrons, where the charge gaps are positive, the 1/2 Moore-Read Pfaffian state has a gap that is an order of magnitude lower than that of the Laughlin and Jain states. Thus, we conclude that the LDA interaction does not stabilize the 1/2 Moore-Read Pfaffian state in the LLL in the absence of LLM.
\begin{figure*}[htpb]
\begin{center}
\includegraphics[width=0.32\textwidth]{./Pf_Overlap_N14.pdf}
\includegraphics[width=0.32\textwidth]{./Pf_Overlap_N16.pdf}
\includegraphics[width=0.32\textwidth]{./Pf_Overlap_N18.pdf} \\
\vspace{0.3cm}
\includegraphics[width=0.32\textwidth]{./Pf_neutral_gap_N14.pdf}
\includegraphics[width=0.32\textwidth]{./Pf_neutral_gap_N16.pdf}
\includegraphics[width=0.32\textwidth]{./Pf_neutral_gap_N18.pdf}\\
\vspace{0.3cm}
\includegraphics[width=0.32\textwidth]{./Pf_charge_gap_N14.pdf}
\includegraphics[width=0.32\textwidth]{./Pf_charge_gap_N16.pdf}
\includegraphics[width=0.32\textwidth]{./Pf_charge_gap_N18.pdf}
\end{center}
\caption{Overlaps of the $\nu=1/2$ Moore-Read Pfaffian state with the exact lowest Landau level ground state [top panels (a), (b), and (c)], neutral gaps [middle panels (d), (e), and (f)] and charge gaps [bottom panels (g), (h), and (i)] in the spherical geometry evaluated using the pseudopotentials of the finite-width interaction obtained using a local density approximation (LDA). The left, center and right panels correspond to $N_{e}=14$ [panels (a), (d), and (g)],~$16$ [panels (b), (e), and (h)] and $18$ [panels (c), (f), and (i)] respectively.}
\label{fig: MR_Pfaffian_overlaps_finite_width_LDA}
\end{figure*}
Finally, we turn to the CFFS state at $\nu=1/2$ and consider its overlap with the exact ground state. For this purpose, we consider the exact zero-width LLL Coulomb ground state of $N_e=14$ electrons at $2Q=2N_e-3$, since this system has a uniform ($L=0$) ground state. We take this ground state to represent the CFFS state and calculate its overlap with the exact LDA ground state as a function of width and density. These overlaps are shown in Fig.~\ref{fig: CFFS_overlaps_finite_width_LDA} and are essentially unity in the entire parameter space we have considered. (For comparison, the overlap of the Moore-Read Pfaffian state with the exact zero-width LLL Coulomb ground state for this system size is $0.72$~\cite{Balram20}.)
\begin{figure}[htpb]
\begin{center}
\includegraphics[width=0.47\textwidth]{./CFFS_Overlap_N14.pdf}
\end{center}
\caption{Overlaps of the composite fermion Fermi sea (zero-width Coulomb ground state in the lowest Landau level [see text]) with the ground state of the finite-width LDA interaction for $N_{e}=14$ electrons at flux $2Q=25$. The overlap is essentially unity in the entire range of widths and densities considered.}
\label{fig: CFFS_overlaps_finite_width_LDA}
\end{figure}
To summarize, our exact diagonalization results are consistent with the VMC results given in the main article. In the entire parameter range that we explored, the CFFS has almost unit overlap with the exact ground state. Thus the CFFS state is favored over the Moore-Read Pfaffian state for all the LDA interactions that we have looked at in the absence of LLM.
\section{Additional details on the diffusion Monte Carlo}
\label{DMC_algorithm}
The fixed-phase DMC, which is a generalization of the standard DMC method~[\onlinecite{Mitas98, Foulkes01}], was developed in Ref.~[\onlinecite{Ortiz93}] and also described in Refs.~[\onlinecite{Zhang16, Zhao18}]. The method we use in this paper is based on these articles. Here we give some details that are specific to our work.
We use parameters appropriate for Gallium Arsenide. We express lengths in units of $l_B$ and energies in units of $\frac{e^2}{\epsilon l_B}$. The local energy for a 2D system is simply $E_L(\vec{R})=\frac{N_e}{2\kappa}+V_\text{Ewald}(\vec{R})$ and for a 3D system an extra term $\sum_i{E}_\text{trans}\left( w_i \right)$ is introduced due to the transverse degree of freedom:
\begin{equation}
E_L\left(\vec R \right)=\frac{N_e}{2 \kappa}+V_\text{Ewald}(\vec{R})+\sum_i{E}_\text{trans}\left( w_i \right)
\end{equation}
where $N_e/2\kappa$ is the cyclotron energy for $N_e$ particles in the initial trial state. $V_\text{Ewald}(\vec{R})$ is the Coulomb interaction extended periodically in the x-y plane; it satisfies open boundary conditions in the transverse dimension as appropriate for our 3D quantum wells (for 2D systems we simply set all $w_i$'s to be $0$). Its explicit form is given below in Appendix~\ref{Ewald_V}.
The transverse local energy of a one-component state is given by:
\begin{equation}
\begin{aligned}
&E_\text{trans}\left( w \right)\\
=&\begin{cases}\frac{1}{\kappa} \frac{\pi^2}{2 W^2} \left(9-\frac{8}{1+\alpha-2\alpha \cos(2\pi w/W)}\right), &| w|<W/2\\
\infty, &| w|\geq W/2
\end{cases}
\end{aligned}
\end{equation}
For two-component states, the energies for the left-layer and right-layer are as follows:
\begin{equation}
\begin{aligned}
&E_\text{trans}^L\left( w\right)=\begin{cases}\frac{1}{\kappa} \frac{\beta (2 W-\beta w)}{2 W^2 w},&-W/2< w<0\\
\infty, & w\geqslant0
\end{cases}\\
&E_\text{trans}^R\left( w\right)=\begin{cases}\frac{1}{\kappa} \frac{\beta \left[2 W-\beta (W-w)\right]}{2 W^2 (W-w)},&0< w<W/2\\
\infty, & w\leqslant 0
\end{cases}
\end{aligned}
\end{equation}
We use the mixed estimator method\cite{Foulkes01} to calculate the ground state energy.
\section{Transverse distribution of fully antisymmetrized two-component states}
In the main text, we make the approximation that the two transverse basis wave functions of two-component states do not overlap, i.e. they are located entirely either in the left or the right half of the quantum well. The approximation becomes quantitatively valid when the well-width or the density is very high, in which case both the lowest symmetric and asymmetric subbands have vanishing density at the center, and the linear combinations of them form the left- and right-layer bases. This approximation simplifies the calculation because
the system's energy can be evaluated without doing an antisymmetrization over all particles.
In this section, we test the dependence of the transverse density on the well-width and the carrier density numerically with fully-antisymmetrized wave functions in 3D space and ascertain to what extent the system can be approximated with two non-overlapping bases. Because the number of permutations increases rapidly with the system size, and because one does not have analytical ways to simplify the calculation of the drift velocity in the 3D-DMC, we estimate that the study of a system with more than 8-10 particles is out of our reach.
Fortunately, we have found that the system's transverse distribution is largely insensitive to the size of the system and the type of the in-plane wave function. Therefore we study a 4-particle system with its in-plane wave function given by the $(3, 3, 1)$ state. We choose transverse wave functions that are not strictly orthogonal, i.e. incorporate a small tunneling between the two layers. Specifically, we choose
\begin{equation}
\begin{aligned}
\psi_L\left( w_i \right)&=\frac{w_i}{W} \exp{\left[-8 \frac{w_i}{W}\right]}\\
\psi_R\left( w_i \right)&=(1-\frac{w_i}{W}) \exp{\left[-8\left(1-\frac{w_i}{W}\right)\right]}\\
\end{aligned}
\end{equation}
Here we have shifted the quantum well's location to the range $[0, W]$ for simplicity.
We do not enforce the central density to be zero; as a result, whether the system is a well-defined bilayer is determined by the diffusion process itself.
The bases chosen here are not strictly orthogonal but they are still linearly independent. If the final distribution breaks into two well-separated density lobes, then it indicates that the system can be treated as a two-component state. On the contrary, if the final distribution is not well-separated, then one should not treat the system as a two-component state. [This is the reason why we call the state $(3, 3, 1)$-like state rather than $(3, 3, 1)$ state in the caption of Fig.~\ref{Full_Antisymmtrized}.] This also offers an estimation of the width and density beyond which the system can be treated as a two-component state. Our 3D-DMC results for the density are shown in Fig.~\ref{Full_Antisymmtrized}. As one can see, the system is only well-separated and has negligible density in the center when $n\gtrsim 2\times 10^{11} \text{cm}^{-2}$ for $W=70 \text{nm}$ and $n\gtrsim 1\times 10^{11} \text{cm}^{-2}$ for $W=80 \text{nm}$. Recalling that in the main text we show a phase transition from a one-component state to a two-component state occurring around $n=2.2\times10^{11} \text{cm}^{-2}$ for $W=70 \text{nm}$ and $n=1.5\times10^{11} \text{cm}^{-2}$ for $W=80 \text{nm}$, this calculation of the fully-antisymmetrized state justifies our approximation in the main text.
\begin{figure}
\includegraphics[width=\columnwidth]{./Full_Antisymmetrized_W70.pdf}
\includegraphics[width=\columnwidth]{./Full_Antisymmetrized_W80.pdf}
\caption{The transverse density profiles of the $(3,3,1)$-like state for a 4-particle system of widths $W=70 \text{nm}$ (top) and $W=80 \text{nm}$ (bottom). The legend shows the carrier density in units of $10^{10} \text{cm}^{-2}$.}
\label{Full_Antisymmtrized}
\end{figure}
\section{Thermodynamic extrapolations of energy}
The phase diagrams in the main text are obtained by comparing the energies of different states in the thermodynamic limit. For completeness, we show the extrapolations of the energies of various states calculated by either VMC, 2D-DMC, or 3D-DMC in this section.
Figs.~\ref{VMC_EXTRAP_CFFS}-\ref{VMC_EXTRAP_SINGLET} show the energy extrapolation for the VMC calculation;
Figs.~\ref{2D_DMC_EXTRAP_CFFS}-\ref{2D_DMC_EXTRAP_SINGLET} show the energy extrapolation for the 2D-DMC calculation; and
Figs.~\ref{3D_DMC_EXTRAP_CFFS}-\ref{3D_DMC_EXTRAP_SINGLET} show the energy extrapolation for the 3D-DMC calculation.
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./2d_torus_VMC_extrap_1L_CFFS.pdf}
\caption{The VMC energy of the one-component CFFS as a function of $1/N_e$.}
\label{VMC_EXTRAP_CFFS}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./2d_torus_VMC_extrap_1L_Pfaf.pdf}
\caption{The VMC energy of the one-component Pfaffian as a function of $1/N_e$.}
\label{VMC_EXTRAP_PFAF}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./2d_torus_VMC_extrap_2L_hp331.pdf}
\caption{The VMC energy of the two-component $(3, 3, 1)$ as a function of $1/N_e$.}
\label{VMC_EXTRAP_331}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./2d_torus_VMC_extrap_2L_biCFFS.pdf}
\caption{The VMC energy of the two-component $1/4+1/4$ CFFS as a function of $1/N_e$.}
\label{VMC_EXTRAP_BICFFS}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./2d_torus_VMC_extrap_2L_singlet.pdf}
\caption{The VMC energy of the two-component pseudo-spin singlet CFFS as a function of $1/N_e$.}
\label{VMC_EXTRAP_SINGLET}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./2d_DMC_extrap_1L_CFFS.pdf}
\caption{2D-DMC energy of the one-component CFFS as a function of $1/N_e$.}
\label{2D_DMC_EXTRAP_CFFS}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./2d_DMC_extrap_1L_Pfaffian.pdf}
\caption{2D-DMC energy of the one-component Pfaffian as a function of $1/N_e$.}
\label{2D_DMC_EXTRAP_PFAF}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./2d_DMC_extrap_2L_hp331.pdf}
\caption{2D-DMC energy of the two-component $(3, 3, 1)$ as a function of $1/N_e$.}
\label{2D_DMC_EXTRAP_331}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./2d_DMC_extrap_2L_biCFFS.pdf}
\caption{2D-DMC energy of the two-component $1/4+1/4$ CFFS as a function of $1/N_e$.}
\label{2D_DMC_EXTRAP_BICFFS}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./2d_DMC_extrap_2L_singlet.pdf}
\caption{2D-DMC energy of the two-component pseudo-spin singlet CFFS as a function of $1/N_e$.}
\label{2D_DMC_EXTRAP_SINGLET}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./3D_DMC_E_CFFS.pdf}
\caption{3D-DMC energy of the one-component CFFS as a function of $1/N_e$.}
\label{3D_DMC_EXTRAP_CFFS}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./3D_DMC_E_PFAF.pdf}
\caption{3D-DMC energy of the one-component Pfaffian as a function of $1/N_e$.}
\label{3D_DMC_EXTRAP_PFAF}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./3D_DMC_E_331.pdf}
\caption{3D-DMC energy of the two-component $(3, 3, 1)$ as a function of $1/N_e$.}
\label{3D_DMC_EXTRAP_331}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./3D_DMC_E_BICFFS.pdf}
\caption{3D-DMC energy of the two-component $1/4+1/4$ CFFS as a function of $1/N_e$.}
\label{3D_DMC_EXTRAP_BICFFS}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./3D_DMC_E_SINGLET.pdf}
\caption{3D-DMC energy of the two-component pseudo-spin singlet CFFS as a function of $1/N_e$.}
\label{3D_DMC_EXTRAP_SINGLET}
\end{figure}
\section{The system size dependence of transverse density}
\label{transverse density}
The profiles of the transverse density for the CFFS, obtained from our 3D-DMC calculation, are shown in Fig.~\ref{FINITE_SIZE_DEN} for several system sizes. These show that the 3D-DMC transverse density has negligible dependence on the system size. This conclusion also applies to other states considered in this paper. We, therefore, believe that the various transverse density profiles shown in this article represent the thermodynamic limit.
\begin{figure}[H]
\includegraphics[width=\columnwidth]{./CFFS_finite_size.pdf}
\caption{The transverse density profiles of the CFFS state for several particle numbers. They are identical within the statistical uncertainty. The different colors are for different densities, following the same color scheme as in Fig.~\ref{Density_1L}.}
\label{FINITE_SIZE_DEN}
\end{figure}
\section{Periodic Coulomb interaction and Ewald Summation}
\label{Ewald_V}
On the torus, we must work with a periodic version of the Coulomb interaction. A naive strategy is to extend the normal Coulomb potential periodically. Although this approach is theoretically possible, it is impractical because of slow convergence.
The Ewald-summation method overcomes this difficulty.
The idea is to split the Coulomb interaction into a short-ranged part and a long-ranged part. The short-ranged part can be summed in real space quickly; the long-ranged part in the real space becomes short-ranged in the momentum space, hence can be summed conveniently in the momentum space.
We follow Yeh's approach\cite{Yeh99a} in which a generalized summation is explicitly formalized including the transverse dimension with an open boundary:
\begin{widetext}
\begin{equation}
\begin{aligned}
V_\text{Ewald}=&\frac{1}{2}\sideset{}{'}\sum_{i,j=1}^{N_e} \sum_{|\vec{m}|=0}^{\infty}q_i q_j \frac{\text{erfc}(\alpha|\vec r_{ij}+\vec m|)}{|\vec r_{ij}+\vec m|}+\frac{\pi}{2A}\sum_{i,j=1}^{N_e}\sum_{\vec h\neq 0}q_i q_j\frac{\cos(\vec h\cdot \vec r_{ij})}{h}\\
&\times \left\{ \exp{(h z_{ij})}\text{erfc}(\alpha z_{ij} +\frac{h}{2\alpha})+\exp{(-h z_{ij})}\text{erfc}(-\alpha z_{ij}+\frac{h}{2\alpha}) \right\}\\&-\frac{\pi}{A} \sum_{i=1}^{N_e}\sum_{j=1}^{N_e}q_i q_j\left\{ z_{ij} \text{erf}(\alpha z_{ij})+\frac{1}{\alpha\sqrt{\pi}}\exp(-\alpha^2z_{ij}^2)\right\}-\frac{\alpha}{\sqrt{\pi}}\sum_{i=1}^{N_e}q_i^2
\end{aligned} \label{Ewaldsum}
\end{equation}
\end{widetext}
The prime on the summation $\sideset{}{'}\sum_{i,j=1}^{N_e}$ is to remind us that terms with $i=j$ are included only for $\vec{m}\neq 0$.
It is worth noting that this definition of the interaction properly includes the charge-neutrality condition, i.e. it contains the electron-electron, background-background repulsion, and the electron-background attraction. To be more explicit, the omission of the term with $\vec{h}=0$ in the summation and the last term of Eq.~(\ref{Ewaldsum}) are due to the electron-background and background-background interactions. We refer the reader to Refs.~[\onlinecite{Heyes97}] and [\onlinecite{Parry75}],
for a thorough discussion of the technical aspects of this method.
\end{appendix}
|
1,314,259,995,041 | arxiv | \section{Introduction}
The evidence from neutrino experiments has established the neutrino
oscillation phenomenon. The experimental data can be explained by flavor
mixings of three (active) neutrinos, and oscillation probabilities are
described by three generation mixing angles and two mass squared differences.
The presence of non-vanishing neutrino masses means a necessity
of physics beyond the standard model (SM). Furthermore, the smallness of neutrino
mass squared differences compared with the charged fermion masses in the SM is one of
the striking properties of neutrinos. The type-I seesaw mechanism~\cite{seesaw} is
the most promising approach to explain such smallness of neutrino masses. In this
mechanism, the sterile (right-handed) neutrinos are added to the SM.
Since these sterile neutrinos are Majorana particles, they can have Majorana masses, which
violate the lepton number. Consequently, the heavy enough Majorana masses of the sterile
neutrinos can lead to tiny active neutrino masses.
On the other hand, the elucidation of the origin of dark matter
(DM)~\cite{Zwicky:1933gu}, which governs about $23\%$ of the energy density of
the Universe~\cite{Komatsu:2010fb}, is also one of the most important goals in
particle physics today. Recently, a large number of DM models have been discussed in the
literature (e.g. see~\cite{Garrett:2010hd} for a recent review, and references
therein). Among them, one of the interesting candidates for DM is a sterile
neutrino. In particular, models with three sterile neutrinos whose masses are
below the EW scale have been proposed
in~\cite{Gouvea:2005er,Asaka:2005an,Asaka:2005pn}. Note that some astrophysical
data possibly support the existence of sterile
neutrinos~\cite{Kusenko:2009up}. In addition to a candidate for DM, the sterile
neutrino can also play a role in other cosmological issues, such as the origin
of the baryon asymmetry of the Universe (BAU). For instance, relatively heavy
sterile neutrinos in the type-I seesaw mechanism can generate the BAU via
leptogenesis~\cite{RIFP-641}. The split seesaw mechanism~\cite{Kusenko:2010ik}
can give a hierarchical mass spectrum of sterile neutrinos, which can
incorporate the usual leptogenesis with a keV sterile neutrino DM
scenario.\footnote{See also~\cite{Adulpravitchai:2011rq} for discussions of
the realistic flavor mixing in the mechanism.}
A possible mass spectrum of sterile neutrinos to explain
the BAU~\cite{hep-ph/0203194},
DM, and MiniBooNE/LSND
oscillation anomalies~\cite{Aguilar:2001ty}, as well as realize the tiny active
neutrino masses, has been proposed in~\cite{Chen:2011ai}.
Clearly, the nature of
neutrinos would be a key to find physics beyond the SM and understand some
cosmological issues.
In this letter, we focus on the magnetic property of neutrinos.
In particular, we concentrate on a simple
extension of the SM~\cite{Aparici:2009mj}, in which the right-handed neutrino
magnetic moment can be generated by the interaction between new charged particles and sterile neutrinos.
Note that the
induced magnetic interaction could result in some interesting consequences for
cosmology and high energy physics.
\section{Magnetic dipole moments}
\subsection{Active neutrinos}
By introducing three generations of the right-handed neutrinos in the SM, the
Yukawa interactions are given by
\begin{eqnarray}
\mathcal{L}=\mathcal{L}_{{\rm SM}}-(y_\nu\bar{L}\nu_R\Phi+h.c.),
\label{Lag}
\end{eqnarray}
where $L$, $\nu_R$, and $\Phi$ are the left-handed lepton doublet, right-handed
neutrino, and the SM Higgs, respectively.
%
The Dirac
neutrino mass is given by $M_D=y_\nu v$, where $v$ is the vacuum expectation value (VEV) of the
Higgs.
The Dirac neutrino can have a magnetic dipole moment induced by
radiative corrections~\cite{Gunn:1978gr} as
\begin{eqnarray}
\mu_{\nu_i}=\frac{3eG_F}{8\sqrt{2}\pi^2}m_{\nu_i}\simeq3\times10^{-19}
\left(\frac{m_\nu}{1\mbox{ eV}}\right)\mu_B, \label{mag}
\end{eqnarray}
where $G_F$ is the Fermi constant, $m_{\nu_i}$ are the corresponding (Dirac)
neutrino mass eigenvalues, and $\mu_B$ is the Bohr magneton, given by
\begin{eqnarray}
\mu_B=\frac{e}{2m_e}=5.79\times10^{-9}\mbox{ eV}\cdot\mbox{Gauss}^{-1}
=1.93\times10^{-11}\mbox{ e cm}.
\end{eqnarray}
In Eq.~\eqref{mag}, we have assumed $m_\nu\simeq m_{\nu_i}$ as
the typical neutrino mass scale. The current upper bounds on the
neutrino magnetic moments for three flavors are given by the Borexino
experiment as~\cite{Studenikin:2008bd}
\begin{eqnarray}
\mu_{\nu_e}<5.4\times10^{-11}\mu_B,~~~
\mu_{\nu_\mu}<1.5\times10^{-10}\mu_B,~~~
\mu_{\nu_\tau}<1.9\times10^{-10}\mu_B,
\end{eqnarray}
respectively. Note that a stronger bound on the typical neutrino dipole moment ($\mu_\nu$) is inferred
in~\cite{Raffelt:1990pj} from an estimate of effects on the core mass of the red
giants at the helium flash as
\begin{eqnarray}
\mu_\nu<3\times10^{-12}\mu_B~~~\mbox{ with }~~~
\mu_\nu^2\equiv\sum_{i,j=1}^3(|\mu_{ij}|^2+|\epsilon_{ij}|^2), \label{rg}
\end{eqnarray}
where $\mu_{ij}$ and $\epsilon_{ij}$ are the elements of the magnetic and electric dipole
matrices, respectively.
Here, we mention that the transition magnetic moment, which is relevant to
$\nu_i\rightarrow\nu_j+\gamma$, may exist for both Dirac and Majorana neutrino
cases. Explicitly, one has
\cite{petcov}
\begin{eqnarray}
\mu_{ij}^D=\frac{3eG_F}{32\sqrt{2}\pi^2}(m_{\nu_i}+m_{\nu_j})
\sum_{\alpha=e,\mu,\tau}U_{j\alpha}^\dagger U_{\alpha i}
\left(\frac{m_\alpha}{m_W}\right)^2,
\end{eqnarray}
for the Dirac neutrinos, where $U$ and $m_\alpha$ are the
Pontecorvo-Maki-Nakagawa-Sakata (PMNS) matrix and the corresponding charged
lepton masses, respectively. If the neutrinos are Majorana particles, one can
only have a flavor changing dipole operator,
\begin{eqnarray}
\mathcal{L}_{\text{int}}
=\mu_{ij}^M\nu_iC^{-1}\sigma_{\mu\nu}\nu_jF^{\mu\nu}+h.c.,
\end{eqnarray}
where $\nu_i$ are active neutrinos,\footnote{They describe the left-handed
neutrinos as $\nu_{L\alpha}=U_{\alpha i}\nu_i+\theta_{\alpha I}N_I^c$ with the
left-right mixing angles $\theta_{\alpha I}\equiv(y_\nu)_{\alpha I}v/M_I$ and
other mass eigenstates $N_I$ (so-called sterile neutrinos) almost corresponding
to the right-handed neutrinos, $i.e.$ $N_I\simeq\nu_{RI}$. Throughout this letter,
indices $i,j=1\sim3$ and $I,J=1\sim3$ stand for generations of active and
sterile neutrinos, respectively.} leading to the transition magnetic moments~\cite{pal}:
\begin{eqnarray}
\mu_{ij}^M=\frac{3eG_F}{16\sqrt{2}\pi^2}(m_{\nu_i}+m_{\nu_j})
\sum_{\alpha=e,\mu,\tau}\mbox{Im}\left[U_{j\alpha}^\dagger
U_{\alpha i}\left(\frac{m_\alpha}{m_W}\right)^2\right],
\end{eqnarray}
for the Majorana neutrinos.
\subsection{Right-handed neutrinos}
We now consider the magnetic moments of the right-handed neutrinos. If the
neutrinos are Majorana particles, one can have the Majorana mass terms for the
right-handed neutrinos, which violate the lepton number,
with the Lagrangian, given by
\begin{eqnarray}
\mathcal{L}_{{\rm Majorana}}=-\frac{M_R}{2}\overline{\nu_{R}^c}\nu_R.
\label{Majorana}
\end{eqnarray}
It is well known that the Lagrangians in Eqs.~\eqref{Lag} and \eqref{Majorana} give
light active neutrino masses through the seesaw mechanism~\cite{seesaw},
\begin{eqnarray}
M_\nu=-M_D^TM_R^{-1}M_D, \label{seesaw}
\end{eqnarray}
after integrating out the heavy right-handed neutrinos.
Note that
$y_\nu$, $M_R$,
$M_D$ and $M_\nu$ are all $3\times3$ matrices.
Since the right-handed (sterile) neutrinos are Majorana particles, only the transition magnetic moments can be induced,
described by the following flavor changing dipole operator,
\begin{eqnarray}
\mathcal{L}_{{\rm int}}=\mu_{IJ}^NN_IC^{-1}\sigma_{\mu\nu}N_JF^{\mu\nu}+h.c..
\end{eqnarray}
There are some models to induce the magnetic moments of right-handed (sterile)
neutrinos.
\subsection{Neutrino dark matter model}
Recently, Aparici, Santamaria, and Wudka (ASW) have proposed a
model~\cite{Aparici:2009mj} which enlarges the SM by adding a negatively
charged scalar, $\omega$, and one negatively charged vector-like fermion, $E$,
with non-vanishing hypercharges, $Y(\omega)=-1$ and $Y(E)=-1$, in addition to
the right-handed neutrinos. When one imposes a discrete symmetry, which affects
only $\omega$ and $E$ as $\omega\rightarrow -\omega$ and $E\rightarrow-E$, the
relevant Lagrangian allowed by the SM gauge and additional discrete symmetries
is
\begin{eqnarray}
\mathcal{L}_{{\rm ASW}}
&=& \mathcal{L}_{{\rm SM}}+\mathcal{L}_{{\rm Majorana}}+\mathcal{L}_K
-\mathcal{L}_Y-V, \label{ASW} \\
\mathcal{L}_K
&=& D_\mu\omega^\dagger D^\mu\omega+i\bar{E}D\hspace{-2.5mm}/\hspace{0.5mm}E
+i\bar{\nu}_R\partial\hspace{-2.4mm}/\hspace{0.5mm}\nu_R-M_E\bar{E}E,
\label{LK} \\
\mathcal{L}_Y
&=& y_\nu\bar{L}\nu_R\Phi+h\bar{\nu}_RE\omega^++h.c.,
\end{eqnarray}
where $V$
is the scalar potential for the SM Higgs and $\omega$.
In this model, 1-loop diagrams involving $\omega$ and $E$ contribute to the magnetic
moments of the right-handed neutrinos. The same interactions
also give rise to contributions
to the right-handed neutrino Majorana masses through the operator
$\xi(\Phi^\dagger\Phi)\overline{\nu_R^c}\nu_R$. An invisible Higgs decay
through the interaction has been discussed in~\cite{Aparici:2009mj}.
Moreover, it has been pointed out that the new charged particles
can be produced at the CERN LHC experiment through the Drell-Yan process
because of their charged properties if they are light enough. We further
investigate DM properties of this kind of the model.
We now proceed with DM in the model.
It is known that one of the interesting neutrino DM models is the keV sterile
neutrino DM model (e.g. see~\cite{Asaka:2005an,Asaka:2005pn,deGouvea:2005er}).\footnote{See also \cite{vega} and \cite{itoh}
for general discussions on DM properties with the keV mass and neutrino energy loss in stellar interiors, respectively.
}
In this scenario, the lightest sterile neutrino with the keV mass is a
decaying DM candidate. To be DM, the lifetime of the lightest sterile neutrino
should be larger than the age of the Universe. The lightest sterile neutrino
can radiatively decay into a photon ($\gamma$) and an active neutrino ($\nu_i$)
through the
left-right mixing.
Since we have new interactions which generate the right-handed neutrino
magnetic moments, the lightest sterile
neutrino can radiatively decay into $\gamma$ and $\nu_i$ with the decay width,
given by
\begin{eqnarray}
\Gamma_{N_1\rightarrow\nu_i\gamma}^{\rm mag}
=\frac{(M_1^2-m_{\nu_i}^2)^3}{8\pi M_1^3}|\mu_{1i}|^2
\simeq\frac{M_1^3}{8\pi}|\mu_{1i}|^2, \label{gammaIi}
\end{eqnarray}
where $|\mu_{1i}|$ denotes the magnetic moment
and $M_1$ is the mass of the (keV) sterile
neutrino ($N_1$).
Here, the active neutrino mass has been neglected in the second equality of Eq.~(\ref{gammaIi}).
On the other hand, the
keV sterile neutrino DM model also has a constraint from its decay into $\gamma$ and $\nu_i$
through the gauge boson and charged lepton loops with
the left-right mixing angle. The decay width is given by
\begin{eqnarray}
\Gamma_{N_1\rightarrow\nu_i\gamma}
=\frac{9\alpha G_F^2}{1024\pi^4}\sin^2(2\theta_1)M_1^5
\simeq5.5\times10^{-22}\theta_1^2\left(\frac{M_1}{\mbox{keV}}\right)^5
\mbox{ s}^{-1},
\end{eqnarray}
where $\theta_1\equiv\sum_{\alpha=e,\mu,\tau}(y_\nu)_{\alpha1}v/M_{1}$.
Clearly, both decay mechanisms could produce
a narrow line in the X-ray background~\cite{Boyarsky:2009ix,NewAdded,0709.2301}.
As a result, for the latter case,
the left-right mixing angle
is restricted as $\theta_1^2\lesssim1.8\times10^{-5}(\mbox{keV}/M_1)^5$,
equivalently
$\Gamma_{N_1\rightarrow\nu_i\gamma}\lesssim9.9\times10^{-27}\mbox{ s}^{-1}$.
For the former, it is reasonable to
impose a bound
$\Gamma_{N_1\rightarrow\nu_i\gamma}\lesssim(10^{-28}-10^{-26})\mbox{ s}^{-1}$
in a region of the emission photon energy $0.5\mbox{ keV}\leq E_\gamma\leq 12$
keV given in \cite{0709.2301} on
$\Gamma_{N_1\rightarrow\nu_i\gamma}^{\rm mag}$. The emission photon energy is
related with the decaying sterile neutrino mass as $E_\gamma=M_1/2$. For
$\Gamma_{N_1\rightarrow\nu_i\gamma}^{\rm mag}\lesssim10^{-28}\mbox{ s}^{-1}$,
one obtains
\begin{eqnarray}
|\mu_{1i}|\lesssim3.89\times10^{-16}\mu_B, \label{new}
\end{eqnarray}
where $M_1=5$ keV has been
used.\footnote{$\Gamma_{N_1\rightarrow\nu_i\gamma}\lesssim10^{-26}\mbox{
s}^{-1}$ is also allowed for $M_1\simeq24$ keV. In this case, a more severe
bound $|\mu_{1i}|\lesssim3.70\times10^{-16}\mu_B$ can be derived.} It is seen that
the constraint in Eq.~(\ref{new}) on the neutrino magnetic moment is much stronger
than the one from the consideration of the red giants in Eq.~(\ref{rg}). Note
that Eq.~(\ref{rg}) is obtained from the discussion of the plasmon decay into
neutrinos where the masses of neutrinos are lower than
$\mathcal{O}(\mbox{keV})$. Therefore, once the sterile neutrinos have
magnetic interactions mediated by new particles, the keV sterile neutrino DM
scenario should satisfy the constraint in Eq.~(\ref{new}), which is
model-independent~\cite{Aparici:2009mj}, rather than the one from the red giants.
We now investigate the neutrino magnetic moment in a model-dependent way. The
magnetic moment $|\mu_{1i}|$ induced from the model in Eq.~(\ref{ASW}) is
calculated as
\begin{eqnarray}
|\mu_{1i}|&=& \frac{g'f(r)}{2(4\pi)^2M_E}\sum_{J=2,3}
\sum_{\alpha=e,\mu,\tau}
\mbox{Im}[h_1^\ast h_J\theta_{J\alpha}U_{\alpha i}],
\label{mu-dep} \\
f(r) &\equiv& \frac{1}{1-r}+\frac{r}{(1-r)^2}\log(r), \label{fr}
\end{eqnarray}
for the case of $M_1\ll M_E$ and $M_\omega$ with $r\equiv M_\omega^2/M_E^2$.
Here, the active neutrino as the final state is converted from the internal
sterile state $N_J\simeq\nu_{RJ}$ $(J=2,3)$ through the corresponding
left-right mixing $\theta_{J\alpha}$. Since the Majorana neutrinos can only
have the transition magnetic moments, the sum of $J$ is performed for $J=2$ and
$3$. The external momenta and masses can be neglected as
in~\cite{Aparici:2009mj}.
Two of three sterile neutrinos can generically play a role to realize the
active neutrino mass scales through the seesaw mechanism in the keV sterile
neutrino DM model, e.g.~\cite{Asaka:2005an}. Therefore, the left-right mixing
angle for the corresponding generations can be described by the typical active
neutrino mass scale $m_\nu$ and two heavier sterile neutrino mass scales
$M_{2,3}$, given by $\theta_{J\alpha}=\sqrt{m_\nu/M_{2,3}}$. On the
other hand, since the Yukawa coupling of the lightest sterile neutrino to the
left-handed lepton doublet and SM Higgs should be tiny, the sterile neutrino DM
with the keV mass is not responsible for the active neutrino mass scales. Because
of this smallness of the Yukawa coupling, the keV sterile neutrino cannot be
in equilibrium even at high temperature. This feature is crucial for
the various production mechanisms of the keV sterile neutrino DM with the correct
abundance~\cite{Kusenko:2010ik,hep-ph/9303287,astro-ph/9810076,0609081}.
We now explicitly examine
a specific and economical model~\cite{Asaka:2005an,Asaka:2005pn}
with right-handed neutrinos and new charged particles as an
example. In this model, one of heavier sterile neutrinos is in the
thermal equilibrium before the sphaleron process becomes
inactive~\cite{akhmedov}. When the Yukawa coupling of
the remaining heavier sterile neutrino is naively estimated as
$(y_{\nu2})^2\sim\sqrt{\Delta m_{\rm sol}}M_2/v^2\sim\mathcal{O}(10^{-15})$,
the sterile neutrino is out of equilibrium at the time without the sphaleron process.
The $2\leftrightarrow2$ interactions among the right-handed neutrinos and new charged
particles, such as the scalar exchange $\nu_RE\leftrightarrow\nu_RE$ interaction,
are important for the condition of the non-equilibrium of DM. The rates of those new interactions are
described by the new Yukawa couplings $h_I$ given in Eq.~(\ref{LK}), where $I$
denotes the generation of the right-handed neutrinos.
Note that these Yukawa couplings do not affect the active neutrino masses. When
$|h_I|^2\lesssim\mathcal{O}(10^{-14})$, the corresponding sterile neutrino is
out of equilibrium at the time when the sphaleron process becomes ineffective.
Under these discussions, we impose $\Gamma_{N_1\rightarrow\nu_i\gamma}^{\rm
mag}\lesssim10^{-28}\mbox{ s}^{-1}$ on Eq.~(\ref{new}) with Eqs.~(\ref{mu-dep})
and (\ref{fr}). Then, we obtain a constraint on the model parameter as
\begin{eqnarray}
M_E\geq 24.3\mbox{ MeV}\,,
\label{EqConstraint}
\end{eqnarray}
where we have taken that $g'=0.35$, $f(r)=1/2$,
$\theta_{J\alpha}=\sqrt{m_\nu/M_{2,3}}$, $m_\nu=0.01$ eV, $M_{2,3}=10$ GeV, and
Im$[h_1h_3U_{\alpha i}]=5\times10^{-9}$. Note that these values can satisfy the above
conditions in the keV sterile neutrino DM model realizing the BAU via the
oscillation of the heavier sterile neutrino with a mass spectrum of
$(M_1,M_2,M_3)=(\mbox{keV},\mathcal{O}(1-10)\mbox{ GeV},\mathcal{O}(1-10)\mbox{
GeV})$. Note also that $f(r)\rightarrow1/2$ if $M_\omega/M_E\rightarrow1$.
It is clear that the constraint in Eq.~(\ref{EqConstraint}) is much weaker than that from high energy experiments
in the presence of the new charged particles.
In other models of the BAU, the constraint on the model parameters becomes
weaker because of the largeness of the heavier sterile neutrino masses.
\section{Summary}
We have investigated the magnetic dipole moments in the keV sterile neutrino DM
model. In this DM model, the lightest sterile neutrino with the keV scale mass
is a decaying DM candidate with its lifetime greater than the age of the
Universe. Since the width of the radiative DM decay into a photon and an active
neutrino is constrained by X-ray observations, we have obtained a
model-independent constraint on the magnetic interactions, leading to
$|\mu_{1i}|\lesssim3.89\times10^{-16}\mu_B$ for $M_1=5$ keV, which is stronger
than the bound from the consideration of the plasmon decay in the red giants.
We have also studied the magnetic dipole moment in a model-dependent way.
Explicitly,
the same condition from the X-ray observations gives a constraint of $M_E\gtrsim 24.3\mbox{ MeV}$ in the model of baryogenesis
from the heavier right-handed neutrino oscillation.
\subsection*{Acknowledgement}
This work was supported in part by the National Science Council of
Taiwan under Grant No. NSC-98-2112-M-007-008-MY3 and National Center for
Theoretical Sciences, Taiwan.
|
1,314,259,995,042 | arxiv | \section*{Abstract}
\small{
This note proposes rapidly convergent computational formulae for evaluating scattering kernels from radiative transfer theory. The approach used here does not rely on Legendre expansions, but rather uses exponentially convergent numerical integration rules. A closed form for the \textsc{Henyey-Greenstein}
scattering kernel in terms of complete elliptic integrals is also derived.
}
\medskip
\noindent
\small{\textbf{Keywords.} Plane-parallel scattering, scattering kernel, phase function, trapezoidal rule, complete elliptic integral.}
\section{Introduction and Background}
In theories of radiative transfer and of neutron transport, the interaction between radiation and a scattering medium is described by a \emph{phase function} $p:[-1,1] \to \mathbb{R}^+$ with the property $\int_{-1}^1 p(t) dt = 1$. Let $S^2$ denote the unit sphere in $\mathbb{R}^3$. In a single scattering event, a photon or neutron that arrives from a given direction $\Theta \in S^2$ is scattered into the direction
$\Theta' \in S^2$ according to the probability density $\Theta' \mapsto \frac{1}{4\pi} p(\Theta \cdot \Theta')$. This probability density then appears as an integral kernel in an integro-differential equation that describes multiple scattering. In the special case of plane-parallel scattering, one rewrites this equation in spherical coordinates and expands it as a Fourier series in the azimuthal angle $\phi$.
This requires the evaluation of
\begin{equation}
c_m \int_0^{2 \pi} p(\cos \theta \cos \mu + \sin \theta \sin \mu\cdot \cos \phi') \cos m \phi' \, d \phi'
\label{eq_average}
\end{equation}
for $0 \le \theta, \, \mu \le \pi, \, m = 0, \, 1, \, 2, \dots$, where $c_m = \frac{1}{\pi}$ for $m > 0$ and $c_0 = \frac{1}{2\pi}$.
Using the notation $x = \cos \theta, \, y = \cos \mu$, one therefore arrives at the problem of evaluating the \emph{scattering kernels} $P_m(x,y)$, defined for $-1 \le x, \, y \le 1$ and for $m = 0, \, 1, \dots$ as
\begin{equation}
c_m \int_0^{2 \pi}
p(xy + \sqrt{1-x^2}\sqrt{1-y^2}\cos s) \cos m s \, d s
\label{eq_matrix}
\end{equation}
Let $L_n$ denote the n-th Legendre polynomial. From spherical harmonics one obtains that the Legendre expansion of the phase function $p$ directly
leads to expansions for the $P_m$. If $p(x) = \sum_{n = 0}^\infty \alpha_n L_n(x)$ for all $x \in [-1,1]$, then in particular
\begin{equation}
P_0(x,y) = \sum_{n = 0}^\infty \alpha_n L_n(x)L_n(y)
\label{eq_LegendreExp}
\end{equation}
for all $-1 \le x, y \le 1$. The higher order terms $P_m, \, m > 0$ can be expanded into associated Legendre functions, using again the Legendre coefficients of $p$. The classical reference \cite{chandrasekhar1960} contains a mathematical presentation of the theory of radiative transfer. A modern account with more physical details may be found in \cite{liou2002}.
While the evaluation of eq.~(\ref{eq_LegendreExp}) and its higher order versions is in theory straightforward, there are several practical difficulties. Firstly, the formula requires knowledge of the Legendre expansion of $p$. While the methods to find $p$ for a particular scattering medium indeed produce Legendre expansions (see \cite{wiscombe1980}), it may be desirable to have more compact representations of a scattering function, in which case the Legendre expansion is not readily available and not easy to compute (there is no ``fast Legendre transform"). Secondly, for cases of strongly forward-peaked scattering, several hundred terms of the Legendre expansion may be needed to evaluate each $P_m$ even to modest accuracy. This requires care when evaluating the Legendre polynomials in eq.~(\ref{eq_LegendreExp}), and it is computationally expensive in any case.
In this note, the direct numerical integration scheme known as trapezoidal rule is proposed to evaluate $P_m$ from eq.~(\ref{eq_matrix}), as an alternative to the Legendre expansion in eq.~(\ref{eq_LegendreExp}). The method is known (\cite{Trefethen2014}) to converge exponentially if the integrand is analytic. This highly desirable property is exploited systematically in this note.
The relation between the convergence rate and the location of the singularities (points or regions of non-analyticity) of the phase function is explained. The second contribution of this note is the derivation of a closed form of the scattering kernel $P_0$, in the special case of the Henyey-Greenstein phase function (\cite{henyey1941}). It expresses $P_0$ in terms of a complete elliptic integral and can be evaluated very rapidly without any expansions or numerical integration, even for cases of extremely forward-peaked scattering. Numerical examples are given to demonstrate the approach.
\subsection{Henyey-Greenstein Scattering Function}
The Henyey-Greenstein phase function (\cite{henyey1941}) \footnote{It appears that Chandrasekhar was not aware of this work when he wrote his classical treatise
\cite{chandrasekhar1960} in 1950.} was proposed to describe interstellar scattering and is given by the formula
\begin{equation}
p_{HG}(x,g) = \frac{1}{2} \frac{1-g^2}{(1 + g^2 - 2g x)^{3/2}}
= \sum_{\ell = 0}^\infty \frac{2 \ell + 1}{2}g^\ell L_\ell(x)
\label{HG}
\end{equation}
where $-1 < g < 1$ is known as the asymmetry factor. This phase function has since been used in areas as diverse as scattering in cloudy and hazy atmospheres (\cite{hansen1969}), light scattering in seawater (\cite{haltrin2002}) and in tissue (\cite{niemz2007}), and even in computer graphics. Then eq.~(\ref{eq_matrix}) together with eq.~(\ref{eq_LegendreExp}) lead to the problem of evaluating
\begin{equation}
H(x,y;g) = \sum_{\ell = 0}^\infty \frac{2 \ell + 1}{2}g^\ell L_\ell(x) L_\ell(y)
\label{HG0}
\end{equation}
for $-1 \le x,y \le 1$. The series may be evaluated by using the first $N$ terms.
Since $|L_\ell(x) L_\ell(y)| \sim \frac{2}{2 \ell + 1}$ with indeterminate sign, the direct evaluation of the series leads to problems when $g$ is close to 1, because then the series converges very slowly. This is illustrated in fig.~\ref{fig-HG1a}.
\begin{figure}[ht]
\begin{center}
\resizebox{3in}{!}{\includegraphics{HG1a.png}}
\caption{Henyey-Greenstein scattering kernel $H(x,y_0;g)$ for $g = 0.95, \, y_0 = 0.4, \, -1 \le x \le 1$. Black: exact evaluation using eq.~(\ref{eq_HG_exact}). Blue: Eq.~(\ref{HG0}) with $N = 40$ terms. Red: Eq.~(\ref{eq_trapezoidal}) with $N = 40$ terms.}
\label{fig-HG1a}
\end{center}
\end{figure}
\begin{figure}[ht]
\begin{center}
\resizebox{3in}{!}{\includegraphics{HG1b.png}}
\caption{Log Errors for evaluating Henyey-Greenstein scattering kernel in fig.~\ref{fig-HG1a}. Blue: Eq.~(\ref{HG0}) with $N = 40$ terms. Black, red, green: Eq.~(\ref{eq_trapezoidal}) with $N = 40, \, 80, \, 160$ terms. Thick: Computed errors. Thin: Error estimates from eq.~(\ref{eq_traperror}).}
\label{fig-HG1b}
\end{center}
\end{figure}
\section{An Exact Formula}
We start with the product formula (see 18.17.6 in \cite{nist2010}) for Legendre polynomials
\begin{eqnarray*}
L_n(\cos \theta) L_n(\cos \mu) &=& \frac{1}{\pi} \int_0^\pi L_n (\cos \theta \cos \mu \\
& \; & + \sin \theta \sin \mu \cos s ) ds \, .
\end{eqnarray*}
Let
\begin{equation}
H_0(x,y;g) = \sum_{\ell = 0}^\infty g^\ell L_\ell(x) L_\ell(y) \, .
\label{HG00}
\end{equation}
Using the generating function for Legendre polynomials (see 18.12.11 in \cite{nist2010}), we therefore obtain
\begin{eqnarray}
& \,& H_0(\cos \theta, \cos \mu; g) \\
&=& \frac{1}{\pi} \int_0^\pi \sum_{n = 0}^\infty g^n L_n\left(\cos \theta \cos \mu + \sin \theta \sin \mu \cos s \right) \, ds\\
&=& \frac{1}{\pi} \int_0^\pi \frac{ds}{\sqrt{1 - 2g\left(\cos \theta \cos \mu + \sin \theta \sin \mu \cos s \right) + g^2}}
\, .
\end{eqnarray}
By eq.~(\ref{eq_K1}) with
\begin{eqnarray*}
\alpha &=& 1 + g^2 - 2g\cos \theta \cos \mu \\
\beta &=& 2 g \sin \theta \sin \mu \\
\alpha + \beta &=& 1 + g^2 - 2g\cos(\theta + \mu)
\end{eqnarray*}
this implies
\begin{equation}
H_0(\cos \theta, \cos \mu;g) =
\frac{2}{\pi\sqrt{\alpha + \beta}} K_0\left(\frac{2 \beta}{\alpha + \beta} \right)
\label{eq_HSum1}
\end{equation}
where $K_0$ is the complete elliptic integral of the first kind defined in eq.~(\ref{eq_K0}).
Setting
\begin{equation}
u_\pm = 1 + g^2 - 2g\cos(\theta \pm \mu)
\label{eq-upm}
\end{equation}
this may also be written as
\[
H_0(\cos \theta, \cos \mu;g)
= \frac{2}{\pi\sqrt{u_+}} K_0\left(\frac{u_+ - u_-}{u_+} \right) \, .
\]
This formula is closely related to formula (5.10.2.1)\footnote{This was pointed out to me by an anonymous contributor on \texttt{math.stackexchange.com}.} in \cite{prudnikov1986} which has the form
\begin{equation}
H_0(\cos \theta, \cos \mu;g) = \frac{4}{\pi(\sqrt{u_+} + \sqrt{u_-})} K\left(\frac{\sqrt{u_+} - \sqrt{u_-}}{\sqrt{u_+} + \sqrt{u_-}}\right)\, .
\label{eq_HSum2}
\end{equation}
From eq.~(\ref{eq_HSum1}) and eq.~(\ref{eq-dK}) we obtain finally
\begin{eqnarray}
&\,& H(\cos \theta, \cos \mu ; g)\\
&=& \left(g \frac{d}{dg} + \frac{1}{2} \right)
\frac{2}{\pi\sqrt{u_+}} K_0\left(\frac{u_+ - u_-}{u_+} \right)\\
&=& \frac{(1-g^2)}{\pi
u_- \sqrt{u_+}} E_0\left(\frac{u_+ - u_-}{u_+} \right)
\label{eq_HG_exact}
\end{eqnarray}
where $E_0$ is the complete elliptic integral of the second kind defined in eq.~(\ref{eq_E0}). In terms of $x, \, y$, this becomes
\begin{equation}
H(x,y;g) = \frac{(1-g^2)}{\pi
w_- \sqrt{w_+}} E_0\left(\frac{4g\sqrt{1-x^2}\sqrt{1-y^2}}{w_+} \right)
\label{eq_HG_exactxy}
\end{equation}
with $w_\pm = 1 + g^2 - 2g \left(xy \mp \sqrt{1-x^2}\sqrt{1-y^2}\right)$. The formula may be used to evaluate $H(\cdot, \cdot; g)$ reliably even if $g$ is extremely close to 1. If $g = 1 - \varepsilon$ and $\varepsilon_0 \approx 10^{-16}$ is ``machine epsilon" in IEEE arithmetic, then $H(x,y;g)$ can be evaluated to relative accuracy $\varepsilon_0/\varepsilon$.
\section{Fast Evaluation}
We now turn to the general case. For a given phase function $p$ and given $x, \, y \in [-1,1], \, m \in \{0, \, 1, 2, \dots\} $, we need to evaluate the integral given by eq.~(\ref{eq_matrix}). Let
\begin{eqnarray}
h_m(z) &=& p(A + B \cos z)\cos mz \\
A &=& xy, \, B = \sqrt{1 - x^2}\sqrt{1 - y^2} \, .
\label{eq_def_h}
\end{eqnarray}
Note that $|A \pm B| \le 1$, with equality in one of the cases where $x = \pm y$.
The function $h_m$ is $2 \pi$-periodic on $\mathbb{R}$. It is known (\cite{Trefethen2014}) that if a function $f$ is periodic and analytic in a strip about the real axis, then the trapezoidal rule for approximating the integral $\int_0^{2\pi} f(t) dt$ converges exponentially fast. More precisely, assume that $f$ is $2\pi$-periodic and analytic in the strip $\mathcal{S}_\alpha = \{z \in \mathbb{C} \, | \, |\Im z | < \alpha\}$ and satisfies $|f(z)| \le M$ for some constant $M \ge 0$ there.
Choose a positive integer $N$ and set
\begin{equation}
I_N = \frac{2 \pi}{N} \sum_{k = 1}^N f\left( \frac{2 \pi k}{N} \right)\, ,
\label{eq_trapezoidal}
\end{equation}
then
\begin{equation}
\big| \int_0^{2 \pi} f(t) dt - I_N \big|
\le \frac{4 \pi M}{e^{\alpha N} -1} = e^{-\alpha N} \frac{4 \pi M}{1 - e^{-\alpha N}} \, .
\label{eq_traperror}
\end{equation}
The sum is just an approximation of the integral with the composite trapezoidal rule. The rate of convergence thus is much faster than for ordinary smooth (twice differentiable) functions, for which the error estimate has the form
\begin{equation}
\big| \int_0^{2 \pi} f(t) dt - I_N \big|
= \frac{2 \pi^3 |f''(\xi)| }{3 N^2}
\label{eq_traperrorC2}
\end{equation}
for some $\xi \in (0, 2 \pi)$.
To use this result in the computation of eq.~(\ref{eq_matrix}), note first that for any $\alpha \in \mathbb{R}$, the function that takes $u + i \cdot v = z$ to
$$
A + B \cos z = A + B
\cos u \cosh v + B \sin u \sinh v \cdot i
$$
maps the strip $\mathcal{S}_\alpha$ to an ellipse about $[A-B,A+B]$ in the complex plane which has focal points $A \pm B$ and semi-major and semi-minor axes with lengths $B \cosh \alpha$ and $B \sinh \alpha$.
Therefore, if the phase function $p$ is analytic in a neighborhood surrounding the set $[-1,1] \subset \mathbb{C}$,
then $h_m$ is analytic in a suitable strip $\mathcal{S}_\alpha$ with $\alpha > 0$. The domain of analyticity of $p$ is automatically symmetric with respect to the real axis. It should be emphasized that it is of course not necessary to determine this domain in order to use eq.~(\ref{eq_trapezoidal}).
For an illustration, refer to fig.~\ref{fig-HG2a}. The plot shows the domain of an assumed phase function that is originally defined on the interval $[-1,1]$ (thin horizontal black line) and that can be extended into the complex plane (everywhere except at singularities shown as colored lines and circles). For a particular choice of $x,\, y$, the integral in eq.~(\ref{eq_matrix}) extends over $[A - B, A+B] \subset [-1,1]$ (black circles). A suitable strip $\mathcal{S}_\alpha$ is mapped to the ellipse surrounding this set where the phase function is analytic. Consequently, the trapezoidal approximation converges exponentially with a rate given by eq.~(\ref{eq_traperror}). The rate of convergence depends on $\alpha$ which in turn comes from the location of $[A-B,A+B]$ relative to the set of singularities of the phase function.
\begin{figure}[ht]
\begin{center}
\resizebox{3in}{!}{\includegraphics{HG2a.png}}
\caption{Integration domain for a phase function. }
\label{fig-HG2a}
\end{center}
\end{figure}
\begin{figure}[ht]
\begin{center}
\resizebox{3in}{!}{\includegraphics{HG2b.png}}
\caption{Multimodal phase function given by
eq.~(\ref{eq_multimodal}).}
\label{fig-HG2b}
\end{center}
\end{figure}
\begin{example} \textbf{Henyey-Greenstein Phase Function}. Consider a phase function that is analytic in $\mathbb{C}$ minus the ray $[\frac{1 + g^2}{2g}, \infty)$. Then the integrand in eq.~(\ref{eq_matrix}) is analytic in the strip defined by
$$
\cosh \Im z < \left| \frac{(1+g^2)/2g - A}{B}
\right| = \left| \frac{(1+g^2)/2g - xy}{\sqrt{1-x^2}\sqrt{1-y^2}}
\right| \, .
$$
\end{example}
This case is illustrated by the magenta line in fig.~\ref{fig-HG2a}. Similar comments apply to the case where $g < 0$ (red line). The Henyey-Greenstein phase function defined in eq.~(\ref{HG}) is of this form. The error behavior of the trapezoidal rule~(\ref{eq_trapezoidal}) is illustrated in fig.~\ref{fig-HG1b} which shows the log-errors (thick lines) when this rule is used to approximate the integral in eq.~(\ref{eq_matrix}) for $x \in [-1,1]$ and for fixed $y = y_0, \, g$, for three different choices of $N$. The plot shows that errors are maximal for $x \approx y_0$ and that the errors have the slowest decrease near this value as $N$ increases. Also plotted (thin lines, same colors) are error estimates from eq.~(\ref{eq_traperror}), using $M = 1$ for simplicity. It is seen that the actual errors track the estimate very closely.
\begin{example} Consider a phase function that is analytic except possibly on rays
$x_0 \pm i \, \delta$ to $x_0 \pm i \, \infty$, where $x_0 \in \mathbb{R}, \, \delta > 0$.
Then the integrand in eq.~(\ref{eq_matrix}) is analytic in the strip defined by
\begin{equation}
- S < \sinh \Im z < S
\end{equation}
where $S$ is the unique positive solution of the equation
$$
\frac{(A - x_0)^2}{S^2 + 1} + \frac{\delta^2}{S^2} = B^2 \, .
$$
\label{ex_bump}
\end{example}
To see this, find $\alpha$ such that the ellipse parametrized by
$$
s \mapsto A + B (\cos s \cosh \alpha + i \cdot \sin s \sinh \alpha)
$$
passes through the points $x_0 \pm i \cdot \delta$, and set $S = \sinh \alpha$.
To obtain examples, fix $\delta > 0, \, x_0 \in \mathbb{R}, \gamma > 0$ and consider functions of $x \in \mathbb{R}$
\begin{eqnarray}
f_1(x;x_0,\delta,\gamma) &\propto& \left(1 + \left(\frac{x-x_0}{\delta} \right)^2 \right)^{- \gamma}
\label{eq_f1}
\\
f_2(x;x_0,\delta) &\propto& \text{sech} \, \left(\frac{x-x_0}{\delta} \right)
\label{eq_f2}
\end{eqnarray}
where the proportionality constants are chosen such that the integrals over $[-1,1]$ equal 1. Both functions have single maxima (peaks) at $x = x_0$ and the width of the peak is proportional to $\delta$. The Legendre expansions of these functions are generally not available in closed form. The function $f_1$ is analytic in the complex plane minus branch cuts from $x_0 \pm i \, \delta$ to $x_0 \pm i \, \infty$ (green lines in fig.~\ref{fig-HG2a}). The function $f_2$ is analytic in the complex plane minus poles at $z = x_0 \pm i \, \delta\left(\pi/2 + n \pi \right), \; n = 0, \, 1, \, 2, \dots$ (blue circles in fig.~\ref{fig-HG2a}). Therefore if $p = f_1$ or $p = f_2$, then the integrand in eq.~(\ref{eq_matrix}) is analytic in any strip $\mathcal{S}_\alpha$ where $\alpha = \text{arsinh} \, S$ and $S$ is as in example~\ref{ex_bump}.
The reader may note that the integrand in eq.~(\ref{eq_matrix}) contains factors $\cos m z$. These terms grow like $e^{ m |\Im z|}$ away from the real axis and their second derivatives contain the factor $m^2$. When the integrand is analytic and eq.~(\ref{eq_traperror}) can be used, the error is therefore proportional to
$e^{-\alpha N} e^{\alpha m} M = e^{-\alpha (N-m)}M$. Thus the additional factor $\cos m z$ has the same effect as using $N-m$ points instead of $N$ points for the evaluation of eq.~(\ref{eq_trapezoidal}), resulting in a modest loss of accuracy. On the other hand, if the integrand is merely twice differentiable, the error from eq.~(\ref{eq_traperrorC2}) becomes proportional to $m^2/N^2$. Thus the additional factor $\cos m z$ now has the same effect as using only $N/m$ points instead of $N$ points, leading to a much larger loss of accuracy. This illustrates the powerful effect of having an analytic integrand.
\subsection{Multimodal Phase Functions}
In practice, phase functions are obtained from scattering calculations using Mie theory, see e.g. \cite{wiscombe1980}. Such phase functions may have multiple local extrema. An artificial example (not obtained from Mie theory) is given in fig.~ \ref{fig-HG2b}. It uses the function
\begin{eqnarray}
p(x) &=& 0.8 p_{HG}(x;.9) + 0.1p_{HG}(x;-.6)\\
&\,& +
0.04f_1(x; .2, .01 ,3) + 0.06f_2(x;.6 , .02) \, .
\label{eq_multimodal}
\end{eqnarray}
where $f_1$ and $f_2$ are as in eq.~(\ref{eq_f1}, \ref{eq_f2}). The integrand in eq.~(\ref{eq_matrix}) turns out to be analytic in the strip $\mathcal{S}_\alpha$ with $\alpha \approx 10^{-2}$.
The scattering kernels $P_0$ and $P_7$ for this phase function were computed at $200 \times 200$ points with eq.~(\ref{eq_trapezoidal}), using $N = 128$ terms in each case. A logarithmic heat map of $P_0$ is shown in fig.~\ref{fig-HG3} and a heat map of $P_7$ is shown in fig.~\ref{fig-HG4}. The calculation took about 10 seconds per scattering kernel on a laptop equipped with a dual-core processor running at 1.40 GHz. The relative accuracy of each result is about $10^{-3}$.
\begin{figure}[ht]
\begin{center}
\resizebox{3in}{!}{\includegraphics{HG3.png}}
\caption{Logarithmic heat map of scattering kernel $P_0$ for the phase function given by eq.~(\ref{eq_multimodal}).}
\label{fig-HG3}
\end{center}
\end{figure}
\begin{figure}[ht]
\begin{center}
\resizebox{3in}{!}{\includegraphics{HG4.png}}
\caption{Heat map of scattering kernel $P_7$ for the phase function given by eq.~(\ref{eq_multimodal}).}
\label{fig-HG4}
\end{center}
\end{figure}
\section{Conclusion}
A direct numerical integration method using the trapezoidal rule has been presented for the evaluation of scattering kernels that arise in plane-parallel radiation transfer equations. Its convergence is exponential, and the relation between the convergence rate and the domain of analyticity of the phase function is explained. The note also presents a closed form of the scattering kernel for the Henyey-Greenstein phase function, in terms of complete elliptic integrals of the second kind. The closed form can be used to assess the accuracy of the proposed numerical integration scheme.
Most computational approaches to plane-parallel radiative transfer use discretizations based on truncated versions of eq.~(\ref{eq_LegendreExp}) (Nystr\"om's method). However, some problems of this form also require the evaluation of intensities from scattered beams which may be computed from scattering kernels. This is where a fast and accurate computational scheme such as the one presented here will hopefully be of use.
\section{Appendix: Complete elliptic integrals} Legendre's complete elliptic integrals $K_0$ and $E_0$ of the first and second kind are defined as
\begin{eqnarray}
K_0(m) &=& \int_0^{\pi/2} \frac{ds}{\sqrt{1 - m \sin^2 s}} \\
&=&
\frac{1}{2} \int_0^\pi \frac{ds}{\sqrt{1 - \frac{m}{2} \pm \frac{m}{2} \cos s}}
\label{eq_K0}
\\
E_0(m) &=& \int_0^{\pi/2} \sqrt{1 - m \sin^2 s} \, ds \\
&=&
\frac{1}{2} \int_0^\pi \sqrt{1 - \frac{m}{2} \pm \frac{m}{2} \cos s} \, ds
\, .
\label{eq_E0}
\end{eqnarray}
where $m \in \mathbb{C}$ is known as the \emph{parameter}. Note that usually
these integrals are expressed in terms of the \emph{modulus} $k$ where $m = k^2$, leading to the more common notation
\begin{equation}
K(k) = K_0(k^2), \quad E(k) = E_0(k^2)
\label{eq-elliptic-parameter}
\end{equation}
For example, the treatment in \cite{nist2010} is in terms of $K, \, E$ while \textsc{Mathematica}\textsuperscript{\textregistered} uses $K_0, \, E_0$.
These integrals converge for $m \in \mathbb{C}$ with $\Re m < 1$ and the functions can be continued analytically to $\mathbb{C}$ minus a branch cut along $[1,\infty)$.
It is known (see eq.~(19.4.1) in \cite{nist2010}) that
\begin{equation}
2m\frac{d}{dm} K_0(m) = \frac{E_0(m)}{1-m} - K_0(m) \, .
\label{eq-dK}
\end{equation}
Therefore, for $\alpha,\, \beta \in \mathbb{C}$, we can set $m = \frac{2\beta}{\alpha + \beta}$ and obtain in the case when $\Re \frac{2\beta}{\alpha + \beta} < 1$
\begin{eqnarray}
\int_0^\pi \frac{ds}{\sqrt{\alpha - \beta \cos s}} &=& \frac{2}{\sqrt{\alpha + \beta}} K_0\left(\frac{2\beta}{\alpha + \beta} \right)
\label{eq_K1}
\\
\int_0^\pi \sqrt{\alpha - \beta \cos s} \, ds &=& 2\sqrt{\alpha + \beta} E_0\left(
\frac{2\beta}{\alpha + \beta} \right)
\label{eq_E1}
\end{eqnarray}
where the principal branch of the square root is used.
Given real $0 < m < 1$, then $E_0(m)$ and $K_0(m)$ may be evaluated rapidly using the iterations
\begin{eqnarray}
a_0 &=& 1, \quad g_0 = \sqrt{1 - m}, \quad c_0 = \sqrt{m}\\
a_{n+1} &=& \frac{a_n + g_n}{2} \quad g_{n+1} = \sqrt{a_n g_n} \\
c_{n+1} &=& \frac{c_n^2}{4a_{n+1}}
\end{eqnarray}
for $n = 0, \, 1, \, 2, \dots$.
Then
\[\lim_{n \to \infty} a_n = \lim_{n \to \infty} g_n = M
\]
where $M = AGM(1,g_0)$ is known as Gauss's arithmetic-geometric mean; see \cite{nist2010}. The convergence is quadratic. Moreover, $\lim_{n \to \infty} c_n = 0$, and the convergence is also quadratic. One then obtains the values of $K_0$ and $E_0$ from
\begin{equation}
K_0(m) = \frac{\pi}{2 M}, \quad E_0(m) = \frac{\pi}{2 M} \left(1 - \sum_{n = 0}^\infty 2^{n-1}c_n^2 \right)\, .
\label{eq-fastelliptic}
\end{equation}
Due to the rapid convergence, only a few terms need to be evaluated. An alternative fast computation method for $E_0(m)$ is given in \cite{adlaj2012}.
\medskip
\textbf{Acknowledgement} This research was supported by the Cooperative Institute for Research in the Atmosphere (CIRA) at Colorado State University. Part of this work was carried out at the Joint Center for Satellite Data Assimilation (JCSDA) at NCWCP, College Park, MD.
\bibliographystyle{plain}
|
1,314,259,995,043 | arxiv | \section{Introduction}
\label{se1}
It is usually very fruitful to explore the connection between classical and quantum mechanics but this connection is almost always done by going from classical mechanics toward quantum mechanics. Here, we go the other way round and exhibit the classical analogue of the transition between the Weisskopf-Wigner exponential decay \cite{WW1930a,WW1930b} and the Rabi oscillation \cite{RabiPR37}: these two quantum mechanics regimes are very well known but it is only in 1977 that the existence of a continuous transition between them was exhibited by C. Cohen-Tannoudji and P. Avan \cite{Cohentannoudji77}.
This transition is described in detail in the book ``Atom-photon interactions'', by C. Cohen-Tannoudji, J. Dupont-Roc and G. Grynberg \cite{Cohentannoudji88}. At first sight, these two regimes seem to be very different because the Weisskopf-Wigner exponential decay appears when a discrete state is coupled to a continuum while the Rabi oscillation occurs when two discrete states are resonantly coupled. However, in the presence of the radiation continuum, any atomic system has only one discrete state, its ground state (i.e. the atomic ground state and the radiation vacuum state), and all the excited atomic states are narrow continua with a width equal to their radiative natural width. As a consequence, the Rabi oscillation between the atomic ground and an excited state, treated as a discrete state, is an approximation of the real situation because the excited state is in fact a narrow continuum: this approximation is excellent for times shorter than the excited state lifetime and Rabi oscillation is observed if the Rabi period is shorter than the excited state lifetime. The Weisskopf-Wigner exponential decay is an excellent approximation in the opposite limit, when the Rabi period is larger than the excited state lifetime.
In classical mechanics textbooks, the dynamics of two coupled mechanical oscillators is usually discussed without damping terms. In this case, the frequencies of the coupled oscillators present an avoided crossing, which is fully analogous to the avoided crossings of the eigenvalues of a Hamiltonian \cite{Landau65} and the dynamics, with a periodic exchange of energy between the two oscillators, is fully analogous to the Rabi oscillation. However, in the absence of damping terms, it is not possible to observe the classical analogue of the Weisskopf-Wigner exponential decay. Some textbooks on mechanical vibrations take into account damping in their treatment of coupled oscillators: this is the case of the book ``Mechanical vibrations'', by J.P. Den Hartog \cite{DenHartog56} which studies the damping of an oscillator by coupling to another oscillator. This device, patented by H. Frahm \cite{Frahm11} in 1911, is now known as a ``tuned mass damper'' and it has many applications. However, this book does not consider the general case discussed here.
In the present paper, we study theoretically two coupled oscillators with damping and we observe two limiting cases:
i) if the coupling effect is dominant in a sense explained in section \ref{se4}, the mixing of the two oscillators is not substantially modified by the presence of damping. In particular, this mixing induces an averaging of the damping rates which are equal in the case of exact resonance. In this case, the dynamics remains an analogue of the Rabi oscillation;
ii) if the damping effect is dominant, the mixing of the two oscillators is strongly modified by the presence of damping. In particular, if one of the two oscillators has a negligible damping rate, the situation is completely analogous to the coupling of a discrete state to a continuum: the damping rate of this oscillator, which is only due to its mixing with the other oscillator, decreases if the damping rate of the other oscillator increases. A similar result is observed in the Weisskopf-Wigner model, with the decay rate decreasing when the continuum width increases. The resonant frequency of this oscillator also presents variations, which are fully analogous to the frequency shift of a discrete level due to its coupling to a continuum.
The content of the present paper is organized as follows: section \ref{se2} recalls the coupling of two mechanical oscillators without damping; the same problem with damping is discussed in section \ref{se3}; section \ref{se4} presents some concluding remarks. Two appendices recall Newton's equations of two coupled mechanical oscillators (section \ref{se5}) and general properties concerning the damping of mechanical oscillators (section \ref{se6}).
\section{Coupled mechanical oscillators without damping}
\label{se2}
\subsection{Calculation of the oscillation frequencies}
Newton's equations of two coupled pendulums or a double pendulum (see Appendix A) take the same form
\begin{eqnarray}\label{s0}
\frac{d^{2}x_1}{dt^{2}} &=& -\omega_1^{2} x_1 +\omega_{12}^{2} x_2, \nonumber \\
\frac{d^{2}x_2}{dt^{2}} &=& +\omega_{21}^{2}x_1-\omega_2^{2} x_2,
\end{eqnarray}
\noindent where $x_1$ and $x_2$ measure the distance to equilibrium (we assume that $\omega_1$, $\omega_2$, $\omega_{12}$ and $\omega_{21}$ are positive). Using complex notations, we seek a solution of the form $x_j(t)= a_j \exp\left( i \omega t\right)$ with $j=1,2$. The amplitudes $a_j$ are solutions of a homogeneous system
\begin{equation}
\label{s1}
\left[ \begin{array}{cc}
\left( \omega^{2} -\omega_1^{2}\right) & \omega_{12}^{2} \\
\omega_{21}^{2} & \left( \omega^{2} -\omega_2^{2}\right) \\
\end{array} \right] \left[ \begin{array}{c } a_1 \\a_2 \end{array} \right]=0.
\end{equation}
\noindent This system has a non-zero solution only if the matrix determinant vanishes and we thus get the equation satisfied by $\omega$
\begin{eqnarray}\label{s2}
\left( \omega^{2} -\omega_1^{2}\right) \left( \omega^{2} -\omega_2^{2}\right) -\omega_{12}^{2}
\omega_{21}^{2}= 0.
\end{eqnarray}
\noindent This equation has two roots
\begin{eqnarray}\label{s3}
\omega_{\pm}^{2} = \frac{\omega_1^{2}+\omega_2^{2}}{2} \pm\sqrt{\left( \frac{\omega_1^{2}-\omega_2^{2}}{2} \right) ^2+\omega_{12}^{2} \omega_{21}^{2} }.
\end{eqnarray}
\noindent Equation (\ref{s3}) is symmetric as the two oscillators play symmetric roles. In the following, we consider that $\omega_1$ is fixed and we use it as a frequency unit. Then, eq. (\ref{s3}) becomes
\begin{eqnarray}\label{s3a}
\frac{\omega_{\pm}^{2}}{\omega_1^{2}} = \frac{1}{2}\left[\left(1+\frac{\omega_2^2}{\omega_1^{2}} \right) \pm\sqrt{\left(1-\frac{\omega_2^2}{\omega_1^{2}} \right)^2 +4\kappa^2}
\right],
\end{eqnarray}
\noindent where we have introduced a dimensionless coupling parameter $\kappa$ defined by
\begin{eqnarray}\label{s3b}
\kappa \equiv \frac{\omega_{12} \omega_{21}}{\omega_1^2}.
\end{eqnarray}
\noindent $\omega_{\pm}/\omega_1$ are plotted as a function of $\omega_2/\omega_1$ in fig. \ref{fig1} for various values of $\kappa $. The two frequencies present an avoided crossing, with a width proportional to $\kappa$. For each root $\omega_{\pm}$, the oscillation amplitudes $a_{\pm,1}$ and $a_{\pm,2}$ are given by
\begin{eqnarray}\label{s4}
\frac{ a_{\pm,1}}{a_{\pm,2} } =\frac{\omega_{12}^{2} }{\omega_1^{2}-\omega_{\pm}^{2}}.
\end{eqnarray}
\noindent At exact resonance, $\omega_2 =\omega_1$, the mixing of the oscillation amplitudes is given by $ a_{\pm,1}/a_{\pm,2} = \mp\sqrt{\omega_{12}^{2}/\omega_{21}^{2}}$, equal to $\mp 1$ in the case of two identical pendulums coupled by a spring.
\begin{figure}[h]
\begin{center}
\includegraphics[width= 8 cm]{fig1.eps}
\caption{The values of $\omega_{\pm}/\omega_1$ plotted as a function of the ratio $\omega_2/\omega_1$ exhibit an avoided crossing with a width proportional to the coupling constant $\kappa$. The curves correspond to $\kappa= 0.05 $ dashed (blue) curves $\kappa=0.1$ full (red) curves and $\kappa=0.2$ dot-dashed (green) curves.}
\label{fig1}
\end{center}
\end{figure}
\subsection{Avoided crossings in quantum mechanics}
The avoided crossing of $\omega_{\pm}$ is an analogue of the avoided crossings observed in quantum mechanics \cite{Landau65}. If a Hamiltonian depends on a parameter $\lambda$, the energies of the eigenstates present avoided crossings, when plotted as functions of $\lambda$. This is illustrated by the atomic Zeeman effect in the presence of fine (or hyperfine) structure, $\lambda$ being the magnetic field: the energies of levels with the same $m_J$ (or $m_F$) values (where $m_J$ or $m_F$ is the projection of the total angular momentum on the field axis) present avoided crossings. Crossings can be observed if the Hamiltonian presents a symmetry: the energies, which cross each other, are associated to eigenstates belonging to different symmetry classes. As the Zeeman Hamiltonian has the rotation symmetry around the field axis, the levels with different $m_J$ (or $m_F$) values belong to different symmetry classes and crossings between levels with different $m_J$ (or $m_F$) values are observed.
\subsection{Classical analogue of the Rabi oscillation}
We consider the two-oscillator system with the following initial conditions, $x_1(0)=X$, $x_2(0)=0$ with vanishing velocities $dx_1(0)/dt=dx_2(0)/dt=0$, i.e. at $t=0$, the energy $E_1$ of oscillator $1$ is maximum while the energy $E_2$ of oscillator $2$ vanishes. The evolution of $x_1(t)$ and $x_2(t)$ is exactly given by:
\begin{eqnarray}\label{sr1}
\frac{x_1(t)}{X} &=& \frac{\left(\omega_{+}^2 -\omega_{1}^2 \right) \cos\left(\omega_{-}t \right)-\left(\omega_{-}^2 -\omega_{1}^2 \right) \cos\left(\omega_{+}t \right)}{\omega_{+}^2 -\omega_{-}^2} \nonumber \\
\frac{x_2(t)}{X} &=& \frac{\left(\omega_{+}^2 -\omega_{1}^2 \right)\left(\omega_{-}^2 -\omega_{1}^2 \right)}{\omega_{12}^{2}\left( \omega_{+}^2 -\omega_{-}^2\right) }\left[ \cos\left(\omega_{+}t \right)- \cos\left(\omega_{-}t \right) \right]. \nonumber \\
\end{eqnarray}
\noindent In order to simplify the algebra and to exhibit more clearly the analogy with the Rabi oscillation, we assume that $\omega_{21}=\omega_{12}$ and we introduce the frequency detuning $\delta = \omega_{2} -\omega_{1}$, the mean of the uncoupled oscillator frequencies $\omega_m =\left( \omega_{1}+\omega_{2}\right) /2$ and the equivalent of the Rabi frequency $\Omega_1 = \omega_{12}^2/\omega_m $. If $\delta $ and $\Omega_1 $ are both small with respect to $\omega_m$, $\omega_{\pm}$ are approximately given by
\begin{eqnarray}\label{sr2}
\omega_{\pm} &\approx& \omega_m \pm \frac{1}{2} \sqrt{\delta^2+ \Omega_1^2}
\end{eqnarray}
\noindent and we can rewrite $x_1(t)$ and $x_2(t)$
\begin{eqnarray}\label{sr3}
\frac{x_1(t)}{X} &\approx & \cos\left(\omega_{m}t\right) \cos\left(\sqrt{\delta^2+ \Omega_1^2}\frac{t }{2}\right) \nonumber \\&&-\frac{\delta}{\sqrt{\delta^2+ \Omega_1^2}} \sin\left(\omega_{m}t\right) \sin\left(\sqrt{\delta^2+ \Omega_1^2}\frac{t }{2}\right) \nonumber \\
\frac{x_2(t)}{X} &\approx & \frac{\Omega_1}{\sqrt{\delta^2+ \Omega_1^2}} \sin\left(\omega_{m}t\right) \sin\left(\sqrt{\delta^2+ \Omega_1^2}\frac{t }{2}\right). \nonumber \\
\end{eqnarray}
\noindent $x_1(t)$ and $x_2(t)$ are both oscillating at the large frequency $\omega_{m}$ and their oscillation amplitude is slowly modulated at the frequency $\sqrt{\delta^2+ \Omega_1^2}$. If we consider for instance the energy $E_2$ of oscillator $2$ averaged over one period of the fast oscillation, it is given by
\begin{eqnarray}\label{sr4}
\frac{E_2}{E_1+E_2} =\frac{\Omega_1^2}{\delta^2+ \Omega_1^2} \sin^2\left(\sqrt{\delta^2+ \Omega_1^2}\frac{t }{2}\right).
\end{eqnarray}
\noindent If we consider the Rabi oscillation \cite{RabiPR37,Cohentannoudji88,Loudon83}, with all the population at $t=0$ in level $1$, the population transferred at $t$ in level $2$ is exactly given by the right-hand side of eq. (\ref{sr4}).
\section{Coupled mechanical oscillators with damping}
\label{se3}
\subsection{Coupled equations with damping}
We now add damping terms in the equations of motion of the coupled oscillators and we use anelastic damping terms (see Appendix B), because this choice simplifies the calculations. We assume that the imaginary parts of the coupling terms $\omega_{12}^{2}$ and $ \omega_{21}^{2} $ are negligible. Then, we have simply to replace $\omega_j^{2} $ by $\omega_j^{'2} = \omega_j^{2} +i \omega_j\gamma_j$ in eqs. (\ref{s0}) and we assume that the damping is weak $\gamma_j\ll \omega_j$ ($j=1,2$). Eq. (\ref{s2}) becomes
\begin{eqnarray}\label{s7}
\left( \omega^{2} -\omega_1^{'2}\right) \left( \omega^{2} -\omega_2^{'2}\right) -\omega_{12}^{2} \omega_{21}^{2}=0.
\end{eqnarray}
\noindent Thanks to the choice of anelastic damping, this equation is a second degree equation in $\omega^{2}$ whereas this equation would be a fourth degree equation in $\omega$, if we had used viscous damping terms. The solutions of eq. (\ref{s7}) are given by
\begin{eqnarray}\label{s8}
\frac{\omega_{\pm}^{2}}{\omega_1^{2}} = \frac{\omega_1^{'2}+\omega_2^{'2}}{2\omega_1^{2}} \pm\sqrt{\left(\frac{\omega_1^{'2}-\omega_2^{'2}}{2\omega_1^{2}} \right)^2 +\kappa^2}.
\end{eqnarray}
\noindent This result is similar to eqs. (\ref{s3},\ref{s3a}) but, as it involves the square root of a complex number, it is less easy to visualize the variations of $\omega_{\pm}$. If the coupling term $\kappa$ is small, the interesting case is close to resonance and we will consider only this case from now on. We make an analytic study in two limiting cases and, afterwards, a numerical one in the general case.
\subsection{Analytic study of the resonance region $\omega_2 /\omega_1 \approx 1$}
The difference of the real parts of the frequencies $\omega_j^{'} $ is small and, in the square root of eq. (\ref{s8}), there are two competing dimensionless terms: the coupling term $\kappa^2 $, which is positive, and the term $\left[ \left(\omega_1^{'2}-\omega_2^{'2}\right)/\left( 2\omega_1^{2}\right)\right] ^2 $ which, exactly at resonance, is negative and equal to $-\left[ \left( \gamma_1-\gamma_2 \right)/\left( 2\omega_1\right)\right]^2$. The behavior is different, depending on which term is dominant, and it is natural to introduce a critical value $\kappa_{cr}$ of the coupling term defined by
\begin{eqnarray}\label{s8a}
\kappa_{cr} \equiv \frac{\left|\gamma_1-\gamma_2\right|}{2\omega_1} =\left|\frac{1}{2Q_1} -\frac{1}{2Q_2} \right|,
\end{eqnarray}
\noindent where we have introduced the quality factors of the uncoupled oscillators $Q_1=\omega_1/\gamma_1$ and $Q_2=\omega_1/\gamma_2$ (we may use $\omega_1$ for both oscillators as we study the resonance region $\omega_2 \approx\omega_1$). There are no simple analytic results when $\kappa \approx \kappa_{cr}$ and, in order to get approximate analytic results, we consider two limiting cases $\kappa \gg \mbox{ or } \ll\kappa_{cr}$.
\subsubsection{The coupling term is dominant: $\kappa \gg \kappa_{cr}$}
The mixing of the two oscillators occurs almost as in the absence of damping. The real parts of frequencies $\omega_{\pm}$ present an avoided crossing and, exactly at resonance, they are given by
\begin{eqnarray}\label{s9}
\frac{\omega_{\pm}^{2}}{\omega_1^{2}} \approx \frac{\omega_1^{'2}+\omega_2^{'2}}{2\omega_1^{2}} \pm\kappa.
\end{eqnarray}
\noindent This approximate form is valid as long as $\left( \omega_1^{'2}-\omega_2^{'2} \right)^2/\omega_1^{4}\ll 4\kappa^2$: this is verified near the resonance center but not in the wings where the difference $(\omega_1-\omega_2)$ becomes large. When eq. (\ref{s9}) is valid, the two resonance frequencies have the same imaginary part $\approx \left( \gamma_1+\gamma_2\right) /4$, corresponding to an averaged damping rate $\left( \gamma_1+\gamma_2\right) /2$. This result can be understood by reference to the case without damping: in the resonance center, the two oscillators are completely mixed.
\subsubsection{The damping is dominant $\kappa \ll \kappa_{cr}$}
The behavior is completely different: the real parts of $\omega_{\pm}$ do not present an avoided crossing when the ratio $\omega_2/\omega_1$ varies: $\omega_{+}\rightarrow \omega_1$ (and $\omega_{-} \rightarrow \omega_2$) when the ratio $\omega_2/\omega_1 $ is sufficiently smaller or larger than $1$. We expand the square root appearing in eq. (\ref{s8}) at first order in $\kappa^2$:
\begin{eqnarray}\label{s10}
\frac{\omega_{\pm}^{2}}{\omega_1^{2}} \approx \frac{\omega_1^{'2}+\omega_2^{'2}}{2\omega_1^{2}} \pm \left[\frac{ \omega_1^{'2}-\omega_2^{'2}}{2\omega_1^{2}} + \kappa^2 \frac{\omega_1^{2}}{\omega_1^{'2}-\omega_2^{'2}} \right],
\end{eqnarray}
\noindent from which we deduce $\omega_{\pm}$
\begin{eqnarray}\label{s11}
\omega_{+} &\approx & \omega_1 + i \frac{\gamma_1}{2} + \frac{\kappa^2}{4} \times\frac{\omega_1^2}{\omega_1-\omega_2 +i \left( \gamma_1-\gamma_2 \right) /2}\nonumber \\
\omega_{-} &\approx & \omega_2+ i \frac{\gamma_2}{2} - \frac{\kappa^2}{4} \times\frac{\omega_1^2}{\omega_1-\omega_2 +i \left( \gamma_1-\gamma_2 \right) /2}.
\end{eqnarray}
\noindent We have simplified the results by replacing, for instance, $\left( \omega_1^{2}-\omega_2^{2}\right)$ by $2\omega_1 \left(\omega_1-\omega_2\right)$, which is a good approximation because $\omega_2 \approx\omega_1$, and by omitting terms in $\kappa^2\gamma_1$ or $\kappa^2\gamma_2$ which are negligible in the weak damping limit. When the frequency ratio $\omega_2/\omega_1$ varies around $1$, the real and imaginary parts of $\omega_{\pm}$ present resonant variations with Lorentzian line shapes: a dispersion (respectively absorption) lineshape for the real (respectively imaginary) parts of $\omega_{\pm}$. The full width at half maximum of these resonant variations is the same, equal to $\left( \gamma_1-\gamma_2 \right)$. The damping rate is given by the imaginary parts of $\omega_{\pm}$ and eqs. (\ref{s11}) prove that some damping is transferred from the more damped oscillator to the less damped one.
If we consider the case of exact resonance, $\omega_2 = \omega_1$, eq. (\ref{s10}) can be simplified and the quality factors $Q_{\pm}$ of the coupled oscillators are given by
\begin{eqnarray}\label{s11a}
\frac{1}{Q_{\pm}}&\equiv & \frac{2{\mathcal{I}}m\left( \omega_{\pm}\right)}{\omega_1}
\approx \frac{1}{2}\left(\frac{1}{Q_1} + \frac{1}{Q_2}\right) \nonumber \\
&& \pm \frac{1}{2}\left(\frac{1}{Q_1} -\frac{1}{Q_2}\right) \sqrt{1 -\frac{\kappa^2}{\kappa_{cr}^2} },
\end{eqnarray}
\noindent at first order in $1/Q_1 $ and $1/Q_2$.
From now on, we simplify the discussion by assuming that $\gamma_2=0$ (i.e. $Q_2 \longrightarrow \infty$): only oscillator $1$ is damped and the damping of oscillator $2$ is solely due to its coupling to oscillator $1$. Then eq. (\ref{s11a}) can be simplified and the quality factor $Q_{-}$ is given by
\begin{eqnarray}\label{s11b}
Q_{-} \approx \frac{1}{\kappa^2 Q_1}.
\end{eqnarray}
\noindent At first sight, this result is very surprising: the damping of oscillator $2$ induced by coupling to oscillator $1$ decreases when the damping of oscillator $1$ increases. We may nevertheless understand why it is so. The coupling term induces a mixing of the two oscillators which, in the absence of damping, is maximum at exact degeneracy, $\omega_2 = \omega_1$. In the presence of damping of oscillator $1$, the resonance frequency $\omega_1^{'}$ is complex and the difference $\left(\omega_1^{'}-\omega_2^{'}\right)$ never vanishes; its minimum modulus is $\gamma_1/2$ and, as the coupling is weak, this distance to degeneracy is sufficient to prevent a strong mixing of the two oscillators. Moreover, when $\gamma_1$ increases, the mixing decreases and, as a consequence, so does the damping induced on oscillator $2$.
The resonant variations of the real parts of $\omega_{\pm}$ are also very interesting. In particular, the frequency displacement of the resonance of oscillator $2$ is equal to the real part of $\Delta\omega_{-}\equiv \left( \omega_{-} - \omega_{2}\right)$. It is larger than its damping rate when $\left|\omega_1-\omega_2\right| > \gamma_1 /2$ and it should be easy to detect this displacement.
\subsection{Numerical study of the resonance region $\omega_2 /\omega_1 \approx 1$}
We now complement these analytic results by a numerical study. The frequencies and damping rates are referred to $\omega_1$ taken as the frequency unit. We assume that, in the absence of coupling, oscillator $2$ is not damped i.e. $\gamma_2=0$: this choice reduces the number of parameters and enhances the visibility of the damping induced by coupling to oscillator $1$.
If $\kappa \leqslant \kappa_{cr}$, the values of $\omega_{+}$ and $\omega_{-}$ are approximately given by eqs. (\ref{s11}) and there is no avoided crossing. We have plotted the real parts of $\Delta\omega_{\pm}/ \omega_1\equiv \left( \omega_{\pm} - \omega_1\right) /\omega_1$ as a function of $\omega_2 /\omega_1$ in the upper panel of fig. \ref{fig2}: as predicted by our analytic results, these curves are close to Lorentz dispersion curves.
If $\kappa > \kappa_{cr}$, there is an avoided crossing at resonance and we have plotted the real parts of $ \omega_{\pm}/\omega_1$ as a function of $\omega_2 /\omega_1$ in the lower panel of fig. \ref{fig2}: the shape of these curves is complicated when $\kappa$ is only slightly larger than $\kappa_{cr}$ but, when $\kappa$ increases, these curves rapidly become very similar to those calculated in the absence of damping and presented in fig. \ref{fig1}.
\begin{figure}[h]
\begin{center}
\includegraphics[width= 8 cm]{fig2a.eps}
\includegraphics[width= 8 cm]{fig2b.eps}
\caption{(Color online) Effect of the coupling on the real parts of $\omega_{\pm}$. The calculation is done with $\gamma_1/\omega_1= 0.01$ and $\gamma_2=0$ corresponding to the critical $\kappa$-value $\kappa_{cr} = 5\times 10^{-3}$. \\
Upper panel: Plot of the real parts of $\Delta\omega_{+}/ \omega_1= \left( \omega_{+} - \omega_{1}\right) /\omega_1$ and of $\Delta\omega_{-}/ \omega_1=\left( \omega_{-} - \omega_{2}\right) /\omega_1$ as a function of $\omega_2/\omega_1$ for the following values of $\kappa/ \kappa_{cr}$: $0.5$ dot-dashed (green) curves, $0.7$ full (red) curves, and $0.9$ dashed (blue) curves. The curves are close to Lorentz dispersion curves, with $\Delta\omega_{-}>0$ (respectively $<0$) when $\omega_2 /\omega_1 >1$ (respectively $<1$) and the opposite behavior for $\Delta\omega_{+}$.\\
Lower panel: Plot of the real parts of $\omega_{\pm}$ as a function of $\omega_2/\omega_1$ for the following values of $\kappa/ \kappa_{cr}$: $1.0$ dashed (blue) curves, $1.1$ full (red) curves, $1.2$ dot-dashed (green) curves.}\label{fig2}
\end{center}
\end{figure}
\begin{figure}[h]
\begin{center}
\includegraphics[width= 8 cm]{fig3.eps}
\caption{ Plot of the imaginary parts of $\omega_{\pm}/\omega_1$ as a function of the ratio $\omega_2/\omega_1$. $\gamma_1/\omega_1= 0.01$ and $\gamma_2=0$, corresponding to $\kappa_{cr} = 5\times 10^{-3}$. The different curves correspond to $\kappa/ \kappa_{cr} = 0.4$ dashed (blue) curves; $0.6$ full (red) curves; $0.8$ dot-dashed (green) curves; $1$ full (violet) curves and $1.2$ dashed (blue) curves. }
\label{fig3}
\end{center}
\end{figure}
Figure \ref{fig3} presents the variations of ${\mathcal{I}}m\left( \omega_{\pm}\right)/\omega_1$ as a function of $\omega_2 /\omega_1$ for a series of $\kappa$-values. These plots are close to Lorentz absorption curves when $\kappa$ is below $\kappa_{cr}$ while, when $\kappa \geqslant \kappa_{cr}$, the imaginary parts cross each other when $\omega_2 = \omega_1$.
\subsection{Comparison with quantum mechanics}
The two regimes discussed above appear to be fully similar to those observed in quantum mechanics when a discrete state is coupled to a continuum. As recalled in the introduction, there are two well-known limiting cases of this dynamics: the Weisskopf-Wigner exponential decay of the discrete state \cite{WW1930a,WW1930b} is a good approximation when the discrete state is weakly coupled to the continuum while the Rabi oscillation between two discrete states \cite{RabiPR37} is a good approximation if the continuum width is negligible. The continuous transition between these two regimes, first discussed by C. Cohen-Tannoudji and P. Avan \cite{Cohentannoudji77} in 1977, is described in detail in the book ``Atom-photon interactions'', by C. Cohen-Tannoudji, J. Dupont-Roc and G. Grynberg \cite{Cohentannoudji88}. We cannot reproduce here this discussion but we may summarize its results. The important quantities are the width $w_0$ of the continuum and the Rabi frequency $\Omega_1$:
a) if $w_0 \gg \Omega_1$, the dynamics is well described by the Weisskopf-Wigner exponential decay with the decay rate approximately given by the Fermi golden rule. The density of states appearing in the Fermi golden rule is inversely proportional to the width $w_0$ and, as a consequence, the decay rate of the discrete state is also $\propto 1/w_0$. As $w_0$ gives the decay rate of a continuum wavepacket, the decay rate of the discrete state decreases when the decay rate of a continuum wavepacket increases. In addition, the coupling to the continuum also induces an energy shift, which is usually difficult to measure and this difficulty explains why it is rarely discussed. However, this shift is famous in the case of the Lamb shift, first discovered in the $n=2$ level of hydrogen \cite{LambPR47} and its discovery has played a very important role in the development of Quantum Electro-Dynamics. This shift exists also in molecular predissociation (see ref. \cite{ChildCJP75} and references therein).
b) in the opposite case, if $w_0 \ll \Omega_1$, the dynamics is well described by a Rabi oscillation between the discrete state and the narrow continuum which behaves as a discrete state, at least for timescales smaller than $h/w_0$ (where $h$ is the Planck constant).
Finally, by varying the relative magnitude of $\Omega_1$ and $w_0$, one can observe a continuous transition between these two limiting cases \cite{Cohentannoudji88}. Here is an application of these ideas: we consider an atom in its ground state coupled by a resonant laser to one of its excited states. Because of spontaneous emission, the excited state has a finite lifetime $\tau$ and it is a continuum of finite width $w_0\sim h/\tau$. The discussion is simpler if we may neglect spontaneous emission toward the ground state. If the Rabi frequency $\Omega_1$ is weak, $\Omega_1 \tau \ll 1$, the effect of the laser is to transfer ground state atoms in the excited state from which they never come back: this means that the laser has given a finite lifetime to the ground state and this lifetime, proportional to $1/(\Omega_1^2 \tau) $, increases when the excited lifetime proportional to $\tau$ decreases. In the opposite case of a strong coupling, $\Omega_1 \tau \gg 1$, the dynamics is described by a Rabi oscillation with a period inversely proportional to $\Omega_1$ and both states have the same lifetime equal to $2\tau $.
\section{Concluding remarks}
\label{se4}
In this paper, we have first recalled the coupling of two mechanical oscillators in the absence of damping. In a second step, we have added the effect of damping and we have shown that the behavior is very different, depending on the relative magnitude of the coupling and damping terms:
\begin{itemize}
\item the case when the coupling dominates the damping is classic. Then, the mixing of the two oscillators can be treated almost as in the absence of damping. The resonance frequencies are repelled by the coupling and the two oscillators are mixed by the coupling. As a consequence, the damping is shared, proportionally to the mixing induced by the coupling.
\item the original case occurs when the difference of damping rates dominates the coupling. Then, the coupling has weaker effects on the frequencies and on the damping rates of the two oscillators. If the frequency of one oscillator is swept close to resonance, the real and imaginary parts of the frequencies of the coupled oscillators present resonant variations with Lorentzian lineshapes, a dispersion lineshape for the real parts of the frequencies and an absorption lineshape for their imaginary parts. Some damping is transferred from the more damped oscillator to the less damped one with a surprising result: the damping transferred decreases when the damping of the more damped oscillator increases.
\end{itemize}
These two regimes are very similar to what occurs in quantum mechanics with the continuous transition between the Rabi oscillation regime and the Weisskopf-Wigner exponential decay when a discrete state is coupled to a continuum.
\section{Appendix A: example of coupled mechanical oscillators}
\label{se5}
\subsection{Two pendulums coupled by a spring}
\label{se51}
\begin{figure}[h!]
\includegraphics[width=8.0 cm]{fig4.eps}
\caption{ Schematic drawing of two simple pendulums coupled by a spring represented by the large horizontal arrow. }
\label{fig4}
\end{figure}
We first consider the case of two simple pendulums of masses $m_1$ and $m_2$ and of lengths $l_1$ and $l_2$ (see fig. \ref{fig4}). We note $x_1$ and $x_2$ their displacements from equilibrium and we assume a coupling force proportional to the difference $(x_1-x_2)$. The equations of motion are
\begin{eqnarray}\label{r1}
m_1 \frac{d^{2}x_1}{dt^{2}} &=& -k_1 x_1 -k_{12}(x_1-x_2),\nonumber \\
m_2 \frac{d^{2}x_2}{dt^{2}} &=& -k_2 x_2 -k_{21}(x_2-x_1),
\end{eqnarray}
\noindent where $k_1= m_1g/l_1$ and $k_2=m_2g/l_2$, $g$ being the acceleration of gravity.
Because of the equality of action and reaction, $k_{12}= k_{21}$. Noting $\omega_1^{2} = (k_1+ k_{12})/m_1$, $\omega_2^{2} = (k_2+ k_{12})/m_2$, $\omega_{12}^{2} = k_{12}/m_1$ and $\omega_{21}^{2} = k_{12}/m_2$, we get
\begin{eqnarray}\label{r3}
\frac{d^{2}x_1}{dt^{2}} &=& -\omega_1^{2} x_1 +\omega_{12}^{2} x_2, \nonumber \\
\frac{d^{2}x_2}{dt^{2}} &=& +\omega_{21}^{2}x_1-\omega_2^{2} x_2.
\end{eqnarray}
\subsection{Double pendulum}
\begin{figure}[h]
\begin{center}
\includegraphics[width=8.0 cm]{fig5.eps}
\caption{ Schematic drawing of two different double pendulums with the top pendulum being either a simple pendulum (case a) or an elastic pendulum (case b).}
\label{fig5}
\end{center}
\end{figure}
Two possible arrangements of a double pendulum are shown in fig. \ref{fig5}. The equations of motion are
\begin{eqnarray}\label{r4}
m_1 \frac{d^{2}x_1}{dt^{2}} &=& -k_1 x_1 +k_{2}x_2,\nonumber \\
m_2 \frac{d^{2}\left( x_1+x_2\right) }{dt^{2}} &=& -k_2 x_2.
\end{eqnarray}
\noindent In case a of fig. \ref{fig5}, $k_1= m_1g/l_1$ while, in case b, $k_1$ is the elastic constant of the spring and, in both cases, $k_2=m_2g/l_2$. Thanks to the first equation, we eliminate $d^{2}x_1/dt^{2}$ from the second one and we get
\begin{eqnarray}\label{r5}
\frac{d^{2}x_1}{dt^{2}} &=& -\frac{k_1}{m_1} x_1 +\frac{k_2}{m_1}x_2,\nonumber \\
\frac{d^{2}x_2}{dt^{2}} &=& \frac{k_1}{m_1}x_1- \frac{k_2\left( m_1+m_2\right) }{m_1m_2} x_2.
\end{eqnarray}
\noindent Noting $\omega_1^{2} = k_1/m_1$, $\omega_2^{2} = k_2(m_1+m_2)/(m_1m_2)$, $\omega_{12}^{2} =k_{2}/m_1$ and $\omega_{21}^{2} = k_{1}/m_1=\omega_1^{2}$, we also get eqs. (\ref{r3}). In this case, the coupling parameter $\kappa$ defined by eq. (\ref{s3b}) is equal to $\kappa =\sqrt{k_2/k_1}$ and, if we assume resonance $\omega_2=\omega_1$, $\kappa =\sqrt{m_2/(m_1+m_2)}$.
\section{Appendix B: damping of mechanical oscillators}
\label{se6}
The damping of a mechanical oscillator is treated in most textbooks. The equation of motion is linear in two main cases: i) a damping force proportional to the velocity due, for instance, to the friction on a fluid in the Stokes regime \cite{Stokes1850}; ii) an anelastic behavior of the spring \cite{ZenerPR37,ZenerPR38}, i.e. a restoring force which is not in phase with the displacement. Anelastic effect is described by an extension of Hooke's law \cite{SaulsonPRD90}, with a force proportional to $\left[ 1+i\phi(\omega) \right] x$ in complex notations. Here $x$ is the displacement with respect to equilibrium and $\phi(\omega)$ the phase shift between the force and the displacement for an oscillation at a frequency $\omega$. The equations of motion are
\begin{eqnarray}\label{q1}
\frac{d^{2}x}{dt^{2}} &=& -\omega_0^{2} x -\gamma \frac{dx}{dt} \mbox{ (viscous)},\nonumber \\
\frac{d^{2}x}{dt^{2}} &=& -\omega_0^{2}\left[ 1+i\phi(\omega)\right] x \mbox{ (anelastic)}.
\end{eqnarray}
\noindent $x(t) \propto \exp\left( i \omega t\right) $ is a solution if $\omega$ satisfies the following equations
\begin{eqnarray}\label{q3}
\omega^{2} -i \gamma\omega -\omega_0^{2} &=&0 \mbox{ (viscous)}, \\
\omega^{2} -\omega_0^{2} \left[ 1+i\phi(\omega)\right] &=&0 \mbox{ (anelastic)}.
\end{eqnarray}
\noindent $\omega$ is then given by
\begin{eqnarray}\label{q4}
\omega &=& \pm\sqrt{\omega_0^{2}- \frac{\gamma^{2}}{4}}+i \frac{\gamma}{2}
\approx\pm\omega_0 + i \frac{\gamma}{2} \mbox{ (viscous)}, \label{q4a} \\
\omega & = & \pm\sqrt{\omega_0^{2}\left[ 1+i\phi(\omega_0)\right] }
\approx \pm\omega_0\left[ 1+i\frac{\phi(\omega_0)}{2}\right] \label{q4b} \mbox{ (anelastic)}, \nonumber \\
\end{eqnarray}
\noindent with the approximate forms valid in the weak-damping limit, $\gamma \ll \omega_0$ or $\left|\phi(\omega_0)\right| \ll 1$. Moreover, to insure damping and not amplification, $\phi(\omega)$ must be an odd function of $\omega$ and we have chosen $\omega$ and $\phi(\omega)$ both positive. Both damping mechanisms lead to an exponential decrease of the oscillation amplitude with $ x(t) \approx x(0) \exp\left(-t/\tau\right) \cos\left(\omega_0 t\right)$. The decay time constant is equal to $\tau =2/\gamma$ or $\tau = 2/\left( \omega_0\phi(\omega_0)\right)$. The resonance quality factor defined by $Q \equiv \omega\tau /2$ is equal to $Q = {\mathcal{R}}e(\omega)/(2{\mathcal{I}}m(\omega)) = \omega_0/\gamma$ or $Q = 1/\phi(\omega_0)$. If we add on the right hand side of eqs. (\ref{q1}) a driving term $b\exp\left( i \omega t\right)$, the steady state regime is given by
\begin{eqnarray}\label{q5}
x(t) &=& \frac{b\exp\left( i \omega t\right)}{\omega^{2} -i \gamma\omega -\omega_0^{2}} \mbox{ (viscous)}, \\
x(t) &=& \frac{b\exp\left( i \omega t\right)}{\omega^{2} -\omega_0^{2} \left( 1+i\phi(\omega_0)\right)} \mbox{ (anelastic)}.
\end{eqnarray}
\noindent The two resonances have very similar lineshapes, quasi-Lorentzian in the viscous case and exactly Lorentzian in the anelastic case, with a maximum of the amplitude for $\omega \approx \omega_0$ and a resonance full width equal to $\gamma$ or $\omega_0\phi(\omega_0)$. The differences, which appear in the far wings, are of minor importance as the interest is focused on the resonance core. As a conclusion, both mechanisms are almost equivalent if $\gamma=\omega_0\phi(\omega_0)$. We use anelastic damping in the calculations of section \ref{se3}, because it simplifies the algebra, and we replace $\omega_0\phi(\omega_0)$ by $\gamma$ so that the equations look closer to the viscous damping discussed in most textbooks.
|
1,314,259,995,044 | arxiv | \section{Introduction}
A Handheld robot shares properties of a handheld tool while being enhanced with autonomous motion as well as the ability to process task-relevant information and user signals.
Earlier work in this field explored the communication between user and robot to improve cooperation \cite{GreggSmith:2015bh} \cite{GreggSmith:2016hn}. Such one-way communication of task planning, however, is limited in that the robot has to lead the user. But as users exert their will and decisions, task conflicts emerge which in turn cause user frustration and decrease cooperative task performance.
As a starting point of addressing this problem, extended user perception can be introduced to allow the robot to estimate the user's point of attention via eye gaze in 3D space during task execution\cite{Stolzenwald:2018un}. An estimate of users' visual attention informs the robot about areas of users' interest. While introducing attention was preferred, particularly for temporally demanding tasks, it is still limiting. What is necessary is a model that goes beyond where the user is attending to but rather what is the user intending to do. A model of intention would allow the robot to infer the user's goal in the near future and go beyond reacting to immediate decisions only.
Intention inference has caught researcher's attention in recent years and promising solutions have been achieved through observing user's eye gaze \cite{Huang:2016dj}, body motion \cite{Ravichandar:2015ii} or task objects \cite{Liu:2015km}. These contributions target safe interactions between humans and sedentary robots with shared workspaces. Thus, the question remains open whether there is a model which suits the setup of a handheld robot which is characterised by close shared physical dependency and a \textit{working together} rather than a \textit{turn taking} cooperative strategy.
Our work is guided by the following research questions:
\begin{enumerate}[label=\textbf{Q\arabic*}]
\item How can user intention be modelled in the context of a handheld robot task?
\label{Q1}
\item To what extent does intention prediction affect the cooperation with a handheld robot?
\label{Q2}
\end{enumerate}
\begin{figure}[t]
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Intention_Prediction_Title}
\caption{This picture shows a participant within our user intention prediction study. The participant uses the robot to solve an assembly task and is about to decide where to place the currently held block. Using the eye tracker the prediction system extracts the user's gaze pattern which is used for action prediction.
}
\label{fig:blockcopygameparticipant}
\vspace{-0.5em}
\end{figure}
For our study, we use the open robotic platform\footnote{3D CAD models available from handheldrobotics.org}, introduced in \cite{GreggSmith:2016cz} in combination with an eye tracking system as reported in \cite{Stolzenwald:2018un}. Within a simulated assembly task, eye gaze information is used to predict subsequent user actions. The two principal parts of this study consist of modelling user intention in the first place followed by testing it through an assistive pick and place task. Our contribution is an intention prediction model with real-time capabilities that allows for human-robot collaboration through online plan adaptation in assistive tasks. Figure \ref{fig:intentionpredictionflowchart} shows an overview of our proposed system.
\begin{figure*}[h]
\vspace{0.5em}
\centering
\includegraphics[width=0.95\linewidth]{Pictures/Intention_Prediction_Flow_Chart}
\caption{Overview of the intention prediction model and its use for the robot's motion control.}
\label{fig:intentionpredictionflowchart}
\end{figure*}
\section{Background and Related Work}
In this section, we deliver a summary of earlier work on handheld robots and its control based on user perception. Furthermore, we review existing methods for intention inference with a focus on human gaze behaviour.
\subsection{Handheld Robots}
Early handheld robot work \cite{GreggSmith:2015bh} used a trunk-shaped robot with 4-DoF to explore issues of autonomy and task performance. This was later upgraded to a 6-DoF (joint space) mechanism \cite{GreggSmith:2016cz} and used gestures, such as pointing, to study user guidance. These earlier works demonstrate how users benefit from the robot's quick and accurate movement while the robot profits from the human's tactical motion. Most importantly, increased cooperative performance was measured with an increased level of the robot's autonomy. It was furthermore found that cooperative performance significantly increases when the robot communicates its plans e.g. via a robot-mounted display \cite{GreggSmith:2016hn}.
Within this series of work, another problem was identified: the robot does not sense the user's intention and thus potential conflicts with the robot's plan remain unsolved. For example, when the user would point the robot towards a valid subsequent goal, the robot might have already chosen a different one and keep pointing towards it rather than adapting its task plan. This led to irritation and frustration in users on whom the robot's plan was imposed.
Efforts towards involving user perception in the robot's task planning were made in our recent work on estimating user attention~\cite{Stolzenwald:2018un}. The method was inspired by work from Land et al. on how humans' eye gaze is closely related to manual actions \cite{Land:2016kw}. The attention model measures the current visual attention to bias the robot's decisions. In a simulated \textit{space invader} styled task, different levels of autonomy were tested over varying configurations of speed demands. It was found that both the fully autonomous mode (robot makes every decision) and the attention driven mode (robot decides based on gaze information) outperform manual task execution. Notably, for high-speed levels, the increased performance was most evident for the attention-driven mode which was also rated more helpful and perceived rather cooperative than the fully autonomous mode.
As opposed to an intention model, the attention model would react to the current state of eye gaze information only, rather than using its history to make predictions about the user's future goals. We suggest that this would be required for cooperative task solving for complex tasks like assembly where there is an increased depth of subtasks.
\subsection{Intention Prediction}
Intention estimation in robotics is in part driven by the demand for safe human-robot interaction and efficient cooperation.
Ravichandar et al. investigated intention inference based on human body motion. Using Microsoft Kinect motion tracking as an input for a neural network, reaching targets were successfully predicted within an anticipation time of approximately \SI{0.5}{s} prior to the hand touching the object\cite{Ravichandar:2015ii}. Similarly, Saxena et al. introduced a measure of affordance to make predictions about human actions and reached 84.1\%/74.4\% accuracy \SI{1}{s}/\SI{3}{s} in advance, respectively\cite{Koppula:2016ja}. Later, Ravichandar et al. added human eye gaze tracking to their system and used the additional data for pre-filtering to merge it with the existing motion-based model \cite{Ravichandar:2016th}. The anticipation time was increased to \SI{0.78}{s}.
Huang et al. used gaze information from a head-mounted eye tracker to predict customers' choices of ingredients for sandwich making. Using a support vector machine (SVM), an accuracy of approximately 76\% was achieved with an average prediction time of \SI{1.8}{s} prior to the verbal request \cite{Huang:2015iw}.
In subsequent work, Huang \& Mutlu used the model as a basis for a robot's anticipatory behaviour which led to more efficient collaboration compared to following verbal commands only \cite{Huang:2016dj}.
We note that the above work targets intention inference purposed for \textit{external} robots which are characterised by a shared workspace with a human but can move independently. It is unclear whether these methods are suitable for close cooperation as it can be found in the handheld robot setup.
\subsection{Human Gazing Behaviour}
The intention model presented in this paper is mainly driven by eye gaze data. Therefore, we review work on human gaze behaviour to inform the underlying assumptions of our model.
Land et al. found that fixations towards an object often precede a subsequent manual interaction by around \SI{0.6}{s} \cite{Land:2016kw}. Subsequent work revealed that the latency between eye and hand varies between different tasks \cite{Land:2001hl}. Similarly, Johansson et al. \cite{Johansson:2001ck} found that objects are most salient for humans when they are relevant for task planning and preceding saccades were linked to short-term memory processes in \cite{Mennie:2006fo}.
The purpose of preceding fixations in manual tasks was furthermore explored through virtual \cite{Ballard:1995iy} and real \cite{Pelz:2001fb} block design tasks. The results show that humans gather information through vision \textit{just in time} rather than memorising e.g. all object locations.
\begin{figure}[b]
\vspace{-0.99em}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Front_Profile_Robot_labeled2_lowRes}
\caption{The handheld robot used for our study. It features a set of input buttons and a trigger at the handle, a 6-DoF tip and user perception through gaze tracking as reported in \cite{Stolzenwald:2018un}.}
\label{fig:frontprofilerobotlabeled}
\vspace{0.8em}
\end{figure}
\section{Prediction of User Intention}
In this section, we describe how intention prediction is modelled for the context of a handheld robot on the basis of an assembly task.
\subsection{Data Collection}
We chose a simulated version of a block copying task which has been used in the context of work in hand-eye coordination \cite{Ballard:1995iy,Pelz:2001fb}. Participants of the data collection trials were asked to use the handheld robot (cf. figure \ref{fig:frontprofilerobotlabeled}) to pick blocks from a stock area and place them in the workspace area at one of the associated spaces indicated by a shaded model pattern. The task was simulated on a \SI{40}{inch} LCD TV display and the robot remained motionless during the data collection task to avoid distraction. We drew inspiration from a block design IQ test \cite{Miller:2009ib} and decided to use black and white patterns instead of colours. That way, a match with the model would, in addition, depend on the block's orientation which adds further complexity.
An overview of the task can be seen in figure \ref{fig:blockcopyinitexampleareas}, figure \ref{fig:blockcopyinitexamplemoves} shows examples of possible picking and placing moves.
\begin{figure}[h]
\vspace{0.5em}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/block_copy_init_example_areas}
\caption{Layout of the block copy task on a TV display. The area is divided into stock (red) and workspace (blue). The shaded pattern pieces in the workspace area have to be completed by placing the associated pieces from the stock using the real robot.}
\label{fig:blockcopyinitexampleareas}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[width=0.99\linewidth]{Pictures/block_copy_init_example_moves}
\caption{Examples of possible moves for block 1 and 4. A stock piece has to be moved to an associated piece in the pattern and match the model's orientation to complete it.}
\label{fig:blockcopyinitexamplemoves}
\vspace{-0.5em}
\end{figure}
In order to pick or place pieces, users have to point the robot's tip towards and close to the desired location and pull/release a trigger in the handle. The position of the robot and its tip is measured via a motion tracking system\footnote{Opti Track: https://optitrack.com}\hspace{-0.4em}. The handle houses another button which can be used to rotate the grabbed piece.
The opening or closing process of the virtual gripper takes \SI{1.3}{s} which is animated on the screen. If the participant tries to place a mismatch, the piece goes back to the stock and has to be picked up again. Participants are asked to solve the task swiftly and it is completed when all model pieces are copied. Throughout the task execution, we kept track of the user's eye gaze using a robot-mounted remote eye tracker in combination with a 3D gaze model from \cite{Stolzenwald:2018un}. Figure \ref{fig:blockcopygameparticipant} shows an example of a participant solving the puzzle.
For the data collection, 16 participants (7 females, $m_{age}$ = 25, \textit{SD} = 4) were recruited.
Each completed one practice trial to get familiar with the procedure, followed by another three trials for data collection, where stock pieces and model pieces were randomised prior to execution. The pattern consists of 24 parts with an even count of the 4 types.
The task starts with 5 pre-completed pieces to increase the diversity of solving sequences leaving 19 pieces to be completed by the participant. That way, a total amount of 912 episodes of picking and dropping were recorded.
\subsection{User Intention Model}
In the context of our handheld robot task, we define intention as the user's choice of which object to interact with next i.e. which stock piece to pick and on which pattern field to place it.
Based on our literature review, our modelling is guided by the following assumptions.
\begin{enumerate}[label=\textbf{A\arabic*}]
\item An intended object attracts the users' visual attention prior to interaction. \label{A1}
\item During task planning, the users' visual attention is shared between the intended object and other (e.g. subsequent) task-relevant objects.\label{A2}
\end{enumerate}
As a first step towards feature construction, the gaze information for an individual object was used to extract a visual attention profile (VAP) which is defined as the continuous probability of an object being gazed at. Let $\vect{x_{gaze}}$ be the 2D point of intersection between the gaze ray and the TV screen surface and $\vect{x_{i}}$ the 2D position of the $i$-th object on the screen. Then the gaze position can be compared to each object using the Euclidean distance:
\vspace{-0.5em}
\begin{equation}
d_i(t) = ||\vect{x_{gaze}}-\vect{x_{i}}||
\end{equation}
As a decrease of $d$ implies increased visual attention, the distance profile can be converted to a visual attention profile (VAP) using the following equation:
\vspace{-0.3em}
\begin{equation}
\vspace{-0.5em}
P_{gazed,i}(t) = \exp\left(\frac{-d_i(t)^2}{2\sigma^2}\right)
\end{equation}
where $\sigma$ defines the gaze distance resulting in a significant drop of $P_{gazed}$ and it was set to \SI{60}{\mm} based on the pieces' size and tracking tolerance. The intention model uses the VAP of the last \SI{4}{\s} before the point in time of the prediction. Due to the data update frequency of \SI{75}{Hz} the profile is discretised into a vector of 300 entries (cf. example in figure \ref{fig:visualattentionprofile}).
The prediction for picking and placing actions was modelled separately as they require different feature sets. As mentioned above, earlier studies about gaze behaviour during block copying \cite{Ballard:1995iy} and assembly \cite{Mennie:2006fo} suggest that the eye gathers information about both what to pick and where to place it prior to picking actions. For this reason, we combined pattern and stock information for picking predictions for each available candidate, resulting in the features selection:
\begin{enumerate}[leftmargin=.42in]
\item[$F_1$] The VAP of the object itself.
\item[$F_2$] The VAP of the matching piece in the pattern. If there are several, the one with the maximum visual attention is picked.
\end{enumerate}
\begin{figure}[b]
\vspace{-1.011em}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/visual_attention_profile}
\vspace{-1.5em}
\caption{Illustration of changing visual attention over time within the anticipation window of the prediction model for an individual object.
}
\label{fig:visualattentionprofile}
\vspace{0.7em}
\end{figure}
This goes in line with our assumptions \ref{A1}, \ref{A2}. Both features are vectors of real numbers between 0 and 1 with a length of $n = 300$. For the prediction of the dropping location, \ref{A2} is not applicable as the episode finishes with the placing of the part, which is why only $F_1$ (a vector with length $n = 300$) is used for prediction. Note that this feature contains information about fixation durations as well as saccade counts.
An SVM \cite{Hearst:1998ew} was chosen as a prediction model as this type of supervised machine learning model was used for similar classification problems in the past, e.g. \cite{Huang:2015iw}. We divided the sets of VAPs into two categories, one where the associated object was the intended object (labelled as \texttt{chosen = 1}) and another one for the objects that were not chosen for interaction (labelled as \texttt{chosen = 0}). Training and validation of the models were done through 5-fold cross validation \cite{Kohavi:1995wf}.
The accuracy of predicting the \texttt{chosen} label for individual objects is 89.6\% for picking actions and 98.3\% for placing. However, sometimes the combined decision is conflicting, e.g.\ when several stock pieces are predicted to be the intended ones. This is resolved by selecting the one with the highest probability $P($\texttt{chosen }$ = 1)$ in a one-vs-all setup \cite{Rifkin:2004vf}. This configuration was tested for scenarios with the biggest choice, e.g.\ when all 4 stock parts (random chance = 25\%) would be a reasonable choice to pick or when the piece to be placed matches 4 to 6 different pattern pieces (random chance = 17-25\%). This results in a correct prediction rate of 87.9\% for picking and 93.25\% for placing actions when the VAPs of the time up to just before the action time are used.
\section{Results of Intention Modelling}
Having trained and validated the intention prediction model for the case where VAPs range from $-4$ to 0 seconds prior to the interaction with the associated object, we are now interested in knowing to what extent the intention model predicts accurately at some time $t_{prior}$ prior to interaction. To answer this question, we extend our model analysis by calculating a $t_{prior}$-dependent prediction accuracy. Within a 5-fold cross validation setup, the \SI{4}{s}-anticipation window is iteratively moved away from the time of interaction and the associated VAPs are used to make a prediction about the subsequent user action using the trained SVM models. The validation is based on the aforementioned low-chance subsets, so that the chance of correct prediction through randomly selecting a piece would be $\leq25\%$. The shift of the anticipation window over the data set is done with a step width of 1 frame (\SI{13}{ms}). This is done for both the case of predicting which piece is picked up next as well as inferring intention concerning where it is going to be placed. For the time offsets $t_{prior}$ = 0, 0.5 and 1 seconds, the prediction of picking actions yields an accuracy $a_{pick}$ of 87.94\%, 72.36\% and 58.07\%. The performance of the placing intention model maintains a high accuracy over a time span of \SI{3}{s} with an accuracy $a_{place}$ of 93.25\%, 80.06\% and 63.99\% for the times $t_{prior}$ = 0, 1.5 and 3 seconds. In order to interpret these differences in performance, we investigated whether there is a difference between the mean duration of picking and placing actions. We applied a two-sample t-test and found that the picking time (mean = \SI{3.61}{s}, \textit{SD} = \SI{1.36}{s}) is significantly smaller than the placing time (mean = \SI{4.65}{s}, \textit{SD} = \SI{1.34}{s}), with $ p < 0.001, t = -16.12$.
As the prediction model of the picking actions implements the novel aspect of adding the VAPs of related objects, its comparison to existing methods is of particular interest. Figure \ref{fig:pickuppredictionovertimecompare} shows a comparison of our proposed model (where both features $F_{1}$ and $F_{2}$ are used) to the case where $F_{1}$ is the single basis for a prediction such as the model recently explored by Huang et al. \cite{Huang:2015iw}. It can be seen that both models well exceed the chance of picking randomly. Notably, the proposed model outperforms the existing one shortly after the subject ends the preceding move and presumably starts planning the next one. To further investigate the effect of the chosen model on the prediction performance, a two-factorial ANOVA was applied where the prediction time $t$ relative to the action and the model were set as the independent factors and the performance as dependent variable which reveals that the correct prediction rate of the proposed model is significantly higher ($p < 0.001$) than the one of the existing model.
\begin{figure}[h]
\centering
\includegraphics[width=0.99\linewidth]{Pictures/PickUp_Prediction_Over_Time_compare}
\caption{This diagram shows the performance of predicting pick up actions averaged over 912 samples for two models: our proposed model (red) and an SVM (black) which is based on the feature $F_{1}$ only, as proposed by Huang et al. \cite{Huang:2015iw}. It can be seen how both models perform better than chance (dashed black) and predict the actions with increasing accuracy as the prediction time $t$ approaches the time of the action's execution $t = 0$. $t_{mean}$ (with temporal SD $t_{SD}$) is the mean time of completing the last block and hence the earliest meaningful time of predicting picking as a subsequent action.}
\label{fig:pickuppredictionovertimecompare}
\end{figure}
\subsection{Qualitative Analysis}
For an in-depth understanding of how the intention models respond to different gaze patterns, we investigate the prediction profile i.e. the change of the prediction over time, for a set of typical scenarios.\\
\subsubsection{One Dominant Type}
A common observation was that the target object perceived most of the user's visual attention prior to interaction which goes in line with our assumption \ref{A1}. An example of these \textit{one type dominant} samples can be seen in figure \ref{fig:placingcorrecttruepositive1dominant3}. A subset of this category is the case where the user's eye gaze alters between the piece to pick and the matching place in the pattern i.e. where to put it (cf. figure \ref{fig:pickupcorrecttuenegativethereandbackwideformat}) which supports our assumption \ref{A2}.
For the majority of these one type dominant samples both the picking and placing prediction models predict correctly. \\
\subsubsection{Trending Choice}
While the anticipation time of the pick up prediction model lies within a second and is thus rather reactive, the placing intention model is characterised by a slow increase of likelihood during the task i.e. it shows a low-pass characteristic. Figure \ref{fig:trending choice} demonstrates that the model is robust against small attention gaps and intermediate glances at competitors, however, the model requires an increased time window to build up confidence.\\
%
\subsubsection{Incorrect Predictions}
There is a number of reasons for an incorrect prediction. Most commonly, a close by neighbour received more visual attention and was falsely classified as the intended object. In other cases, it was impossible to predict the intended object using our model due to missing saccades towards it or faulty gaze tracking.
\begin{figure*}[t!]
\vspace{0.5em}
\centering
\begin{subfigure}[t]{0.48\textwidth}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Placing_Correct_True_Positive_1Dominant_3}
\caption{\scriptsize One piece receives most of the user's visual attention prior to placing}
\label{fig:placingcorrecttruepositive1dominant3}
\vspace{1em}
\end{subfigure}
~
\begin{subfigure}[t]{0.48\textwidth}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/PickUP_Correct_Tue_Negative_ThereAndBack_wideFormat}
\caption{\scriptsize User gaze alters between stock piece and matching workspace location}
\label{fig:pickupcorrecttuenegativethereandbackwideformat}
\end{subfigure}%
\caption{
These diagrams show examples of correct predictions for \textit{one type dominant} samples. (a) shows how long fixation times (blue) result in a high probability value (red), e.g.\ for a location to place a piece. Similarly, (b) shows how the prediction model links the VAPs of related objects. The subject's gaze alters between two related objects, e.g.\ a piece to pick up and a matching location to place it (cf. orange and blue VAPs), leading to a high probability estimation (red) for this piece being the user-intended one.
}
\label{fig:one dominant}
\end{figure*}
\begin{figure*}[h]
\centering
\begin{subfigure}[h]{0.48\textwidth}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Placing_Correct_True_Positive_TrendingChoice_1_labelled}
\label{fig:placingcorrecttruepositivetrendingchoice1}
\end{subfigure}%
~
\begin{subfigure}[h]{0.48\textwidth}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Placing_Correct_True_Positive_TrendingChoice_2_labelled}
\label{fig:placingcorrecttruepositivetrendingchoice2}
\end{subfigure}
\vspace{-1em}
\caption{These two examples illustrate how the visual attention (blue) of an object builds up during the user's decision process in which case the intention prediction (red) remains undecided ($P_{chosen} < 0.5$) for a longer time compared to the case where no competition receives fixations (cf. fig \ref{fig:one dominant}). }
\label{fig:trending choice}
\vspace{0.4em}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\vspace{-2.5em}
\end{figure*}
\section{Discussion of Intention Modelling}
In addressing research question \ref{Q1}, we proposed a user intention model based on gaze cues for the prediction of actions which was assessed in a pick and place task. As a novel aspect introduced through this study, the predictions are not only based on saccades and fixation durations of an individual object but also on those of related objects. In other words, assessing the attention on objects in the workspace helps to predict which piece outside the current workspace is needed next. When the subject turns his/her attention towards the piece, the model interprets this as a confirmation rather than the start of a selection process. This helps to cut the time required for the model to gather relevant gaze information and makes predictions more reliable than traditional models.
We showed that, within this task, the prediction of different actions has different anticipation times i.e. dropping targets are identified quicker than picking targets. This can partially be explained by the fact that picking episodes are shorter than placing episodes. But more importantly, we observed that users planned the entire pick-place cycle rather than planning picking and placing actions separately. This becomes evident through the qualitative analysis which shows altering fixations between the piece to pick and where to place it. That way, the placing prediction model is able to already gather information at the time of picking.
The proposed model allows predictions \SI{500}{ms} prior to picking actions (72.36\% accuracy) and \SI{1500}{ms} prior to dropping actions (80.06\% accuracy). These numbers are encouraging for testing the prediction model in a real-time application. Therefore, we proceed with an experimental study where the intention model is used for cooperative behaviour.
\section{Intention Prediction Model Validation}
\vspace{-1mm}
In the second part of our study, we validate the proposed intention model for the case where it is used to control the robot's behaviour and motion. While the aforementioned experiments and analysis demonstrate that the intention model is capable of predicting users' short term goals while having full control over the robot's tip, it is unclear whether this is true for the case where the robot reacts to these predictions. For example, users might adapt their intention to the robot's plans just by seeing it moving towards a target which might differ from their initially intended move. That way, labelling the robot's predictions as being correct or incorrect in the same way as we did in the first study becomes invalid due to the lack of ground truth. For this reason, we propose to assess the intention model in an indirect way instead by observing users' reactions to the predictions with a focus on frustration. We hypothesise that a mismatch between the robot's and the user's plans would inflict user frustration and that frustration is reduced when the robot follows the true user intention compared to avoiding it.
\subsection{Intention Affected Robot Behaviour}
For the experimental validation of the intention model, we used the aforementioned block copy task and introduced an assistive behaviour to the robot which is controlled based on the predictions of a user's intended subsequent move i.e. which piece the user wants to pick up next or at which location the user wants to drop it. We created 3 different behaviour modes: \textit{Follow intention}, \textit{Rebel} and \textit{Random}. For each, the robot retreats to a crouched position while there is a low probability for each available target. When the probability of the target with the highest probability reaches a threshold, the robot reacts as follows in the different modes:
\begin{itemize}
\item \textbf{Follow Intention: }\\ The robot moves towards the target with the highest predicted intention.
\item \textbf{Rebel: } \\The robot avoids the target with the highest prediction and moves towards the target with the lowest predicted intention instead.
\item \textbf{Random: }\\The robot moves towards a random target.
\end{itemize}
We set a maximum decision time of \SI{1.3}{s} after which the robot executes the above-mentioned behaviour for the rare case where no probability exceeds the threshold. This prevents the robot from getting stuck in the crouched position e.g. when there is a time gap in the gaze tracking stream.
\subsection{Experiment Execution}
We recruited 20 new participants (6 females, $m_{age}$ = 26, \textit{SD} = 4) for the validation study of which 2 were later removed from the set for data analysis due to malfunctioning gaze tracking. Each was asked to first complete the task without the robot moving for familiarisation with the rules and the robot handling. This practice session was followed by 3 trials where, for each, the robot's behaviour was set to a different behaviour mode. The block pattern to complete as well as the order of the behaviour modes were randomised. Furthermore, 5 (out of 24) randomly chosen blocks were pre-completed to stimulate some diversity in solving strategies e.g. to prevent repeated line-by-line completion.
The participants were told to solve the trial tasks swiftly and that their performance was recorded. They did not receive any information about the behaviour modes but were told that the robot will move and try to help them with the task. Each trial was followed by the completion of a NASA Task Load Index (TLX) form \cite{Hart:1988ho} and \SI{3}{\min} resting time.
\section{Results and Discussion: Model Validation}
To determine the effect of the robot's behaviour mode on the subjects' frustration level, we performed an analysis of variance (ANOVA) with the mode as the independent variable and the frustration component of the TLX as a dependent variable. As the analysis yielded a significant effect ($p = .023$), it was further explored using post-hoc pairwise t-tests with applied Bonferroni correction. The frustration mean for the \textit{Rebel} group was identified as being significantly higher than in the \textit{Follow Intention} group ($p = .019$). No significant mean differences were found when comparing the \textit{Random} group to the others. The results can be seen in table \ref{tab:frustration} and figure \ref{fig:intentionvalidationfrustration}.
\begin{table}[b] \centering
\vspace{-1.3em}
\begin{tabular}{lll}
& \textbf{Follow Intention} & \textbf{Random} \\ \cline{2-3}
\multicolumn{1}{l|}{\textbf{Rebel}} & \multicolumn{1}{l|}{$p = .019$ *} & \multicolumn{1}{l|}{$p = .495$} \\ \cline{2-3}
\multicolumn{1}{l|}{\textbf{Random}} & \multicolumn{1}{l|}{$p = .469$} & \multicolumn{1}{l|}{-} \\ \cline{2-3}
\end{tabular}
\caption{Bonferroni corrected \textit{p}-values of pairwise t-test results for the differences in mode depended frustration means. The starred value is significant ($p <.05$).}
\label{tab:frustration}
\vspace{1em}
\end{table}
\begin{figure}[h]
\vspace{0.5em}
\centering
\includegraphics[width=0.85\linewidth]{Pictures/Intention_Validation_Frustration_starred}
\vspace{-0.6em}
\caption{Perceived frustration from the TLX results for each of the tested behaviour modes. The mean values of starred groups yield a significant difference (cf. table \ref{tab:frustration}).}
\label{fig:intentionvalidationfrustration}
\vspace{-1em}
\end{figure}
We extended our analysis to both, the combined TLX results which serve as an indicator for perceived task load and the measured performance which is defined as the number of completed blocks per minute. However, an applied ANOVA did not yield an effect of the robot's behaviour mode, neither on the combined TLX nor on the performance.
\begin{figure*}[t]
\vspace{0.5em}
\centering
\begin{subfigure}[t]{0.23\textwidth}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Demo_Follow_Prediction_Labelled_LowRes}
\caption{Prediction of the red piece during placing of the purple piece.}
\label{fig:demofollowprediction-}
\vspace{1em}
\end{subfigure}
~
\begin{subfigure}[t]{0.23\textwidth}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Demo_Follow_Reaching_Labelled_LowRes}
\caption{The robot's motion goes in line with the user's intention as it adapts its plans.}
\label{fig:demofollowreaching}
\end{subfigure}
~
\begin{subfigure}[t]{0.23\textwidth}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Demo_Rebel_Prediction_Labelled_LowRes}
\caption{Prediction of the pink piece while placing the purple one.}
\label{fig:demorebelprediction}
\end{subfigure}
~
\begin{subfigure}[t]{0.23\textwidth}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Demo_Rebel_Reaching_Labelled_LowRes}
\caption{
Avoiding user intent leads to a mismatch with the user's tactical motion.
}
\label{fig:demorebelreaching}
\end{subfigure}
\caption{These figures illustrate the systems' underlying intention estimation and how the different modes affect cooperation. The users' eye gaze model is represented as a yellow line while the estimated probability for a piece to be chosen by the user is indicated by its size. It can be seen how following the intention prediction assists the user with his/her choice (a,b) while avoiding the intended object (c,d) forces the user to adapt his/her plan to the robot's motion.}
\label{fig:demo}
\vspace{-0.5em}
\end{figure*}
As part of a qualitative review of the robot's behaviour we found that in the \textit{Rebel} mode, participants perform an increased number of corrective moves compared to the \textit{Follow Intention} scenario. Figure \ref{fig:demo} shows how the robot's aim matches the user's intention in the \textit{Follow Intention} mode whereas in the \textit{Rebel} example, the user rushes towards the intended aim but needs to correct his move as the robot aims for a different piece.
Some participants commented on the behaviour modes. The \textit{Follow Intention} mode was often preferred (e.g. \enquote{I liked being in charge and the robot was helpful} and \enquote{The robot followed my decisions}) whereas the \textit{Random} mode led to irritation in some users (e.g. \enquote{First I thought it would go where I wanted but then it started moving in an unpredictable way}). For the \textit{Rebel} mode, we observed divergent reactions. While some subjects struggled because of the mismatch between the robot's motion and their plans, others started following the robot's lead. This was also reflected in the comments e.g. \enquote{Now the robot does its own thing, I don't like it} versus \enquote{It was easier because I did not have to think much}.
\\
\vspace{-0.5em}
The observed difference in frustration ratings between the mode where the robot supports the user's predicted intention versus avoiding it is evidence for most of the intention predictions matching the true intention. With regards to \ref{Q2}, our interpretation of the results is that during the \textit{Follow Intention} trials, the robot did follow the users' preferred sequence rather than the users adapting it to the robotic motion which validates the proposed intention model and its application in assisted reaching.
The fact that the mean frustration for the \textit{Random} mode lies between the other two modes is expected given their effect on frustration outlined above. However, the effect is too subtle to be compared to random motion and the sample size too small for a reliable distinction.
Our analysis furthermore shows that user frustration is more sensitive to the robot's intention prediction than perceived task load or performance. We suggest that robotic systems should follow user intention when there are subtasks with similar priorities for enhanced cooperation.
\vspace{-0.8em}
\section{Conclusion}
We investigated the use of gaze information to infer user intention within the context of a handheld robot. A pick and place task was used to collect gaze data as a basis for an SVM-based prediction model. Results show that, depending on the anticipation time, picking actions can be predicted with up to 87.94\% accuracy and dropping actions with an accuracy of 93.25\%. Furthermore, the model allows action anticipation \SI{500}{ms} prior to picking and \SI{1500}{ms} prior to dropping. We show that merging gaze information with respect to objects that are linked to the same task in a single model helps to increase the prediction performance.
The developed intention model can be used to make predictions in real-time enabling the robot to align its plans to the user's preferred goals making it a cooperative tool for complex tasks.
The proposed model performs particularly well for tasks where several objects connect to the same subtasks. This opens its applicability to other tasks in assembly and assisted living.\\
\vspace{-0.8em}
{\bf Acknowledgements} To the German Academic Scholarship Foundation and UK's EPSRC. Opinions are the ones of the authors and not of the funding organisations.
\vspace{-0.9em}
\bibliographystyle{unsrt}
\section{Introduction}
A handheld robot shares properties of a handheld tool while being enhanced with autonomous motion as well as the ability to process task-relevant information and user signals.
Earlier work in this field explored the communication between user and robot to improve cooperation \cite{GreggSmith:2015bh} \cite{GreggSmith:2016hn}. Such one-way communication of task planning, however, is limited in that the robot has to lead the user. But as users exert their will and decisions, task conflicts emerge which in turn inflict user frustration and decrease cooperative task performance.
As a starting point of addressing this problem, extended user perception can be introduced to allow the robot to estimate the user's point of attention via eye gaze in 3D space during task execution~\cite{Stolzenwald:2018un}. An estimate of users' visual attention informs the robot about areas of users' interest. While introducing attention was preferred, particularly for temporally demanding tasks, it is still limiting. What is needed is a model that goes beyond where the user is attending and instead captures what the user intends to do. A model of intention would allow the robot to infer the user's goal in the proximate future and go beyond reacting to immediate decisions only.
Intention inference has caught researchers' attention in recent years and promising solutions have been achieved through observing user's eye gaze \cite{Huang:2016dj}, body motion \cite{Ravichandar:2015ii} or task objects \cite{Liu:2015km}. These contributions target safe interactions between humans and sedentary robots with shared workspaces. Thus, the question remains open whether there is a model which suits the setup of a handheld robot which is characterised by close shared physical dependency and a \textit{working together} rather than a \textit{turn taking} cooperative strategy.
Our work is guided by the following research questions
\begin{enumerate}[label=\textbf{Q\arabic*}]
\item How can user intention be modelled in the context of a handheld robot task?
\label{Q1}
\item To what extent does intention prediction affect the cooperation with a handheld robot?
\label{Q2}
\end{enumerate}
\begin{figure}[t]
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Intention_Prediction_Title}
\caption{This picture shows a participant within our user intention prediction study. The participant uses the robot to solve an assembly task and is about to decide where to place the currently held block. Using the eye tracker the prediction system extracts the user's gaze pattern which is used for action prediction.
}
\label{fig:blockcopygameparticipant}
\vspace{-0.5em}
\end{figure}
For our study, we use the open robotic platform\footnote{3D CAD models available from handheldrobotics.org}, introduced in \cite{GreggSmith:2016cz} in combination with an eye tracking system as reported in \cite{Stolzenwald:2018un}. Within a simulated assembly task, eye gaze information is used to predict subsequent user actions. The two principal parts of this study consist of modelling user intention in the first place followed by testing it through an assistive pick and place task. Our contribution is an intention prediction model with real-time capabilities that allows for human-robot collaboration through online plan adaptation in assistive tasks. Figure \ref{fig:intentionpredictionflowchart} shows an overview of our proposed system.
\begin{figure*}[h]
\vspace{0.5em}
\centering
\includegraphics[width=0.95\linewidth]{Pictures/Intention_Prediction_Flow_Chart}
\caption{Overview of the intention prediction model and its use for the robot's motion control.}
\label{fig:intentionpredictionflowchart}
\end{figure*}
\section{Background and Related Work}
In this section, we deliver a summary of earlier work on handheld robots and its control based on user perception. Furthermore, we review existing methods for intention inference with a focus on human gaze behaviour.
\subsection{Handheld Robots}
Early handheld robot work \cite{GreggSmith:2015bh} used a trunk-shaped robot with 4-DoF to explore issues of autonomy and task performance. This was later upgraded to a 6-DoF (joint space) mechanism \cite{GreggSmith:2016cz} and used gestures, such as pointing, to study user guidance. These earlier works demonstrate how users benefit from the robot's quick and accurate movement while the robot profits from the human's tactical motion. Most importantly, increased cooperative performance was measured with an increased level of the robot's autonomy. It was furthermore found that cooperative performance significantly increases when the robot communicates its plans e.g. via a robot-mounted display \cite{GreggSmith:2016hn}.
Within this series of work, another problem was identified: the robot does not sense the user's intention and thus potential conflicts with the robot's plan remain unsolved. For example, when the user would point the robot towards a valid subsequent goal, the robot might have already chosen a different one and keep pointing towards it rather than adapting its task plan. This led to irritation and frustration in users on whom the robot's plan was imposed.
Efforts towards involving user perception in the robot's task planning were made in our recent work on estimating user attention~\cite{Stolzenwald:2018un}. The method was inspired by work from Land et al. on how human eye gaze is closely related to manual actions \cite{Land:2016kw}. The attention model measures the current visual attention to bias the robot's decisions. In a simulated \textit{space invader} styled task, different levels of autonomy were tested over varying configurations of speed demands. It was found that both the fully autonomous mode (robot makes every decision) and the attention driven mode (robot decides based on gaze information) outperform manual task execution. Notably, for high-speed levels, the increased performance was most evident for the attention-driven mode which was also rated more helpful and perceived rather cooperative than the fully autonomous mode.
As opposed to an intention model, the attention model would react to the current state of eye gaze information only, rather than using its history to make predictions about the user's future goals. We suggest that this would be required for cooperative task solving for complex tasks like assembly where there is an increased depth of subtasks.
\subsection{Intention Prediction}
Intention estimation in robotics is in part driven by the demand for safe human-robot interaction and efficient cooperation.
Ravichandar et al. investigated intention inference based on human body motion. Using Microsoft Kinect motion tracking as an input for a neural network, reaching targets were successfully predicted within an anticipation time of approximately \SI{0.5}{s} prior to the hand touching the object~\cite{Ravichandar:2015ii}. Similarly, Saxena et al. introduced a measure of affordance to make predictions about human actions and reached 84.1\%/74.4\% accuracy \SI{1}{s}/\SI{3}{s} in advance, respectively~\cite{Koppula:2016ja}. Later, Ravichandar et al. added human eye gaze tracking to their system and used the additional data for pre-filtering to merge it with the existing motion-based model \cite{Ravichandar:2016th}. The anticipation time was increased to \SI{0.78}{s}.
Huang et al. used gaze information from a head-mounted eye tracker to predict customers' choices of ingredients for sandwich making. Using a support vector machine (SVM), an accuracy of approximately 76\% was achieved with an average prediction time of \SI{1.8}{s} prior to the verbal request \cite{Huang:2015iw}.
In subsequent work, Huang \& Mutlu used the model as a basis for a robot's anticipatory behaviour which led to more efficient collaboration compared to following verbal commands only \cite{Huang:2016dj}.
We note that the above work targets intention inference purposed for \textit{external} robots which are characterised by a shared workspace with a human but can move independently. It is unclear whether these methods are suitable for close cooperation as it can be found in the handheld robot setup.
\subsection{Human Gazing Behaviour}
The intention model presented in this paper is mainly driven by eye gaze data. Therefore, we review work on human gaze behaviour to inform the underlying assumptions of our model.
Land et al. found that fixations towards an object often precede a subsequent manual interaction by around \SI{0.6}{s} \cite{Land:2016kw}. Subsequent work revealed that the latency between eye and hand varies between different tasks \cite{Land:2001hl}. Similarly, Johansson et al. \cite{Johansson:2001ck} found that objects are most salient for humans when they are relevant for task planning, and preceding saccades were linked to short-term memory processes in \cite{Mennie:2006fo}.
The purpose of preceding fixations in manual tasks was furthermore explored through virtual \cite{Ballard:1995iy} and real \cite{Pelz:2001fb} block design tasks. The results show that humans gather information through vision \textit{just in time} rather than memorising e.g. all object locations.
\begin{figure}[b]
\vspace{-0.99em}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Front_Profile_Robot_labeled2_lowRes}
\caption{The handheld robot used for our study. It features a set of input buttons and a trigger at the handle, a 6-DoF tip and user perception through gaze tracking as reported in \cite{Stolzenwald:2018un}.}
\label{fig:frontprofilerobotlabeled}
\vspace{0.8em}
\end{figure}
\section{Prediction of User Intention}
In this section, we describe how intention prediction is modelled for the context of a handheld robot on the basis of an assembly task.
\subsection{Data Collection}
We chose a simulated version of a block copying task which has been used in the context of work in hand-eye coordination \cite{Ballard:1995iy,Pelz:2001fb}. Participants of the data collection trials were asked to use the handheld robot (cf. figure \ref{fig:frontprofilerobotlabeled}) to pick blocks from a stock area and place them in the workspace area at one of the associated spaces indicated by a shaded model pattern. The task was simulated on a \SI{40}{inch} LCD TV display and the robot remained motionless during the data collection task to avoid distraction. We drew inspiration from a block design IQ test \cite{Miller:2009ib} and decided to use black and white patterns instead of colours. That way, a match with the model would, in addition, depend on the block's orientation which adds further complexity.
An overview of the task can be seen in figure \ref{fig:blockcopyinitexampleareas}, figure \ref{fig:blockcopyinitexamplemoves} shows examples of possible picking and placing moves.
\begin{figure}[h]
\vspace{0.5em}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/block_copy_init_example_areas}
\caption{Layout of the block copy task on a TV display. The area is divided into stock (red) and workspace (blue). The shaded pattern pieces in the workspace area have to be completed by placing the associated pieces from the stock using the real robot.}
\label{fig:blockcopyinitexampleareas}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[width=0.99\linewidth]{Pictures/block_copy_init_example_moves}
\caption{Examples of possible moves for block 1 and 4. A stock piece has to be moved to an associated piece in the pattern and match the model's orientation to complete it.}
\label{fig:blockcopyinitexamplemoves}
\vspace{-0.5em}
\end{figure}
In order to pick or place pieces, users have to point the robot's tip towards and close to the desired location and pull/release a trigger in the handle. The position of the robot and its tip is measured via a motion tracking system\footnote{Opti Track: https://optitrack.com}\hspace{-0.4em}. The handle houses another button which can be used to rotate the grabbed piece.
The opening or closing process of the virtual gripper takes \SI{1.3}{s} which is animated on the screen. If the participant tries to place a mismatch, the piece goes back to the stock and has to be picked up again. Participants are asked to solve the task swiftly and it is completed when all model pieces are copied. Throughout the task execution, we kept track of the user's eye gaze using a robot-mounted remote eye tracker in combination with a 3D gaze model from \cite{Stolzenwald:2018un}. Figure \ref{fig:blockcopygameparticipant} shows an example of a participant solving the puzzle.
For the data collection, 16 participants (7 females, $m_{age}$ = 25, \textit{SD} = 4) were recruited.
Each completed one practice trial to get familiar with the procedure, followed by another three trials for data collection, where stock pieces and model pieces were randomised prior to execution. The pattern consists of 24 parts with an even count of the 4 types.
The task starts with 5 pre-completed pieces to increase the diversity of solving sequences leaving 19 pieces to be completed by the participant. That way, a total amount of 912 episodes of picking and dropping were recorded.
\subsection{User Intention Model}
In the context of our handheld robot task, we define intention as the user's choice of which object to interact with next i.e. which stock piece to pick and on which pattern field to place it.
Based on our literature review, our modelling is guided by the following assumptions.
\begin{enumerate}[label=\textbf{A\arabic*}]
\item An intended object attracts the users' visual attention prior to interaction. \label{A1}
\item During task planning, the users' visual attention is shared between the intended object and other (e.g. subsequent) task-relevant objects.\label{A2}
\end{enumerate}
As a first step towards feature construction, the gaze information for an individual object was used to extract a visual attention profile (VAP) which is defined as the continuous probability of an object being gazed. Let $\vect{x_{gaze}}$ be the 2D point of intersection between the gaze ray and the TV screen surface and $\vect{x_{i}}$ the 2D position of the $i$-th object in the screen. Then the gaze position can be compared to each object using the Euclidean distance:
\vspace{-0.5em}
\begin{equation}
d_i(t) = ||\vect{x_{gaze}}-\vect{x_{i}}||
\end{equation}
As a decrease of $d$ implies increased visual attention, the distance profile can be converted to a visual attention profile (VAP) using the following equation:
\vspace{-0.3em}
\begin{equation}
\vspace{-0.5em}
P_{gazed,i}(t) = \exp\left(\frac{-d_i(t)^2}{2\sigma^2}\right)
\end{equation}
where $\sigma$ defines the gaze distance resulting in a significant drop of $P_{gazed}$ and it was set to \SI{60}{\mm} based on the pieces' size and tracking tolerance. The intention model uses the VAP of the last \SI{4}{\s} before the point in time of the prediction. Due to the data update frequency of \SI{75}{Hz} the profile is discretised into a vector of 300 entries (cf. example in figure \ref{fig:visualattentionprofile}).
The prediction for picking and placing actions was modelled separately as they require different feature sets. As mentioned above, earlier studies about gaze behaviour during block copying \cite{Ballard:1995iy} and assembly \cite{Mennie:2006fo} suggest that the eye gathers information about both what to pick and where to place it prior to picking actions. For this reason, we combined pattern and stock information for picking predictions for each available candidate, resulting in the features selection:
\begin{enumerate}[leftmargin=.42in]
\item[$F_1$] The VAP of the object itself.
\item[$F_2$] The VAP of the matching piece in the pattern. If there are several, the one with the maximum visual attention is picked.
\end{enumerate}
\begin{figure}[b]
\vspace{-1.011em}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/visual_attention_profile}
\vspace{-1.5em}
\caption{Illustration of changing visual attention over time within the anticipation window of the prediction model for an individual object.
}
\label{fig:visualattentionprofile}
\vspace{0.7em}
\end{figure}
This goes in line with our assumptions \ref{A1}, \ref{A2}. Both features are vectors of real numbers between 0 and 1 with a length of $n = 300$. For the prediction of the dropping location, \ref{A2} is not applicable as the episode finishes with the placing of the part, which is why only $F_1$ (a vector with length $n = 300$) is used for prediction. Note that this feature contains information about fixation durations as well as saccade counts.
An SVM \cite{Hearst:1998ew} was chosen as a prediction model as this type of supervised machine learning model was used for similar classification problems in the past, e.g. \cite{Huang:2015iw}. We divided the sets of VAPs into two categories, one where the associated object was the intended object (labelled as \texttt{chosen = 1}) and another one for the objects that were not chosen for interaction (labelled as \texttt{chosen = 0}). Training and validation of the models were done through 5-fold cross validation \cite{Kohavi:1995wf}.
The accuracy of predicting the \texttt{chosen} label for individual objects is 89.6\% for picking actions and 98.3\% for placing. However, sometimes the combined decision is conflicting e.g when several stock pieces are predicted to be the intended ones. This is resolved by selecting the one with the highest probability $P($\texttt{chosen }$ = 1)$ in a one-vs-all setup \cite{Rifkin:2004vf}. This configuration was tested for scenarios with the biggest choice e.g. when all 4 stock parts (random chance = 25\%) would be a reasonable choice to pick or when the piece to be placed matches 4 to 6 different pattern pieces (random chance = 17-25\%). This results in a correct prediction rate of 87.9\% for picking and 93.25\% for placing actions when the VAPs of the time up to just before the action time is used.
\section{Results of Intention Modelling}
Having trained and validated the intention prediction model for the case where VAPs range from $-4$ to 0 seconds prior to the interaction with the associated object, we are now interested in knowing to what extent the intention model predicts accurately at some time $t_{prior}$ prior to interaction. To answer this question, we extend our model analysis by calculating a $t_{prior}$-dependent prediction accuracy. Within a 5-fold cross validation setup, the \SI{4}{s}-anticipation window is iteratively moved away from the time of interaction and the associated VAPs are used to make a prediction about the subsequent user action using the trained SVM models. The validation is based on the aforementioned low-chance subsets, so that the chance of correct prediction through randomly selecting a piece would be $\leq25\%$. The shift of the anticipation window over the data set is done with a step width of 1 frame (\SI{13}{ms}). This is done for both the case of predicting which piece is picked up next as well as inferring intention concerning where it is going to be placed. For the time offsets $t_{prior}$ = 0, 0.5 and 1 seconds, the prediction of picking actions yields an accuracy $a_{pick}$ of 87.94\%, 72.36\% and 58.07\%. The performance of the placing intention model maintains a high accuracy over a time span of \SI{3}{s} with an accuracy $a_{place}$ of 93.25\%, 80.06\% and 63.99\% for the times $t_{prior}$ = 0, 1.5 and 3 seconds. In order to interpret these differences in performance, we investigated whether there is a difference between the mean duration of picking and placing actions. We applied a two-sample t-test and found that the picking time (mean = \SI{3.61}{s}, \textit{SD} = \SI{1.36}{s}) is significantly smaller than the placing time (mean = \SI{4.65}{s}, \textit{SD} = \SI{1.34}{s}), with $ p < 0.001, t = -16.12$.
As the prediction model of the picking actions implements the novel aspect of adding the VAPs of related objects, its comparison to existing methods is of particular interest. Figure \ref{fig:pickuppredictionovertimecompare} shows a comparison of our proposed model (where both features $F_{1}$ and $F_{2}$ are used) to the case where $F_{1}$ is the single basis for a prediction such as the model recently explored by Huang et al. \cite{Huang:2015iw}. It can be seen that both models well exceed the chance of picking randomly. Notably, the proposed model outperforms the existing one shortly after the subject ends the preceding move and presumably starts planning the next one. To further investigate the effect of the chosen model on the prediction performance, a two-factorial ANOVA was applied where the prediction time $t$ relative to the action and the model were set as the independent factors and the performance as dependent variable which reveals that the correct prediction rate of the proposed model is significantly higher ($p < 0.001$) than the one of the existing model.
\begin{figure}[h]
\centering
\includegraphics[width=0.99\linewidth]{Pictures/PickUp_Prediction_Over_Time_compare}
\caption{This diagram shows the performance of predicting pick up actions averaged over 912 samples for two models: our proposed model (red) and an SVM (black) which is based on the feature $F_{1}$ only, as proposed by Huang et al. \cite{Huang:2015iw}. It can be seen how both models perform better than chance (dashed black) and predict the actions with increasing accuracy as the prediction time $t$ approaches the time of the action's execution $t = 0$. $t_{mean}$ (with temporal SD $t_{SD}$) is the mean time of completing the last block and hence the earliest meaningful time of predicting picking as a subsequent action.}
\label{fig:pickuppredictionovertimecompare}
\end{figure}
\subsection{Qualitative Analysis}
For an in-depth understanding of how the intention models respond to different gaze patterns, we investigate the prediction profile i.e. the change of the prediction over time, for a set of typical scenarios.\\
\subsubsection{One Dominant Type}
A common observation was that the target object received most of the user's visual attention prior to interaction which goes in line with our assumption \ref{A1}. An example of these \textit{one type dominant} samples can be seen in figure \ref{fig:placingcorrecttruepositive1dominant3}. A subset of this category is the case where the user's eye gaze alters between the piece to pick and the matching place in the pattern i.e. where to put it (cf. figure \ref{fig:pickupcorrecttuenegativethereandbackwideformat}) which supports our assumption \ref{A2}.
For the majority of these one type dominant samples both the picking and placing prediction models predict correctly. \\
\subsubsection{Trending Choice}
While the anticipation time of the pick up prediction model lies within a second and is thus rather reactive, the placing intention model is characterised by a slow increase of likelihood during the task i.e. it shows a low-pass characteristic. Figure \ref{fig:trending choice} demonstrates that the model is robust against small attention gaps and intermediate glances at competitors, however, the model requires an increased time window to build up confidence.\\
%
\subsubsection{Incorrect Predictions}
There is a number of reasons for an incorrect prediction. Most commonly, a close by neighbour received more visual attention and was falsely classified as the intended object. In other cases, it was impossible to predict the intended object using our model due to missing saccades towards it or faulty gaze tracking.
\begin{figure*}[t!]
\vspace{0.5em}
\centering
\begin{subfigure}[t]{0.48\textwidth}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Placing_Correct_True_Positive_1Dominant_3}
\caption{\scriptsize One piece receives most of the user's visual attention prior to placing}
\label{fig:placingcorrecttruepositive1dominant3}
\vspace{1em}
\end{subfigure}
~
\begin{subfigure}[t]{0.48\textwidth}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/PickUP_Correct_Tue_Negative_ThereAndBack_wideFormat}
\caption{\scriptsize User gaze alters between stock piece and matching workspace location}
\label{fig:pickupcorrecttuenegativethereandbackwideformat}
\end{subfigure}%
\caption{
These diagrams show examples of correct predictions for \textit{one type dominant} samples. (a) shows, how long fixation times (blue) result in a high probability value (red) e.g. for a location to place a piece. Similarly, (b) shows, how the prediction model links the VAPs of related objects. The subject's gaze alters between two related objects e.g. a piece to pick up and a matching location to place it (cf. orange and blue VAPs) leading to a high probability estimation (red) for this piece being the user-intended one.
}
\label{fig:one dominant}
\end{figure*}
\begin{figure*}[h]
\centering
\begin{subfigure}[h]{0.48\textwidth}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Placing_Correct_True_Positive_TrendingChoice_1_labelled}
\label{fig:placingcorrecttruepositivetrendingchoice1}
\end{subfigure}%
~
\begin{subfigure}[h]{0.48\textwidth}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Placing_Correct_True_Positive_TrendingChoice_2_labelled}
\label{fig:placingcorrecttruepositivetrendingchoice2}
\end{subfigure}
\vspace{-1em}
\caption{These two examples illustrate how the visual attention (blue) of an object builds up during the user's decision process in which case the intention prediction (red) remains undecided ($P_{chosen} < 0.5$) for a longer time compared to the case where no competitor receives fixations (cf. fig \ref{fig:one dominant}). }
\label{fig:trending choice}
\vspace{0.4em}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\vspace{-2.5em}
\end{figure*}
\section{Discussion of Intention Modelling}
In addressing research question \ref{Q1}, we proposed a user intention model based on gaze cues for the prediction of actions which was assessed in a pick and place task. As a novel aspect introduced through this study, the predictions are not only based on saccades and fixation durations of an individual object but also on those of related objects. In other words, assessing the attention on objects in the workspace helps to predict which piece outside the current workspace is needed next. When the subject turns his/her attention towards the piece, the model interprets this as a confirmation rather than the start of a selection process. This helps to cut the time required for the model to gather relevant gaze information and makes predictions more reliable than traditional models.
We showed that, within this task, the prediction of different actions has different anticipation times i.e. dropping targets are identified quicker than picking targets. This can partially be explained by the fact that picking episodes are shorter than placing episodes. But more importantly, we observed that users planned the entire pick-place cycle rather than planning picking and placing actions separately. This becomes evident through the qualitative analysis which shows altering fixations between the piece to pick and where to place it. That way, the placing prediction model is able to already gather information at the time of picking.
The proposed model allows predictions \SI{500}{ms} prior to picking actions (72.36\% accuracy) and \SI{1500}{ms} prior to dropping actions (80.06\% accuracy). These numbers are encouraging for testing the prediction model in a real-time application. Therefore, we proceed with an experimental study where the intention model is used for cooperative behaviour.
\section{Intention Prediction Model Validation}
\vspace{-1mm}
In the second part of our study, we validate the proposed intention model for the case where it is used to control the robot's behaviour and motion. While the aforementioned experiments and analysis demonstrate that the intention model is capable of predicting users' short term goals while having full control over the robot's tip, it is unclear whether this is true for the case where the robot reacts to these predictions. For example, users might adapt their intention to the robot's plans just by seeing it moving towards a target which might differ from their initially intended move. That way, labelling the robot's predictions as being correct or incorrect in the same way as we did in the first study becomes invalid due to the lack of ground truth. For this reason, we propose to assess the intention model in an indirect way instead by observing users' reactions to the predictions with a focus on frustration. We hypothesise that a mismatch between the robot's and the user's plans would inflict user frustration and that frustration is reduced when the robot follows the true user intention compared to avoiding it.
\subsection{Intention Affected Robot Behaviour}
For the experimental validation of the intention model, we used the aforementioned block copy task and introduced an assistive behaviour to the robot which is controlled based on the predictions of a user's intended subsequent move i.e. which piece the user wants to pick up next or at which location the user wants to drop it. We created 3 different behaviour modes: \textit{Follow intention}, \textit{Rebel} and \textit{Random}. For each, the robot retreats to a crouched position while there is a low probability for each available target. When the probability of the target with the highest probability reaches a threshold, the robot reacts as follows in the different modes:
\begin{itemize}
\item \textbf{Follow Intention: }\\ The robot moves towards the target with the highest predicted intention.
\item \textbf{Rebel: } \\The robot avoids the target with the highest prediction and moves towards the target with the lowest predicted intention instead.
\item \textbf{Random: }\\The robot moves towards a random target.
\end{itemize}
We set a maximum decision time of \SI{1.3}{s} after which the robot executes the above-mentioned behaviour for the rare case where no probability exceeds the threshold. This prevents the robot from getting stuck in the crouched position e.g. when there is a time gap in the gaze tracking stream.
\subsection{Experiment Execution}
We recruited 20 new participants (6 females, $m_{age}$ = 26, \textit{SD} = 4) for the validation study of which 2 were later removed from the set for data analysis due to malfunctioning gaze tracking. Each was asked to first complete the task without the robot moving for familiarisation with the rules and the robot handling. This practice session was followed by 3 trials where, for each, the robot's behaviour was set to a different behaviour mode. The block pattern to complete as well as the order of the behaviour modes were randomised. Furthermore, 5 (out of 24) randomly chosen blocks were pre-completed to stimulate some diversity in solving strategies e.g. to prevent repeated line-by-line completion.
The participants were told to solve the trial tasks swiftly and that their performance was recorded. They did not receive any information about the behaviour modes but were told that the robot will move and try to help them with the task. Each trial was followed by the completion of a NASA Task Load Index (TLX) form \cite{Hart:1988ho} and \SI{3}{\minute} resting time.
\section{Results and Discussion: Model Validation}
To determine the effect of the robot's behaviour mode on the subjects' frustration level, we performed an analysis of variance (ANOVA) with the mode as the independent variable and the frustration component of the TLX as a dependent variable. As the analysis yielded a significant effect ($p = .023$), it was further explored using post-hoc pairwise t-tests with applied Bonferroni correction. The frustration mean for the \textit{Rebel} group was identified as being significantly higher than in the \textit{Follow Intention} group ($p = .019$). No significant mean differences were found when comparing the \textit{Random} group to the others. The results can be seen in table \ref{tab:frustration} and figure \ref{fig:intentionvalidationfrustration}.
\begin{table}[b] \centering
\vspace{-1.3em}
\begin{tabular}{lll}
& \textbf{Follow Intention} & \textbf{Random} \\ \cline{2-3}
\multicolumn{1}{l|}{\textbf{Rebel}} & \multicolumn{1}{l|}{$p = .019$ *} & \multicolumn{1}{l|}{$p = .495$} \\ \cline{2-3}
\multicolumn{1}{l|}{\textbf{Random}} & \multicolumn{1}{l|}{$p = .469$} & \multicolumn{1}{l|}{-} \\ \cline{2-3}
\end{tabular}
\caption{Bonferroni corrected \textit{p}-values of pairwise t-test results for the differences in mode-dependent frustration means. The starred value is significant ($p <.05$).}
\label{tab:frustration}
\vspace{1em}
\end{table}
\begin{figure}[h]
\vspace{0.5em}
\centering
\includegraphics[width=0.85\linewidth]{Pictures/Intention_Validation_Frustration_starred}
\vspace{-0.6em}
\caption{Perceived frustration from the TLX results for each of the tested behaviour modes. The mean values of starred groups yield a significant difference (cf. table \ref{tab:frustration}).}
\label{fig:intentionvalidationfrustration}
\vspace{-1em}
\end{figure}
We extended our analysis to both the combined TLX results which serve as an indicator for perceived task load and the measured performance which is defined as the number of completed blocks per minute. However, an applied ANOVA did not yield an effect of the robot's behaviour mode, neither on the combined TLX nor on the performance.
\begin{figure*}[t]
\vspace{0.5em}
\centering
\begin{subfigure}[t]{0.23\textwidth}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Demo_Follow_Prediction_Labelled_LowRes}
\caption{Prediction of the red piece during placing of the purple piece.}
\label{fig:demofollowprediction-}
\vspace{1em}
\end{subfigure}
~
\begin{subfigure}[t]{0.23\textwidth}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Demo_Follow_Reaching_Labelled_LowRes}
\caption{The robot's motion goes in line with the user's intention as it adapts its plans.}
\label{fig:demofollowreaching}
\end{subfigure}
~
\begin{subfigure}[t]{0.23\textwidth}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Demo_Rebel_Prediction_Labelled_LowRes}
\caption{Prediction of the pink piece while placing the purple one.}
\label{fig:demorebelprediction}
\end{subfigure}
~
\begin{subfigure}[t]{0.23\textwidth}
\centering
\includegraphics[width=0.99\linewidth]{Pictures/Demo_Rebel_Reaching_Labelled_LowRes}
\caption{
Avoiding user intent leads to a mismatch with the user's tactical motion.
}
\label{fig:demorebelreaching}
\end{subfigure}
\caption{These figures illustrate the systems' underlying intention estimation and how the different modes affect cooperation. The users' eye gaze model is represented as a yellow line while the estimated probability for a piece to be chosen by the user is indicated by its size. It can be seen how following the intention prediction assists the user with his/her choice (a,b) while avoiding the intended object (c,d) forces the user to adapt his/her plan to the robot's motion.}
\label{fig:demo}
\vspace{-0.5em}
\end{figure*}
As part of a qualitative review of the robot's behaviour we found that in the \textit{Rebel} mode, participants perform an increased number of corrective moves compared to the \textit{Follow Intention} scenario. Figure \ref{fig:demo} shows how the robot's aim matches the user's intention in the \textit{Follow Intention} mode whereas in the \textit{Rebel} example, the user rushes towards the intended aim but needs to correct his move as the robot aims for a different piece.
Some participants commented on the behaviour modes. The \textit{Follow Intention } mode was often preferred (e.g. \enquote{I liked being in charge and the robot was helpful} and \enquote{The robot followed my decisions}) whereas the \textit{Random} mode led to irritation in some users (e.g. \enquote{First I thought it would go where I wanted but then it started moving in an unpredictable way}). For the \textit{Rebel} mode, we observed divergent reactions. While some subjects struggled because of the mismatch between the robot's motion and their plans, others started following the robot's lead. This was also reflected in the comments e.g. \enquote{Now the robot does its own thing, I don't like it} versus \enquote{It was easier because I did not have to think much}.
\\
\vspace{-0.5em}
The observed difference in frustration ratings between the mode where the robot supports the user's predicted intention versus avoiding it is evidence for most of the intention predictions matching the true intention. With regards to \ref{Q2}, our interpretation of the results is that during the \textit{Follow Intention} trials, the robot did follow the users' preferred sequence rather than the users adapting it to the robotic motion which validates the proposed intention model and its application in assisted reaching.
The fact that the mean frustration for the \textit{Random} mode lies between the other two modes is expected given their effect on frustration outlined above. However, the effect is too subtle to be compared to random motion and the sample size too small for a reliable distinction.
Our analysis furthermore shows that user frustration is more sensitive to the robot's intention prediction than perceived task load or performance. We suggest that robotic systems should follow user intention when there are subtasks with similar priorities for enhanced cooperation.
\vspace{-0.8em}
\section{Conclusion}
We investigated the use of gaze information to infer user intention within the context of a handheld robot. A pick and place task was used to collect gaze data as a basis for an SVM-based prediction model. Results show that, depending on the anticipation time, picking actions can be predicted with up to 87.94\% accuracy and dropping actions with an accuracy of 93.25\%. Furthermore, the model allows action anticipation \SI{500}{ms} prior to picking and \SI{1500}{ms} prior to dropping. We show that merging gaze information with respect to objects that are linked to the same task in a single model helps to increase the prediction performance.
The developed intention model can be used to make predictions in real-time enabling the robot to align its plans to the user's preferred goals making it a cooperative tool for complex tasks.
The proposed model performs particularly well for tasks where several objects connect to the same subtasks. This opens its applicability to other tasks in assembly and assisted living.\\
\vspace{-0.8em}
{\bf Acknowledgements} To the German Academic Scholarship Foundation and UK's EPSRC. Opinions are the ones of the authors and not of the funding organisations.
\vspace{-0.9em}
\bibliographystyle{unsrt}
|
1,314,259,995,045 | arxiv | \part{Introduction}
This is a user guide on how to use the first version of our developed {\sc Maple} library (named {\tt Singularity}) for local bifurcation analysis of real zeros of scalar smooth maps; see \cite{GazorKazemi} for the main ideas. We remark that the term {\it singularity theory} has been used in many different Mathematics disciplines with essentially different objectives and tools but yet sometimes with similar terminologies; for examples of these see \cite{GazorKazemi}.
For more detailed information, definitions, and related theorems in what we call here {\it singularity theory}, we refer the reader to \cite{Melbourne87,Keyfitz,GatermannLauterbach,GatermannHosten,Gaffney,MurdBook,GovaertsBook,GolubitskySchaefferBook}.
\textbf{Verification and warning note}. \texttt{Singularity} is able to check and verify all of its computations. However, this sometimes adds an extra computational cost. This happens mainly for finding out the correct and suitable truncation degree and computational ring. Therefore, it is beneficial to skip the extra computations when it is not necessary. For an instance of benefit, consider that you need to obtain certain results for a large family of problems arising from the same origin. Therefore, you might be able to only check a few problems and conclude about the suitable truncation degree and computational ring for the whole family. Thereby, the commands of \texttt{Singularity} check and verify the output results unless it requires extra computation. In this case, a warning note of not verified output or possible errors is given; in these cases, a recommendation is always provided on how to verify or circumvent the problem. Lack of warning notes always indicates that the output results have been successfully verified.
\section{List of commands }
The following list shows a complete list of commands from singularity theory that have so far been implemented in {\tt Singularity}:
\begin{itemize}
\item \verb"Verify"; section \ref{SecVerify},
\item \verb"Normalform"; section \ref{SecNormalform},
\item \verb"UniversalUnfolding"; section \ref{SecUniversalUnfolding},
\item \verb"RecognitionProblem"; section \ref{SecRecognitionProblem},
\item \verb"CheckUniversal"; subsection \ref{SecCheckUniversal},
\item \verb"Transformation"; section \ref{SecTransformation},
\item \verb"TransitionSet"; subsection \ref{SecTransitionSet},
\item \verb"PersistentDiagram"; subsection \ref{SecPersistentDiagram},
\item \verb"NonPersistent"; subsection \ref{SecNonPersistent},
\item \verb"Intrinsic"; section \ref{SecIntrinsic},
\item \verb"AlgObjects", \verb"RT", \verb"T", \verb"P", \verb"S", \verb"TangentPerp", \verb"SPerp", \verb"IntrinsicGen"; section \ref{SecAlgObjects}. \\
\end{itemize}
The following enlists all implemented tools from computational algebraic geometry:
\begin{itemize}
\item \verb"MultMatrix"; section \ref{SecMultMatrix},
\item \verb"Division"; section \ref{SecDivision},
\item \verb"StandardBasis"; section \ref{SecStandardBasis},
\item \verb"ColonIdeal"; section \ref{SecColonIdeal},
\item \verb"Normalset"; section \ref{SecNormalset}.\\
\end{itemize}
In this user guide we will explain the capabilities, options and how each of these commands work.
After installing {\tt Singularity} on Maple software running on your computer, the above list is accessible by using the command
\verb"with(Singularity)". In fact, Maple enlists all the commands in the above two tables as its output.
\part{Singularity theory }
The terminology ``singularity theory'' has been used to deal with many different problems in different mathematical disciplines. Singularity theory here refers to the methodologies for dealing with the qualitative behavior of local zeros
\begin{eqnarray}
\label{eq:1}
g(x, \lambda )=0,
\end{eqnarray}
where \(x\in \mathbb{R}\) is a state variable and \(\lambda\) is a distinguished parameter. The cases of multi-dimensional parameters are dealt with through the notions of unfolding. {\tt Singularity} will be soon enhanced to deal with the cases of multi-dimensional state variables.
In many real life problems at certain solutions, \(g(x, \lambda)\) is {\it singular}, {\rm i.e., }
\begin{eqnarray*}
g(x, \lambda)=g_{x}(x, \lambda)=0.
\end{eqnarray*}
A singular germ \(g(x, \lambda)\) subjected to smooth changes demonstrates surprising changes in the {\it qualitative properties} of the solutions, {\rm e.g., } changes in the number of solutions. This phenomenon is called a {\it bifurcation}.
\section{Qualitative properties }
We define the {\it qualitative properties} as the invariance of an equivalence relation.
The equivalence relation used in {\tt Singularity} is {\it contact equivalence} and is defined by
\begin{equation}
f\sim g \Leftrightarrow f(x, \lambda)= S(x, \lambda)g(X(x, \lambda), \Lambda(\lambda))
\end{equation}
where \(S(x, \lambda)>0\) while \((X, \Lambda)\) is locally a diffeomorphism such that \(X_{x}(x, \lambda)>0\) and \(\Lambda^{\prime}(\lambda)>0\).
\section{Ring and truncation degree}\label{SecVerify}
In this section we describe how to determine the permissible computational ring and the truncation degree. In fact, each smooth germ with a nonzero-infinite Taylor series expansion must be truncated at certain degree. Further, there are four different options in {\tt Singularity} for computational rings, {\rm i.e., } polynomial, fractional, formal power series and smooth germ rings, that they each can be used by commands in {\tt Singularity} for bifurcation analysis of each singular germ. The command \verb"Verify(g)" derives the following information about the singular germ \(g\) for correct and efficient computations:
\begin{enumerate}
\item the permissible computational rings for the germ \(g\).
\item the least permissible truncation degree \(k\) for computations involving the germ \(g\). In other words, the computations modulo degrees of higher than (but not equal to) \(k\) does not lead to error.
\item our recommended computational ring.
\end{enumerate}
We are also interested in the above information for the following purposes:
\begin{itemize}
\item A list of germs \(G\) in two variables for either division, standard basis computations, multiplication matrix, or intrinsic part of an ideal.
\item A parametric germ \(H(x, \lambda, \alpha)\) with \(\alpha\in \mathbb{R}^p\) for either transition set computation or persistent bifurcation diagram classification.\\
\end{itemize}
\begin{tabularx}{\textwidth}{l|X}
\textbf{Command} & \textbf{Description} \\
\hline
\verb"Verify"(\(g\), \verb"Vars")& derives the permissible computational rings and permissible truncation degree.
\\ \hline
Default upper bound for truncation& a permissible truncation degree is computed as long as it is less than or equal to \(20.\)
\end{tabularx}
\subsection{Options}
\begin{itemize}
\item \verb"Ideal"; \verb"Persistent"; the command \verb"Verify"(\(G\), \verb"Vars", \verb"Ideal") deals with an ideal \(I\) generated by \(G\), {\rm i.e., } \(I:= \langle G\rangle_\mathscr{E},\) where \(G\) is a list of germs. It returns the permissible computational ring and a permissible truncation degree when the ideal \(I\) is of finite codimension. Otherwise, it remarks that ``the ideal is of infinite codimension." However, the command \verb"Verify"(\(H\), \verb"Vars", \verb"Persistent") determines the least permissible truncation degree \(k\) so that the computations associated with either persistent bifurcation diagram classification or transition sets would be correct.
\item \verb"Fractional"; \verb"Formal"; \verb"SmoothGerms"; \verb"Polynomial"; the command uses either the rings of fractional germs, formal power series or ring of smooth germs.
\item Upper bound for truncation degree \(N\); this lets the user to change the default upper bound truncation degree from \(20\) to \(N\).
\end{itemize}
We remark that \verb"Vars" stand for the state variable, say \(x\), and the distinguished
parameter, usually taken as \(\lambda\). This allows the user to work with his/her own choice
of these variables.
\begin{example}
\verb"Verify"(\(x^3-\sin(\lambda), [x, \lambda]\)) gives
The following rings are allowed as the means of computations:
Ring of smooth germs
Ring of formal power series
Ring of fractional germs
The truncation degree must be: 3
\end{example}
\begin{example}
\verb"Verify"(\(x^3-\sin(\lambda), [x, \lambda]\), 2) gives the following warning message:
\begin{center}
``Increase the upper bound for the truncation degree!''
\end{center}
\end{example}
\begin{example}
\verb"Verify"(\([x^4-x\sin(\lambda), x^3\lambda-\lambda\sin(\lambda), 3x^4, 3x^2\lambda], [x, \lambda], \verb"Ideal"\)) gives
The following rings are allowed as means of computations:
Ring of smooth germs
Ring of formal power series
Ring of fractional germs
The truncation degree must be: 4
\end{example}
\begin{example}
Command \verb"Verify"(\(x^3-\sin(\lambda)\), \([x,\lambda]\), \verb"Persistent") gives the least
permissible truncation degree to be 2.
\end{example}
\section{Normal form}\label{SecNormalform}
A germ \(f(x, \lambda)\) is called a normal form for the singular germ \(g(x, \lambda)\) when \(f\) has a minimal set of monomial terms in its Taylor expansion among all contact-equivalent germs to \(g\). Therefore, it is easier to analyze the solution set of \(f\) while it has the same qualitative behavior as zeros of \(g\) do.\\
\begin{tabularx}{\textwidth}{l|X}
\textbf{Command/the default options} & \textbf{Description} \\
\hline
\verb"Normalform"(\(g\),\verb"Vars" )& This function derives a normal form for \(g\).
\\\hline
The computational ring & The default ring is the ring of fractional germs.
\\\hline
Verify/Warning/Suggestion & It automatically verifies if fractional germ ring is sufficient for normal form computation of the input germ \(g.\) Otherwise, it writes a warning note along with a cognitive suggestion for the user.
\\\hline
Truncation degree & It, by default, detects the maximal degree \(k\) in which \(\mathcal{M}^{k+1}\subseteq \mathscr{P}(g).\) Thus, normal forms are computed modulo degrees higher than or equal to \(k+1.\)
\\\hline
Input germ & It, by default, takes the input germ \(g\) as a polynomial or a smooth germ. It truncates the smooth germs modulo the degree \(k+1.\)
\end{tabularx}
\subsection{Options}
\begin{itemize}
\item \(k;\) specifies the degree \(k\) so that computations are performed modulo degree \(k+1\). When the input degree \(k\) is too small for the input singular germ \(g\), the computation is not reliable. Thus, an error warning note is returned to inform the user of the situation along with cognitive suggestions to circumvent the problem.
\item \verb"Fractional", \verb"Formal", \verb"SmoothGerms"; \verb"Polynomial"; the command uses either the rings of fractional germs, formal power series or ring of smooth germs. When it is necessary, warning/cognitive suggestions are given accordingly.
\item \verb"list"; this generates a list of possible normal forms for the germ \(g\). Different normal forms may only occur due to possible alternative eliminations in intermediate order terms.
\end{itemize}
\begin{example}
\verb"Normalform"(\(x^3-\sin(\lambda), [x, \lambda]\), 10, \verb"SmoothGerms") generates
\begin{equation*}
x^3-\lambda.
\end{equation*} while \verb"Normalform"(\(1-\frac{1}{1+x^4-\lambda^2}, [x, \lambda]\), 10, \verb"Formal") gives rise to
\begin{eqnarray*}
x^4-\lambda^2.
\end{eqnarray*} Using \verb"Normalform"(\(x^5+x^3\lambda+\sin(\lambda^2), [x, \lambda]\), 10, \verb"Polynomial") gives the following suggestion and warning note.
Warning: The polynomial germ ring is not suitable for normal form computations.
Suggestion: Use the command \verb"Verify" to find the appropriate computational ring.
The following output might be wrong.
The germ is an infinite codimensional germ.
In fact the above statement is wrong since the high order term ideal contains
\begin{equation*}
\mathcal{M}^6+\mathcal{M}^4\langle \lambda\rangle+\mathcal{M}\langle \lambda^2\rangle.
\end{equation*}
\noindent Now the command \verb"Verify"(\(x^5+x^3\lambda+\sin(\lambda^2), [x, \lambda])\) gives rise to
Fractional germ ring; Formal power series ring; Smooth germ ring.
\noindent Thus, we use \verb"Normalform"(\(x^5+x^3\lambda+\sin(\lambda^2)\), \([x, \lambda]\), 10, \verb"Fractional") to obtain
\begin{equation*}
x^5+x^3\lambda+\lambda^2.
\end{equation*}
\end{example}
\section{Universal unfolding}\label{SecUniversalUnfolding}
Generally in dealing with singular problems, extra complications are experienced in the laboratory data than what are predicted by the modeling theoretical analysis. The problem here is due to {\it modeling imperfections}; natural phenomena can not be perfectly modeled by a mathematical model. In fact one usually neglects the impact of many factors like friction, pressure, and/or temperature, etc., to get a manageable mathematical model. Otherwise one will end up with a mathematical modeling problem with too many or infinite number of parameters. The imperfections around singular points may cause dramatic qualitative changes in the solution set of the model. Universal unfolding gives us a natural way to circumvent the problem of imperfections.
\begin{defn}
A parametric germ \(G(x, \lambda, \alpha)\) is called an {\em unfolding} for \(g(x, \lambda)\) when
\begin{equation*}
G(x, \lambda, 0)=g(x, \lambda).
\end{equation*} An unfolding \(G(x, \lambda, \alpha)\) for \(g\) is called a {\em versal unfolding} when for each unfolding \(H(x, \lambda, \beta)\) of \(g(x, \lambda)\) there is a smooth germ \(\alpha(\beta)\) so that \(H\) is contact-equivalent to \(G(x, \lambda, \alpha(\beta)).\) Roughly speaking, a versal unfolding is a parametric germ that contains a contact-equivalent copy of all small perturbations of \(g(x, \lambda)\). A versal unfolding with insignificant parameters is not suitable for the bifurcation analysis. So, we are interested in a versal unfolding that has a minimum possible number of parameters, that is called {\em universal unfolding}. In other words, universal unfolding has the minimum possible number of parameters so that they accommodate all possible qualitative types that small perturbations of \(g(x, \lambda)\) may experience.
\end{defn}
\begin{tabularx}{\textwidth}{l|X}
\textbf{Command/the default options} & \textbf{Description} \\
\hline
\verb"UniversalUnfolding"(\(g\),\verb"Vars" )& This function computes a universal unfolding for \(g\).
\\\hline
The computational ring & By default, {\tt Singularity } uses the ring of fractional germs.
\\\hline
Verify/Warning/Suggestion & This automatically derives the least sufficient degree for truncations and also verifies if fractional germ ring is sufficient for computation. Otherwise, it writes a warning note along with guidance on the suitable rings for computations and hints at other possible capabilities of {\tt Singularity.}
\\\hline
Degree & It, by default, detects the maximal degree \(k\) in which terms of degree higher than or equal to \(k+1\) can be ignored. Thus, the computations are performed modulo degree \(k+1.\)
\\\hline
Input germ & The default input germ \(g\) is a polynomial or a smooth germ. For an input smooth germ, the default procedure \verb"UniversalUnfolding" truncates the smooth germs modulo \(k+1,\) {\rm i.e., } modulo degrees higher than and equal to \(k+1.\)
\end{tabularx}
\subsection{Options}
\begin{itemize}
\item \verb"normalform"; A universal unfolding for normal form of \(g\) is derived by this option.
\item \verb"list"; this function provides the list of possible universal unfoldings for \(g\).
\item \(k\); the degree \(k\) determines the truncation degree so that all computations are performed modulo \(k+1.\) For low degrees of \(k,\) it may
derive wrong results. Thus, it gives a warning error and a suggestion for the user when \(k\) must be a larger number for correct result.
\item \verb"Fractional"; \verb"Formal"; \verb"SmoothGerms"; \verb"Polynomial"; this determines the computational ring. The command
\verb"UniversalUnfolding" gives a warning note when the user's choice of computational ring is not suitable for computations involving
the input germ \(g\) and writes a suggestion to circumvent the problem.
\end{itemize}
\begin{example} \verb"UniversalUnfolding"\((x^4+4x^3-\lambda x, [x, \lambda], \verb"normalform", \verb"list")\) gives rise to
\begin{eqnarray*}
& x^3-x\lambda+ \alpha_1+\alpha_2 \lambda &\\
& x^3-x\lambda+ \alpha_1+\alpha_2 x^2.&
\end{eqnarray*} \verb"UniversalUnfolding"\((6x-6\sin(x)-\lambda x, [x, \lambda], 6, \verb"normalform", \verb"list", \verb"Formal")\) leads to
\begin{eqnarray*}
& x^3-\lambda x+\alpha_{2}x^2+\alpha_{1} &\\
& x^3-\lambda x+\alpha_{2}\lambda+\alpha_{1}.&
\end{eqnarray*}
Now consider \(g(x, \lambda):=x^6+x^4\lambda+\lambda^2 .\)
\verb"UniversalUnfolding"\((g(x, \lambda), [x, \lambda], 6, \verb"normalform", \verb"list", \verb"Polynomial")\) gives the following warning error and suggestion:
\noindent \emph{Warning: The ring of polynomial germs is not suitable for normal form computations of \(g.\) }
\noindent \emph{Suggestion: The permissible computational ring options are} \verb"Fractional", \verb"SmoothGerms" \emph{and} \verb"Formal".
\end{example}
\section{Recognition problem }\label{SecRecognitionProblem}
We describe the command \verb"RecognitionProblem" on how it answers the recognition problem, that is, what kind of germs have the same normal form or universal unfolding for a given germ \(g\)?
\subsection{Normal form}
\begin{itemize}
\item \textbf{Low order terms}. Low order terms refer to the monomials in
\(\mathscr{S}(g)^{\perp}\) which do not appear in any contact-equivalent copy of \(g\).
\item \textbf{High order terms}. The ideal \(\mathscr{P}(g)\) represents the space of negligible terms that are called high order terms. These terms are eliminated in normal form of \(g.\)
\item \textbf{Intermediate order terms}. A monomial term is called an intermediate order term when it is neither a low order nor a high order term. Intermediate order terms may or may not be simplified in normal form computation of smooth germs.
\end{itemize}
The answer for the recognition problem for normal form of a germ \(g\) is a list of zero and nonzero conditions for certain derivatives of a hypothetical germ \(f.\) When these zero and nonzero conditions are satisfied for a given germ \(f,\) the germ \(f\) and \(g\) are contact-equivalent. Each germ with a minimal list of monomial terms in its Taylor expansion constitutes a normal form for \(g.\)
\subsection{Universal unfolding}\label{SecCheckUniversal}
Consider a parametric germ \(G(x, \lambda, \alpha)\) and a germ \(g(x, \lambda).\) Then, \(G\) is usually a universal unfolding for \(g\) when \(G(x, \lambda, 0)= g(x, \lambda)\) and certain matrix associated with \(G\) has a nonzero determinant. Thus, the answer of the recognition problem for universal unfolding is actually a matrix whose components are derivatives of a hypothetical parametric germ \(G\) satisfying \(G(x, \lambda, 0)= g(x, \lambda)\).\\
\begin{tabularx}{\textwidth}{l|X}
\textbf{Command/ default} & \textbf{Description} \\
\hline
\verb"RecognitionProblem"(\(g\), \verb"Vars") & returns a list of zero and nonzero conditions on certain derivatives of a hypothetical germ \(f\). A given germ \(f\) is contact-equivalent to \(g\) when those conditions are satisfied. \\
\hline
Computational ring & The default is fractional germ ring. The lack of warning notes is a confirmation that the fractional germ ring is suitable for computation.
\\
\hline
Truncation degree & It automatically computes an optimal truncation degree \(k\) and performs the remaining computations modulo degrees of higher than (but not equal to) \(k\). \\
\hline
Verification/Warning& A warning note of possible errors is given when the computational ring is not suitable for the germ \(g.\) The truncation degree is also checked and if it is not sufficiently large enough, a warning note is given. Warning notes are accompanied with cognitive suggestions to circumvent the problem.
\end{tabularx}
\subsubsection{Options}
\begin{itemize}
\item \(k\); this number represents the truncation degree.
\item Computational ring: \verb"Fractional", \verb"Formal", \verb"SmoothGerms"; \verb"Polynomial"; the command accordingly uses either the rings of
fractional germs, formal power series or smooth germs.
\item \verb"Universalunfolding"; it returns a matrix. The matrix components consists of
certain derivatives of a hypothetical parametric germ \(G\). Then, a parametric germ \(G(x, \lambda, \alpha)\) is a universal unfolding for \(g(x, \lambda)\) when \(G(x, \lambda, 0)= g(x, \lambda)\) and the associated matrix has a nonzero determinant.
\end{itemize}
\begin{example}
\verb"RecognitionProblem"(\(x^3+\sin(\lambda), [x, \lambda]\), 6, \verb"Formal") gives rise to
\begin{center}
"nonzero condition=", \([\frac{\partial}{\partial\lambda}f\neq 0, \frac{\partial^3}{\partial x^3}f\neq 0]\)\\
"zero condition=", \([f=0, \frac{\partial}{\partial x}f=0,\frac{\partial^2}{\partial x^2}f =0]\).
\end{center}
\noindent \verb"RecognitionProblem"(\(x^3+\exp(\lambda^2)-1, [x, \lambda]\), 6, \verb"universalunfolding", \verb"SmoothGerms") gives rise to
\begin{equation*}
\det\left( {\begin{array}{ccccc}
0 & 0 & 0 & g_{x,x,x}(0) & g_{x,x,\lambda}(0)\\
0 & g_{\lambda, \lambda}(0) & 0 & g_{x,x,\lambda}(0) & g_{x, \lambda, \lambda}(0)\\
G_{\alpha_1}(0) & G_{\lambda, \alpha_1}(0) & G_{x, \alpha_1}(0) & G_{x, x, \alpha_1}(0) & G_{x, \lambda, \alpha_1}(0)\\
G_{\alpha_2}(0) & G_{\lambda, \alpha_2}(0) & G_{x, \alpha_2}(0) & G_{x, x, \alpha_2}(0) & G_{x, \lambda, \alpha_2}(0)\\
G_{\alpha_3}(0) & G_{\lambda, \alpha_3}(0) & G_{x, \alpha_3}(0) & G_{x, x, \alpha_3}(0) & G_{x, \lambda, \alpha_3}(0)\\
\end{array}}
\right)\neq 0.
\end{equation*}
\end{example}
\noindent Let \(G(x,\lambda,\alpha)\) be a parametric germ where \(\alpha\in\mathbb{R}^{k}\).\\
\begin{tabularx}{\textwidth}{l|X}
\textbf{Command} & \textbf{Description} \\
\hline
\verb"CheckUniversal"(\(G\), \verb"Vars") & This function checks if a parametric germ \(G\) is a universal unfolding for
\(G(x,\lambda,0)\)
\end{tabularx}
\begin{example}
\verb"CheckUniversal"(\(x^5-\lambda+\alpha_{1}x+\alpha_{2}x^2+\alpha_{3}x^3, [x, \lambda]\)) gives
\begin{center}
"Yes"
\end{center}
\end{example}
\section{Transformations }\label{SecTransformation}
For each two contact-equivalent germs \(f\) and \(g,\) there are diffeomorphic germs \((X(x, \lambda), \Lambda(\lambda))\) and smooth germ \(S(x, \lambda)>0\) such that \(X_{x}(x, \lambda),\) \(\Lambda^{\prime}(\lambda)>0\) and \(f(x, \lambda)= S(x, \lambda)g(X(x, \lambda), \Lambda(\lambda))\).\\
\begin{tabularx}{\textwidth}{l|X}
\textbf{Command/option} & \textbf{Description} \\
\hline
\verb"Transformation"(\(g\), \verb"Vars") & This function computes the smooth germs \(X, \Lambda, S\) transforming the germ \(g\) into its normal form modulo degree \(k,\) where terms of degree higher than or equal to \(k\) are high order terms. \\
\hline
\verb"Transformation"(\(g, f\)) & This function computes suitable smooth maps \(X, \Lambda, S\) for transforming the germ \(g\) into \(f\) modulo high order terms. \end{tabularx}
\subsection{Options}
\begin{itemize}
\item \(k;\) this number specifies a degree \(k\) so that computations are performed modulo degrees higher than or equal to \(k\). When \(k\) is less than the
degrees of high order terms a warning note is given.
\end{itemize}
\begin{example}
\verb"Transformation"(\(x^3+\sin(\lambda)+\exp(x^5)-1, x^3+\lambda, [x, \lambda], 4\)) gives rise to
\begin{eqnarray*}
X&=&x+\lambda+x \lambda+\lambda^2, \qquad \Lambda(\lambda)= \lambda, \\
S&=&1-3x^2-3x \lambda-\frac{5}{6}\lambda^2-3x^3-9 \lambda x^2-9x \lambda^2-3 \lambda^3.
\end{eqnarray*}
\end{example}
\section{Bifurcation diagrams }
Bifurcation diagram analysis of a parametric system is performed by the notion of {\it persistent} and {\it non-persistent} bifurcation diagrams.
Bifurcation diagram of \eqref{eq:1} is defined by
\begin{eqnarray*}
\lbrace (x, \lambda)\mid g(x, \lambda, \alpha)=0 \rbrace.
\end{eqnarray*} A bifurcation diagram is called {\it persistent} when the bifurcation diagrams subjected to small perturbations in parameter space remain contact-equivalent to the unperturbed diagram.
\subsection{Persistent bifurcation diagram classification and transition set }
The classification of persistent bifurcation diagrams is performed by the notion of transition sets. In fact, a subset of parameter space is called a {\it transition set} when the associated bifurcation diagrams are non-persistent. The {\it transition set} is denoted by \(\Sigma\) and is usually a hypersurface of codimension one for germs of finite codimension. Then, one choice from each connected component of the complement of the transition set \(\Sigma\) makes a complete persistent bifurcation diagram classification of a given parametric germ. This provides a comprehensive insight into the persistent zero solutions of a parametric germ.
\subsubsection{Transition set }\label{SecTransitionSet}
The parameters associated with non-persistent bifurcation diagrams are split into three categories: {\it bifurcation}, {\it hysteresis}, and {\it double limit point}. These are defined and denoted by
\begin{eqnarray*}
\mathscr{B}&=& \{\alpha\in\mathbb{R}^{p}\mid G=G_{x}=G_{\lambda}=0 \hbox{ at } (x, \lambda, \alpha) \hbox { for some }(x, \lambda)\in \mathbb{R}\times \mathbb{R}\},\\
\mathscr{H}&=& \{\alpha\in\mathbb{R}^{p}\mid G=G_{x}=G_{xx}=0 \hbox{ at } (x, \lambda, \alpha) \hbox{ for some }(x, \lambda)\in \mathbb{R}\times \mathbb{R}\},\\
\mathscr{D}&=& \{\alpha\in\mathbb{R}^{p}\mid\exists(x_{1}, x_{2}, \lambda)\in \mathbb{R}\times \mathbb{R}\times \mathbb{R},\, x_{1}\neq x_{2}, \hbox{ so that } G =G_{x}=0 \hbox{ at } (x_{1},\lambda,\alpha) \hbox{ and }\\ &&\; (x_{2},\lambda,\alpha) \}.
\end{eqnarray*}
The transition set \(\Sigma\) is now given by \(\Sigma:=\mathscr{B}\cup\mathscr{H}\cup\mathscr{D}\).
Suppose that \(H\) is a singular parametric germ. \\
\begin{tabularx}{\textwidth}{l|X}
\textbf{Command/the default options} & \textbf{Description} \\
\hline
\verb"TransitionSet"(\(H, \alpha\), \verb"Vars")& This function estimates the transition set in terms of parameters of \(H.\) The default is to eliminate \(x\) and \(\lambda\) variables from the equations given by \(\mathscr{B}, \mathscr{H}, \mathscr{D}\).
\\\hline
Truncation degree & For non-polynomial input germs, by default, it automatically computes a suitable truncation degree \(k\) and truncates the input germ at degree \(k\), {\rm i.e., } preserving degrees less than or equal to \(k\).
\end{tabularx}
\subsubsection{Options}
\begin{itemize}
\item \([\alpha_1, \alpha_2, \ldots, \alpha_p]\); this hints to derive the transition set in terms of these variables while the attempts are best made to eliminate the rest of variables from the equations (as many as possible).
\item \verb"plot"; this function plots/animates transition set in parameter space.
\item \(\alpha_{i}\); When codimension is more than or equal to three, some parameters \(\alpha_3, \alpha_4, \ldots\) will be taken as fixed by default. This option refines the \verb"plot"/\verb"animate" option by allowing the user to change the fixed parameters to \(\alpha_{i}\).
\item \(k\); determines the truncation degree. The user may use the command \verb"Verify" to find an appropriate degree \(k.\)
\end{itemize}
\begin{example}
Here, we bring two examples from \cite[Page 206]{GolubitskySchaefferBook}.
\verb"TransitionSet"(\(x^4-\lambda x+\alpha_1+\alpha_2 \lambda+\alpha_3 x^2, [\alpha_1, \alpha_2, \alpha_3], [x, \lambda]\)) gives rise to
\begin{eqnarray*}
\mathscr{B}&:=&\{(\alpha_1, \alpha_2, \alpha_3)\,|\,\alpha_2^4+\alpha_2^2\alpha_3+\alpha_1=0\},\\
\mathscr{H}&:=&\{(\alpha_1, \alpha_2, \alpha_3)\,|\,128\alpha_2^2\alpha_3^3+3\alpha_3^4+72\alpha_1\alpha_3^2+432\alpha_1^2=0\},\\
\mathscr{D}&:=&\{(\alpha_1, \alpha_2, \alpha_3)\,|\,-\alpha_3^2+4\alpha_1=0,\,\alpha_3\leq 0\}.
\end{eqnarray*}
\verb"TransitionSet"(\(x^5-\lambda+\alpha_1 x+\alpha_2 x^2+\alpha_3 x^3, [ \alpha_1, \alpha_2, \alpha_3], [x, \lambda]\)) derives
\begin{eqnarray*}
\mathscr{B}&:=&\emptyset,\\
\mathscr{H}&:=&\{(\alpha_1, \alpha_2, \alpha_3)\,|\,-81\alpha_1 \alpha_3^4+27\alpha_2^2\alpha_3^3+360\alpha_1^2\alpha_3^2\\
&&-540\alpha_1\alpha_2^2\alpha_3+135
\alpha_2^4-400\alpha_1^3=0\},\\
\mathscr{D}&:=&\{(\alpha_1, \alpha_2, \alpha_3)\,|\,-16\alpha_3^6+224\alpha_1\alpha_3^4-88\alpha_2^2\alpha_3^3\\&&-1040\alpha_1^2\alpha_3^2+360
\alpha_1\alpha_2^2\alpha_3+
135\alpha_2^4+1600\alpha_1^3=0\}.
\end{eqnarray*}
\end{example}
\begin{example}
\verb"TransitionSet"(\(x^3+\sin(\lambda x)+\alpha_1+\alpha_2 x^2, [\alpha_1, \alpha_2], [x, \lambda], 5\), \verb"plot") generates Figure \ref{1}.
\begin{figure}[h]
\begin{center}
\subfigure[\label{1}]{\includegraphics[width=.40\columnwidth,height=.4\columnwidth]{TransitionSet1}}
\subfigure[\label{2}]{\includegraphics[width=.40\columnwidth,height=.4\columnwidth]{TransitionSet2}}
\caption{\small Blue color is for bifurcation \(\mathscr{B}\), green stands for hysteresis \(\mathscr{H}\) while the red color depicts transition set associated with double limit point \(\mathscr{D}\).}
\end{center}
\end{figure}
\verb"TransitionSet"(\(x^5-\lambda+\alpha_1 x+\alpha_2 x^2+\alpha_3 x^3, [\alpha_1, \alpha_2, \alpha_3], [x, \lambda]\), \verb"plot") creates an animation ending at Figure \ref{2}.
\end{example}
\subsubsection{Persistent bifurcation diagrams} \label{SecPersistentDiagram}
The command \verb"PersistentDiagram" is described in the following table.\\
\begin{tabularx}{\textwidth}{l|X}
\textbf{Command/the default options} & \textbf{Description} \\
\hline
\verb"PersistentDiagram"(\(H\), \verb"Vars") & this function plots/animates bifurcation diagrams in \(x-\lambda\) plane by passing through the parameter space. The default path is a circle around the singular point and it may include at most two parameters.
\end{tabularx}
\subsubsection{Options}
\begin{itemize}
\item \(\alpha_{i}\); for parameter space of dimension more than two, it chooses three values for \(\alpha_i\), {\rm i.e., } a negative, zero and a positive value for \(\alpha_{i}\). Then, it plots/animates bifurcation diagrams in \(x-\lambda\) plane by passing (circular path by default) through parameter space for fixed parameter \(\alpha_i\).
\item \(f(\zeta), g(\zeta)\); this function animates bifurcation diagrams in \(x-\lambda\) plane by passing through the given path (\(f(\zeta), g(\zeta)\)).
\item \verb"ShortList"; \verb"IntermediateList"; \verb"CompleteList"; either of these generates a list of parameter points associated with persistent bifurcation diagrams.
\item \verb"plot"; this option plots the persistent bifurcation diagrams associated with parameter points output of the previous option, {\rm i.e., } \verb"ShortList"; \verb"IntermediateList"; \verb"CompleteList".
\item \(k\); determines the truncation degree of the germ \(H.\)
\end{itemize}
\begin{example}
Now we present how the command \verb"PersistentDiagram" works.
\verb"PersistentDiagram"(\(x^5-\lambda+\alpha_1 x+\alpha_2 x^2+\alpha_3 x^3\), \([x, \lambda]\) , \verb"plot", \verb"CompleteList") generates a list from which the list of inequivalent bifurcation diagrams are chosen in Figure \ref{Fig2}.
\begin{figure}[h]
\begin{center}
\subfigure[\label{a}]{\includegraphics[width=.26\columnwidth,height=.2\columnwidth]{persistent0}}
\subfigure[\label{b}]{\includegraphics[width=.26\columnwidth,height=.2\columnwidth]{persistent1}}
\subfigure[\label{c}]{\includegraphics[width=.26\columnwidth,height=.2\columnwidth]{persistent2}}
\subfigure[\label{d}]{\includegraphics[width=.26\columnwidth,height=.2\columnwidth]{persistent3}}
\subfigure[\label{e}]{\includegraphics[width=.26\columnwidth,height=.2\columnwidth]{persistent4}}
\subfigure[\label{f}]{\includegraphics[width=.26\columnwidth,height=.2\columnwidth]{persistent5}}
\subfigure[\label{g}]{\includegraphics[width=.26\columnwidth,height=.2\columnwidth]{persistent6}}
\subfigure[\label{h}]{\includegraphics[width=.26\columnwidth,height=.2\columnwidth]{persistent7}}
\subfigure[\label{i}]{\includegraphics[width=.26\columnwidth,height=.2\columnwidth]{persistent8}}
\caption{\small Persistent bifurcation diagrams associated with \(x^5-\lambda+\alpha_1 x+\alpha_2 x^2+\alpha_3 x^3\). }\label{Fig2}
\end{center}
\end{figure}
\end{example}
\subsection{Singular boundary conditions}\label{SecNonPersistent}
Extra sources of non-persistence are caused by singular boundary conditions of a parametric scalar map restricted to a bounded domain.
Let \(W\subset\mathbb{R}^{p}\) be a closed disk and \(U, L\subset\mathbb{R}\) be two closed intervals. Next, consider
\begin{equation*}
F(x,\lambda,\alpha)=0
\end{equation*}
where \(\alpha \in W\) and \((x, \lambda)\in U \times L\); see \cite[Pages 154-158]{GolubitskySchaefferBook}. The new
non-persistent sources are defined by
\begin{eqnarray*}
\mathscr{L}_{C}&:=& \{\alpha\in W\mid F(x,\lambda,\alpha)=0 \hbox{ for some }(x, \lambda)\in \partial U\times \partial L\},\\
\mathscr{L}_{SH}&:=& \{\alpha\in W\mid F=F_{x}=0 \hbox{ for some }(x, \lambda)\in \partial U\times L\},\\
\mathscr{L}_{SV}&:=& \{\alpha\in W\mid F=F_{x}=0 \hbox{ for some }(x, \lambda)\in U\times \partial L\},\\
\mathscr{L}_{T}&:=& \{\alpha\in W\mid F=F_{\lambda}=0 \hbox{ for some }(x, \lambda)\in \partial U\times L\},
\end{eqnarray*}
\begin{eqnarray*}
\mathscr{G}_{1}&:=& \{\alpha\in W\mid F=0 \hbox{ at } (x_{0},\lambda,\alpha) \hbox{ for some }
(x_{0},\lambda)\in \partial U \times L,\\
&& x_{0}\neq x \hbox{ and }
F=F_{x}=0 \hbox{ at } (x, \lambda, \alpha) \hbox{ for some } (x, \lambda) \in U \times L\},\\
\mathscr{G}_{2}&:=& \{\alpha\in W\mid\exists(x_{1}, \lambda), (x_{2}, \lambda)\in \partial U\times L, x_{1}\neq x_{2}\hbox{ s.t } F =0 \hbox{ at }\\
&& (x_{i}, \lambda, \alpha) \hbox{ for } i=1, 2\},
\end{eqnarray*}
\begin{eqnarray*}
\mathscr{L_{B}}&:=& \{\alpha\in W\mid F=F_{x}=F_{\lambda}=0 \hbox{ at } (x,\lambda,\alpha) \hbox{ for some }\\
&& (x, \lambda)\in U\times L\},\\
\mathscr{L_{H}}&:=& \{\alpha\in W\mid F=F_{x}=F_{xx}=0 \hbox{ at } (x, \lambda, \alpha) \hbox{ for some }\\
&& (x, \lambda)\in U\times L\},\\
\mathscr{G}_{D}&:=& \{\alpha\in W\mid\exists(x_{1}, \lambda), (x_{2}, \lambda)\in U\times L, x_{1}\neq x_{2}\hbox{ s.t } F= F_{x}=0 \hbox{ at }\\
&& (x_{i}, \lambda, \alpha) \hbox{ for } i=1,2\}.
\end{eqnarray*}
In this case, the transition set is given by \(\Sigma:=\mathscr{L}\cup\mathscr{G}\), here
\begin{eqnarray*}
\mathscr{L}&:=&\mathscr{L_{B}}\cup \mathscr{L_{H}}\cup\mathscr{L}_{C}\cup\mathscr{L}_{SH}\cup\mathscr{L}_{SV}\cup\mathscr{L}_{T},\\
\mathscr{G}&:=& \mathscr{G}_{D}\cup\mathscr{G}_{1}\cup \mathscr{G}_{2}.
\end{eqnarray*}
For a finite codimension singular germ, \(\Sigma\) is a hypersurface of codimension one and each two choices from a connected component in the complement of \(\Sigma\) are contact-equivalent. Therefore, we can classify the persistent bifurcation diagrams by merely choosing one representative parameter from each component of the complement set of \(\Sigma\) and plotting the associated bifurcation diagrams. The command \verb"NonPersistent" is designed for this purpose.\\
\begin{tabularx}{\textwidth}{l|X}
\textbf{Command/default option} & \textbf{Description} \\
\hline
\verb"NonPersistent"(\(F\), \(\alpha\), \verb"Vars", \(U\), \(L\)) & This function computes transition set for \(F\) where bifurcation
diagrams are limited on \(U\times L\). Here, \(U\) and \(L\) are only taken as closed intervals. Further, it plots the transition set.
\\\hline
Box of figures & It plots transition set in \([-1, 1]\times [-1, 1]\) by default.
\end{tabularx}
\subsubsection{Options}
\begin{itemize}
\item \(V, W\); this option enforces that the computed transition set is plotted in \(V\times W\) instead of the default square \([-1, 1]\times [-1, 1]\).
\item \verb"Vertical" (\verb"Horizontal" is also similar); this assumes that the boundary conditions are given on \(U\times \mathbb{R},\) {\rm i.e., } there are only singular boundary conditions on vertical boundary lines.
\end{itemize}
\begin{example}
\verb"NonPersistent"(\(x^4-\lambda x+\alpha_1 x+\alpha_2 \lambda+\alpha_3 x^2, [\alpha_1, \alpha_2, \alpha_3], [x, \lambda], [-2,2], [1,3]\)) gives rise to
\begin{eqnarray*}
\mathscr{L}_{C}&=& \lbrace (\alpha_1, \alpha_2, \alpha_3)\,|\, \alpha_1+18+4\alpha_3+\alpha_2=0, \alpha_1+22+4\alpha_3+3\alpha_2=0, \\ && \alpha_1+14+4\alpha_3+\alpha_2=0, \alpha_1+10+4\alpha_3+3\alpha_2=0\rbrace,\\
\mathscr{L}_{SH}&=& \lbrace (\alpha_1, \alpha_2, \alpha_3)\,|\, 4\alpha_2\alpha_3-\alpha_1+32\alpha_2+4\alpha_3+48=0, 4\alpha_2\alpha_3+\alpha_1\\&&+32\alpha_2-4\alpha_3-48=0\rbrace,\\
\mathscr{L}_{SV}&=&\lbrace (\alpha_1, \alpha_2, \alpha_3)\,|\, 16\alpha_1\alpha_3^4+16\alpha_2\alpha_3^4-128\alpha_1^2\alpha_3^2-256\alpha_1\alpha_2\alpha_3^2
-128\alpha_2^2\alpha_3^2\\&&+256\alpha_1^3+768\alpha_1^2\alpha_2+768\alpha_1\alpha_2^2
+256\alpha_2^3-4\alpha_3^3+144\alpha_1\alpha_3+144\alpha_2\alpha_3-27=0,\\&& 16\alpha_1\alpha_3^4+48\alpha_2\alpha_3^4-128\alpha_1^2\alpha_3^2-768\alpha_1\alpha_2\alpha_3^2
-1152\alpha_2^2\alpha_3^2+256\alpha_1^3+2304\alpha_1^2\alpha_2\\&&+6912\alpha_1\alpha_2^2
+6912\alpha_2^3-36\alpha_3^3+1296\alpha_1\alpha_3+3888\alpha_2\alpha_3-2187=0 \rbrace,\\
\mathscr{L}_{T}&=& \lbrace (\alpha_1, \alpha_2, \alpha_3)\,|\, \alpha_2+2=0, 4\alpha_3+16+\alpha_1=0, \alpha_2-2=0, 4\alpha_3+16+\alpha_1=0\rbrace,\\
\mathscr{G}_{1}&=& \lbrace (\alpha_1, \alpha_2, \alpha_3)\,|\,4\alpha_2^2\alpha_3^3+80\alpha_2^2\alpha_3^2+16\alpha_2\alpha_3^3-72\alpha_1\alpha_2\alpha_3
+512\alpha_2^2\alpha_3\\&&+32\alpha_2\alpha_3^2+16\alpha_3^3+27\alpha_1^2
-320\alpha_1\alpha_2+72\alpha_1\alpha_3+1024\alpha_2^2-384\alpha_2\alpha_3
\\&&+176\alpha_3^2+224\alpha_1-1024\alpha_2+640\alpha_3+768=0, 4\alpha_2^2\alpha_3^3+80\alpha_2^2\alpha_3^2-16\alpha_2\alpha_3^3\\&&+72\alpha_1\alpha_2\alpha_3
+512\alpha_2^2\alpha_3-32\alpha_2\alpha_3^2+16\alpha_3^3+27\alpha_1^2+320\alpha_1\alpha_2
+72\alpha_1\alpha_3\\&&+1024\alpha_2^2+384\alpha_2\alpha_3+176\alpha_3^2+224\alpha_1
+1024\alpha_2+640\alpha_3+768=0
\rbrace,\\
\mathscr{G}_{2}&=& \lbrace (\alpha_1, \alpha_2, \alpha_3)\,|\,4\alpha_3+16+\alpha_1=0 \rbrace,\\
\mathscr{L_{B}}&=& \lbrace (\alpha_1, \alpha_2, \alpha_3)\,|\, \alpha_2^4+\alpha_2^2 \alpha_3+\alpha_1=0\rbrace,\\
\mathscr{L_{H}}&=& \lbrace (\alpha_1, \alpha_2, \alpha_3)\,|\, 128\alpha_2^2\alpha_3^3+3\alpha_3^4+72\alpha_1\alpha_3^2+432\alpha_1^2=0 \rbrace,\\
\mathscr{G}_{D}&=& \lbrace (\alpha_1, \alpha_2, \alpha_3)\,|\, \alpha_3^2-4\alpha_1=0 \rbrace.\\
\end{eqnarray*}
It also generates Figure \ref{Fig3}.
\begin{figure}[h]
\begin{center}
\subfigure[\label{fig:nonpersistent}]{\includegraphics[width=.40\columnwidth,height=.4\columnwidth]{nonpersistent}}
\caption{\small Transition set}\label{Fig3}
\end{center}
\end{figure}
\end{example}
\part{Tools from algebraic geometry }
In this section we describe how to compute some tools from computational algebraic geometry; see \cite{GazorKazemi} for more information.
\section{Multiplication Matrix}\label{SecMultMatrix}
Let \(\mathcal{R}\) be either of the rings of germs \(\mathscr{E},\) \(\mathbb{R}[[x, \lambda]],\) \(\mathbb{R}[x, \lambda]_{\langle x, \lambda\rangle}\) or \(R[x, \lambda]\); see \cite{GazorKazemi}. Now we describe how to compute the multiplication Matrix defined by
\begin{equation}\label{Mult}
\varphi_{u, J}: \frac{\mathcal{R}}{J}\rightarrow \frac{\mathcal{R}}{J}, \quad \varphi_{u, J}(f+J):= uf+ J,
\end{equation} where \(J\) is an ideal generated by a finite set \(A\subset \mathcal{R},\) {\rm i.e., } \(J:=\langle A\rangle_\mathcal{R}\), and \(u\) is a monomial; also see \cite[Equation 3.4]{GazorKazemi}.\\
\begin{tabularx}{\textwidth}{l|X}
\textbf{Command/option} & \textbf{Description} \\
\hline
\verb"MultMatrix"(\(A\), \(u\), \verb"Vars") & This function derives \(\varphi_{u, J}\) where \(u\) is a monomial, \(\varphi_{u, J}\) is defined by Equation \eqref{Mult}. \\\hline
Default computational ring & the fractional germ ring. \\
\hline
Truncation degree & When the input set of germs \(A\) only includes polynomials, \verb"MultMatrix"(\(A\), \(u\), \verb"Vars") does not need truncation degree. However, for non-polynomial input germs, a truncation degree \(k\) needs to be included. \\
\end{tabularx}
\subsection{Options}
\begin{itemize}
\item \(k\); determines the truncation degree. The user is advised to use the command \verb"Verify" to find an appropriate truncation degree \(k.\)
\item Computational ring: \verb"Fractional", \verb"Formal", \verb"SmoothGerms"; \verb"Polynomial"; the command uses either the rings of fractional germs, formal power series or ring of smooth germs. The command \verb"Verify" is an appropriate tool to find/verify the appropriate computational ring.
\end{itemize}
\begin{example}
The command \verb"MultMatrix"(\([x^6+\frac{12}{27}x^{10}\lambda^9, \frac{5}{3}x^{5}+\lambda
\sin(x^3), \lambda^2-\frac{2}{3}(1-\exp(x^5))], x, [x, \lambda] , 6, \verb"Formal"\)) gives rise to
\begin{equation*}
\left( {\begin{array}{ccccccccc}
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & -\frac{5}{3}\\
0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0\\
\end{array}}
\right).
\end{equation*}
\end{example}
\section{Divisions of germs }\label{SecDivision}
The following table describes how to use the command \verb"Division" to divide a germ \(g\) by a set of germs \(G:= \{f_i\mid i=1, \ldots, n\}\) where all these germs are in terms of the variables in \verb"Vars". Note that the ordering in the list of variables \verb"Vars" is important and determines how anti-lexicographical ordering is defined.\\
\begin{tabularx}{\textwidth}{l|X}
\textbf{Command} & \textbf{Description} \\
\hline
\verb"Division"(\(g\), \(G\), \verb"Vars") & This divides the germ \(g\) by the germs in \(G\) using anti-lexicographical ordering. \\
\hline
\end{tabularx}
\subsection{Options}
\begin{itemize}
\item \verb"Formal"; \verb"SmoothGerms"; \verb"Polynomial"; \verb"Fractional"; this determines the computational germ ring. In order to verify/check the computational ring for the division, one is advised to use the command \verb"Verify"(G, \verb"Ideal", \verb"Vars") and find the permissible computational ring.
\item \(k\); this option enforces the computations modulo degrees higher than or equal to \(k+1.\) The command \verb"Verify"(G, \verb"Ideal", \verb"Vars") also suggests an optimal permissible truncation degree \(k\).
\end{itemize}
\begin{example}
For an example we use \verb"Division"(\(\sin(x^7)-1, [x^5+x^6\exp(\lambda), x\lambda^3-\frac{2}{7}x\lambda^6-x^7, \lambda\cos(x^7)],\) \([x, \lambda], 8\), \verb"SmoothGerms"). This returns \begin{equation*} -1\end{equation*} as the remainder of \begin{equation*} \sin(x^7)-1\end{equation*} divided by
\begin{equation*} \{x^5+x^6\exp(\lambda), x\lambda^3-\frac{2}{7}x\lambda^6-x^7, \lambda\cos(x^7)\}\end{equation*} using the anti-lexicographical ordering with \(x\succ\lambda\).
\end{example}
\section{Standard basis} \label{SecStandardBasis}
Now we describe how to compute a standard basis for a set of germs in either of the following local rings: fractional germs \(\mathbb{R}[x, \lambda]_{\langle x, \lambda\rangle}\), formal power series \(\mathbb{R}[[x, \lambda]],\) and ring of smooth germs \(\mathscr{E}\); see \cite{GazorKazemi}. The command \verb"StandardBasis" follows the table and options described below. Note that \verb"Vars" denotes an ordered list of variables and the germs in \(G\) are in terms of the variables in \verb"Vars". The anti-lexicographical ordering is used here and is determined by the ordering of variables in \verb"Vars".\\
\begin{tabularx}{\textwidth}{l|X}
\textbf{Command} & \textbf{Description} \\
\hline
\verb"StandardBasis"(\(G\), \verb"Vars") & computes the standard basis of the polynomial germs in \(G\) in the fractional germ ring. \\
\hline
Default computational ring & fractional germ ring. \\
\hline
Default input & the default germs in \(G\) must be polynomial germs. For the cases of non-polynomials, it needs a truncation degree \(k.\)
\end{tabularx}
\subsection{Options}
\begin{itemize}
\item \(k\); this determines the truncation degree.
\item Consider the cases that the option \(k\) is used. A warning note is given when the truncation degree \(k\) is not sufficiently high to guarantee that
the output is correct. A warning note is given like ``The truncation degree is not sufficiently high and thus, the following results might be wrong.'' In this case, an appropriate truncation degree \(l\) is given.
\item \verb"Formal"; \verb"SmoothGerms"; \verb"Fractional"; this determines the computational germ ring and computes the standard basis accordingly.
\end{itemize}
\begin{example}
For an example, the command
\verb"StandardBasis"(\([x^5+x^2\sin(\lambda+x)+\lambda^2, x^3\lambda^2+\cos(\lambda)x, \lambda^6+x^4-\lambda x], [x, \lambda], 7, \verb"SmoothGerms"\))
computes the standard basis of the set of germs
\begin{equation*} \{x^5+x^2\sin(\lambda+x)+\lambda^2, x^3\lambda^2+\cos(\lambda)x, \lambda^6+x^4-\lambda x\}\end{equation*} as
\(\{x, \lambda^2\}.\)
\end{example}
\section{Colon ideals}\label{SecColonIdeal}
The colon ideal \(I: \langle g\rangle \) refers to the ideal defined by
\begin{equation*}
I: \langle g\rangle_\mathscr{E}= \{f\in \mathscr{E}: f \langle g\rangle_\mathscr{E}\subseteq I\}.
\end{equation*} Using the arguments on \cite[Page 22]{GazorKazemi}, we have \(I: \langle g\rangle_\mathscr{E}= \langle \frac{h_i}{g}| i=1, \ldots, n\rangle_\mathscr{E},\) where \(h_1, h_2, \ldots, h_n\) is a standard basis for the ideal \(I\cap \langle g\rangle_{\mathbb{R}[x, \lambda]}.\) The command \verb"ColonIdeal" follows this. \\
\begin{tabularx}{\textwidth}{l|X}
\textbf{Command} & \textbf{Description} \\
\hline
\verb"ColonIdeal"(\(I\), \(g\))& computes the colon ideal \(I:g\) for \(g\) in \(\mathscr{E}\).
\end{tabularx}
\begin{example}
\verb"ColonIdeal"(\([x^7+\lambda x^3-\lambda^2 x, \lambda x^6+\lambda^2 x^2-\lambda^3, x^3\lambda+x],\lambda\)) leads to the ideal generated by
\begin{equation*}
[x(\lambda x^2+1), \lambda^2 x^2-x^4-\lambda^3, \lambda^4+\lambda^2-x^2, x(x^4+\lambda^3+\lambda)].
\end{equation*}
\end{example}
\section{Complement spaces }\label{SecNormalset}
For computing universal unfolding of a singular germ \(g\), we need to compute a basis for a complement vector space for the tangent space \(T(g)\) associated with \(g.\) This is equivalent to computing a basis for the quotient space \(\mathscr{E}/T(g).\) More generally, the command
\verb"Normalset"(\(I\)) computes a monomial basis for \(\mathscr{E}/I\), when \(I\) is either an ideal or a vector space with finite codimension in the local ring \(\mathscr{E}\).\\
\begin{tabularx}{\textwidth}{l|X}
\textbf{Command} & \textbf{Description} \\
\hline
\verb"Normalset"(\(A\)) & computes a monomial basis for \(\mathscr{E}/I\), when \(A\) is a list of germs generating an ideal \(I:=\langle A\rangle\).
\end{tabularx}
\begin{example}
For example the command \verb"Normalset"(\([x^6+\lambda x^4+\lambda^2 x, \lambda x^5+\lambda^2 x^3+\lambda^3, 5x^6+3\lambda x^4, 5\lambda x^4+3\lambda^2 x^2, -3x^7-3\lambda x^5-\frac{25}{3}x^6]\)) returns
\begin{equation*}
[1, \lambda, x, \lambda^2, x^2, x^3, x^4, x^5, \lambda x, \lambda x^3, x^2\lambda]
\end{equation*} as a list of monomials for the complement of \(I\) in \(\mathscr{E}.\)
\end{example}
\part{Technical objects in singularity theory}
A user who is not interested in technical details and their commands may simply skip this section.
\section{Intrinsic ideals }\label{SecIntrinsic}
In this section we describe how to compute the intrinsic part of an ideal or a vector space. Let \(A\) and \(B\) be two lists indicating the generators of an ideal and a vector space, respectively. We intend to compute the maximal intrinsic ideal contained in \(\langle A\rangle_\mathscr{E}+\langle B\rangle_{\mathbb{R}}.\)\\
\begin{tabularx}{\textwidth}{l|X}
\textbf{Command/option} & \textbf{Description} \\
\hline
\verb"Intrinsic"(\(A\), \verb"Vars") & computes intrinsic part of the ideal generated by \(A\). It remarks when the ideal is of infinite codimension. \\
\hline
\verb"Intrinsic"(\(A, B\), \verb"Vars") & computes intrinsic part of a vector space given by \(\langle A\rangle_\mathscr{E} + \langle B\rangle_{\mathbb{R}}.\) It remarks when the vector space is of infinite codimension. \\
\hline
Computational ring &The default computational ring is the ring of fractional germs. \\
\hline
Verify/Warning/Suggestion& By default, \verb"Intrinsic" checks and verifies whether the fractional germ ring is sufficient for computation of the intrinsic part of the ideal or vector space spanned by \(A\) (and \(B\)). If the fractional germ ring is not sufficient, it gives a warning note of possible errors and a suggestion to circumvent the problem. \verb"Intrinsic" remarks when the problem might be of infinite codimension. Despite possible warning errors, \verb"Intrinsic" computes the intrinsic part using the fractional germ ring.
\end{tabularx}
\subsection{Options}
\begin{itemize}
\item \(k\); this option enforces that the computations are performed modulo degree \(k.\)
\item \verb"Formal"; \verb"SmoothGerms"; \verb"Polynomial"; \verb"Fractional"; this determines the computational germ ring. It checks and verifies the computations. It gives warning notes of possible errors and alternative suggestions when it finds them necessary.
\end{itemize}
\begin{example} For an example
\verb"Intrinsic"(\([x^3\lambda+\lambda^2, 3x^3\lambda, 3x^2\lambda^2]\), \([x, \lambda]\), \verb"InfCodim", \verb"Fractional") leads to
\begin{equation*}
M^3\langle\lambda\rangle+\langle\lambda^2\rangle.
\end{equation*}
As for a second example \verb"Intrinsic"(\([x^5+\lambda x^3+\lambda^2, 5x^5+3x^3\lambda, 5x^4\lambda+3x^2\lambda^2], [\lambda x^3+2\lambda^2, x^3+2\lambda, x^4+\frac{3}{5}\lambda x^2, \lambda^2, x^5], [x, \lambda]\)) results in
\begin{equation*}
M^5+M^3\langle \lambda\rangle+\langle\lambda^2\rangle.
\end{equation*}
\end{example}
\section{Algebraic objects }\label{SecAlgObjects}
Singularity theory defines and uses many algebraic objects in the bifurcation analysis of zeros of smooth germs. These include
restricted tangent space \(RT\), tangent space \(T\), high order term ideal \(\mathscr{P}\), smallest intrinsic ideal associated with a singular germ \(\mathscr{S}\), a basis for complement of the tangent space \(\mathscr{E}/T\), and low order terms \(\mathscr{S}^{\perp}\); see \cite{GazorKazemi,GolubitskySchaefferBook}. These can be computed in {\tt Singularity} using the command \verb"AlgObjects" as well as the individual commands \verb"RT", \verb"T", \verb"P", and \verb"TangentPerp". The individual commands \verb"RT", \verb"T", and \verb"P" have the same default and non-default options as \verb"AlgObjects" has as follows.\\
\begin{tabularx}{\textwidth}{l|X}
\textbf{Command/option} & \textbf{Description} \\
\hline
\verb"AlgObjects"(\(g\), \verb"Vars") & This function computes \(RT\), \(T\), \(\mathscr{P}\), \(\mathscr{E}/T\), \(\mathscr{S}\), \(\mathscr{S}^{\perp}\) and intrinsic generators of \(\mathscr{S}\) for given \(g\).
\\
\hline
\verb"RT"(g, \verb"Vars") & This derives the restricted tangent space associated with a scalar smooth germ \(g\).\\
\hline
\verb"T"(g, \verb"Vars") & This command provides a nice representation of the tangent space associated with the singular smooth germ \(g.\) The representation uses intrinsic ideal representation as for the intrinsic part of \(T(g)\).
\\
\hline
\verb"P"(g, \verb"Vars") & This computes the high order term ideal associated with the germ \(g\). \\
\hline
\verb"TangentPerp"(g, \verb"Vars")& This first computes \(T(g)\), {\rm i.e., } the tangent space of the germ \(g,\) and then returns a monomial basis for the complement space of \(T(g)\).\\
\hline
\verb"S"(g, \verb"Vars")& Computes the smallest intrinsic ideal containing the germ \(g\).\\
\hline
\verb"SPerp"(g, \verb"Vars")& This derives a set of monomials of low order terms for the germ \(g\).\\
\hline
\verb"IntrinsicGen"(g, \verb"Vars")& This derives the intrinsic generators of \(\mathscr{S}(g)\) that determine the nonzero conditions for recognition problem for normal forms.
\\
\hline
Computational ring & The default computational ring is the ring of fractional germs. \\
\hline
Default degree \(k\) & For non-polynomial input germs, it computes the least degree \(k\) so that truncations at degree \(k\) is permissible. Next, the germ \(g\) is truncated and all algebraic objects are computed modulo degrees higher than or equal to \(k+1\).
\end{tabularx}
\subsection{Options}
\begin{itemize}
\item \(k\); this option enforces the computations modulo degree \(k+1.\)
\item \verb"Formal"; \verb"SmoothGerms"; \verb"Polynomial"; \verb"Fractional"; this determines the computational germ ring. It checks and verifies/gives warning notes of possible errors.
\end{itemize}
\begin{example} Now we present three examples of singular germs of high codimension; compare these examples with those in \cite[Page 4]{Keyfitz}. For example, we consider a codimension 10 singularity and use \verb"AlgObjects"(\(x^5+x^3\lambda^2+\lambda^3, [x, \lambda], \verb"Fractional"\)). It gives
\begin{eqnarray*}
\mathscr{P} &=& M^{6}+M\langle \lambda^3 \rangle,\\
RT &=& M^{6}+M\langle \lambda^3 \rangle+\langle x^4\lambda, 3\lambda^2x^3+5x^5, \lambda^2x^3+x^5+\lambda^3 \rangle,\\
T&=& M^{5}+\langle \lambda^3\rangle+\lbrace \frac{3}{5}\lambda^2x^2+x^4, x^3\lambda+\frac{3}{2}\lambda^2\rbrace,\\
\mathscr{E}/T &=&\lbrace 1, \lambda, x, \lambda^2, x^2, x^3, \lambda x^2, \lambda^2 x, \lambda^2x^2, x\lambda\rbrace,\\
\mathscr{S} &=& M^{5}+\langle \lambda^3\rangle,\\
\mathscr{S}^{\perp} &=&\lbrace 1, \lambda, x, \lambda^2, x^2, x^3, x^4, \lambda x^2, \lambda^2 x, \lambda^2 x^2, x\lambda, x^3\lambda\rbrace,\\
\hbox{ intrinsic generators } &=& \lbrace x^5, \lambda^3\rbrace.
\end{eqnarray*}
A codimension 20 singularity:
\verb"TangentPerp"(\(x^8+\sin(\lambda^3), [x, \lambda], 9\)) derives the following
\begin{eqnarray*}
T&=& M^{9}+\langle \lambda^3\rangle+\lbrace x^7, x^8, \lambda x^7, -2\lambda^2\rbrace,\\
\hbox{ TangentPerp }&=&\lbrace 1, \lambda, x, x^2, x^3, x^4, x^5, x^6, \lambda x, \lambda x^2, \lambda x^3, \lambda x^6,\\&& \lambda^2 x, \lambda^2 x^2, \lambda^2 x^5, x^3\lambda^2, x^4\lambda, x^4\lambda^2, x^5\lambda, x^6\lambda^2\rbrace.
\end{eqnarray*}
Now we use the command \verb"IntrinsicGen" for an example of codimension 13. \verb"IntrinsicGen"(\(\lambda x^8+x^7-\lambda^3 x^2-\lambda^2 x, [x, \lambda]\)) leads to
\begin{eqnarray*}
\hbox{ intrinsic generators } &=& \lbrace x^7, x\lambda^2 \rbrace.
\end{eqnarray*}
\end{example}
\section*{\bf Acknowledgments}
We thank Professor Erik Postma at Maplesoft for his time and fruitful comments.
\renewcommand{\appendix}{\section*{Appendix}\setcounter{subsection}{0} \setcounter{equation}{0}
\renewcommand{\thesubsection}{\Alph{subsection}.}
\renewcommand{\theequation}{\thesubsection\arabic{equation}}}
\newcommand{\begin{equation}}{\begin{equation}}
\newcommand{\end{equation}}{\end{equation}}
\newcommand{\begin{array}}{\begin{array}}
\newcommand{\end{array}}{\end{array}}
\newcommand{\begin{eqnarray}}{\begin{eqnarray}}
\newcommand{\end{eqnarray}}{\end{eqnarray}}
\newcommand{\nonumber}{\nonumber}
\newcommand{\ell_L}{\ell_L}
\newcommand{\ell_R}{\ell_R}
\renewcommand{\epigraphrule}{0pt}
\renewcommand{\epigraphwidth}{3.5cm}
\begin{document}
\vspace*{-2cm}
\title{Spontaneous Breaking of Lorentz Symmetry and Vertex Operators for Vortices}
\author{\textbf{A. P. Balachandran}\footnote{bal@phy.syr.edu} $^{a,b}$}
\author{\textbf{Se\c{c}kin K\"{u}rk\c{c}\"{u}o\v{g}lu}\footnote{kseckin@metu.edu.tr} $^{c}$}
\author{\textbf{Amilcar R. de Queiroz}\footnote{amilcarq@unb.br} $^{d}$}
\affiliation{$^a$ Physics Department, Syracuse University, Syracuse, NY, 13244-
1130, USA}
\vspace*{1cm}
\affiliation{$^b$Institute of Mathematical Sciences, CIT Campus, Taramani, Chennai 600113, India}
\vspace*{1cm}
\affiliation{$^c$ Middle East Technical University, Department of Physics, Dumlupinar Boulevard, 06800, Ankara, Turkey}
\vspace*{1cm}
\affiliation{$^d$ Instituto de Fisica, Universidade de Brasilia, Caixa Postal 04455, 70919-970, Brasilia, DF, Brazil}
\vspace*{1.5cm}
\begin{abstract}
We first review the spontaneous Lorentz symmetry breaking in the presence of massless gauge fields and infraparticles. This result was obtained a long time ago in the context of rigorous quantum field theory by \cite{Frohlich1979241,Frohlich197961} and reformulated by Balachandran and Vaidya in \cite{Bal-Sachin} using the notion of superselection sectors and direction-dependent test functions at spatial infinity for gauge transformations. Inspired by these developments and under the assumption that the spectrum of the electric charge is quantized (in units of a fundamental charge $e$), we construct a family of vertex operators which create winding number $k$, electrically charged Abelian vortices from the vacuum (zero winding number sector) and/or shift the winding number by $k$ units.
Vortices created by this vertex operator may be viewed both as a source and as a probe for inducing and detecting spontaneous Lorentz symmetry breaking.
We find that for rotating vortices, the vertex operator at level $k$ shifts the angular momentum of the vortex by $k \frac{{\tilde q}}{q}$, where ${\tilde q}$ is the electric charge of the quantum state of the vortex and $q$ is the charge of the vortex scalar field under the $U(1)$ gauge field. We also show that, for charged-particle-vortex composites, angular momentum eigenvalues shift by $k \frac{{\tilde q}}{q}$, ${\tilde q}$ being the electric charge of the charged-particle-vortex composite. This leads to the result that for $\frac{{\tilde q}}{q}$ half-odd integral and for odd $k$, our vertex operators flip the statistics of charged-particle-vortex composites from bosons to fermions and vice versa. For fractional values of $\frac{{\tilde q}}{q}$, application of vertex operator on charged-particle-vortex composite leads in general to composites with anyonic statistics.
\end{abstract}
\maketitle
\section{Introduction}
The study of infrared (IR) effects in gauge field theories, especially due to its conceived intimate relation with the problem of quark confinement, has brought out several other interesting problems in quantum field theory (QFT), some of which are addressed at different levels of sophistication and some others await a proper treatment. Interesting and nontrivial effects already appear at the level of QED. The consequences of soft photons may be seen both at the level of perturbative calculations and at non-perturbative treatments. A classic example for the former is the Bloch-Nordsieck \cite{Bloch-Nordsieck} cancellation of IR divergences by soft photons, which also plays a crucial role in Schwinger's computation of the magnetic dipole moment of electrons \cite{Schwinger}. As for the latter, spontaneous breaking of Lorentz symmetry by IR photons in QED shown by Fr\"ohlich, Morchio and Strocchi \cite{Frohlich197961,Frohlich1979241} is a prominent example.
It appears that there are good physical reasons to look out for the consequences of accumulation of soft photons. Suppose that we are interested in prescribing a well-defined initial data for a set of fields on space-time to probe their time evolution. We may consider the simpler case of a globally hyperbolic space-time $\mathbb{R}\times \Sigma$ with $\mathbb{R}$ standing for time and $\Sigma$ for a space-like hypersurface. It is possible to fix some $\Sigma_t$ as a Cauchy surface at a time $t$ where the initial conditions for the fields are given. Usually for the sake of convenience one only considers initial data supported on a compact region of the Cauchy surface $\Sigma_t$. In a more realistic situation, the accumulation of IR photons traveling from the past of the initial time $t$ must also be taken into consideration. Therefore, the initial data for the electromagnetic field, which
are non-vanishing arbitrarily far away on $\Sigma_t$, have to be considered (Fig.~1). This has consequences for the dynamics of the quantum fields, since the symmetries will be affected by the presence of these IR photons. For instance, as already mentioned above, it leads to the spontaneous breaking of Lorentz symmetry in QED.
\begin{figure}
\centering
\includegraphics[width=0.6\textwidth, height=0.3\textheight]{fig1pic}
\caption{Clearly there are photons originating sufficiently far in the past of $\Sigma_t$ which will arrive arbitrarily far away from any origin.}
\end{figure}
The above description of the initial data problem also appears to be relevant in the modelling of the ``night sky'' \cite{Bal-Sachin} or cosmic microwave background (CMB). CMB radiation which is supported far away from us (on scales of microscopic physics) contains IR photons coming from the distant past of the universe. It turns out that radiation with a similar physical origin is relevant for the description of the quantum states. It leads to spontaneous breakdown of Lorentz symmetry.
Following Fr\"ohlich, Morchio and Strocchi \cite{Frohlich197961}, it is also important to stress that the spontaneous breaking of Lorentz symmetry by the accumulation of IR photons is not due to the need to introduce an IR cut-off. The argument about the resolution power of the measuring apparatus, that is, the usual text-book interpretation of the IR cut-off, cannot be evoked for the ensuing symmetry breaking. As we have argued above, such photons are associated with the setting up of a proper initial data for the dynamics of fields on space-time.
Spontaneous breaking of Lorentz symmetry in QED can be contemplated by invoking the notion of super-selection sectors \cite{Haag} and using direction-dependent test functions at spatial infinity. Such a treatment is given by Balachandran and Vaidya in \cite{Bal-Sachin}. In this article, we will give a very brief review of some of the aspects of this work adapted for our purposes. In particular, we write down the charge operator $\widehat{Q}(\chi)$ in terms of the direction-dependent test functions $\chi$ and show that the associated eigenvalues and eigenvectors change under rotations, clearly indicating the spontaneous breaking of the Lorentz Symmetry.
In the present work we focus on the spontaneous breaking of rotational (and hence Lorentz) symmetry in the presence of electrically charged quantum vortices in the Abelian Higgs theory in $2+1$-dimensions. Using a multivalued set of test functions $\xi$, we construct the operators
$\widehat{V}(\xi)$ by exponentiating $\widehat{Q}(\xi)$ and show that $\widehat{V}(\xi)$ are vertex operators for the creation of electrically charged vortices. Indeed, we find that these operators change the vorticity and consequently the angular momentum of dynamical vortices. It is essential to emphasize that vortices created by $\widehat{V}(\xi)$ may be viewed as probes to detecting spontaneous Lorentz symmetry breaking. Since the latter is a phenomenon encountered in the charged sectors of QED ( as it should be clear from the previous paragraph and the discussion in \cite{Frohlich197961}) our line of reasoning is further supported by the observation that the electrically charged vortices may be viewed as sources in the context of QED leading to the spontaneous Lorentz symmetry breaking and subsequently to the construction of the vertex operators $\widehat{V}(\xi)$. In short, our results indicate a possible way in which spontaneous Lorentz symmetry breaking may be induced and probed.
Our present work is organized as follows: In section $3$ we construct a family of vertex operators which create winding number $k$ electrically charged Abelian vortices from the vacuum (zero winding number sector) and/or shift the winding number by $k$ units. Our discussion here is divided into several subsections, where rotating vortices, charged-particle-vortex composites and the addition of a Chern-Simons term are considered. We find that for rotating vortices, the vertex operator at level $k$ shifts the angular momentum of the vortex by $k \frac{{\tilde q}}{q}$, where ${\tilde q}$ is the electric charge of the quantum state of the vortex and $q$ is the charge of the vortex scalar field under the $U(1)$ gauge field. We also show that, for charged-particle-vortex composites, angular momentum eigenvalues shift by $k \frac{{\tilde q}}{q}$, ${\tilde q}$ being the electric charge of the charged-particle-vortex composite. This leads to the result that for $\frac{{\tilde q}}{q}$ half-odd integral and for odd $k$, our vertex operators flip the statistics of charged-particle-vortex composites from bosons to fermions and vice versa. For fractional values of $\frac{{\tilde q}}{q}$, application of the vertex operator on a charged-particle-vortex composite leads in general to composites with anyonic statistics.
\section{Spontaneous Lorentz Symmetry Breaking}
\subsection{Canonical Structure of Classical Electrodynamics}
We recall how the algebra of observables is constructed in the classical canonical formulation of electrodynamics. Let us start by fixing a time-slice in $3+1$-dimensional space-time. On this fixed spatial surface, the electric field $E_i$, $i=1,2,3$, is the momentum conjugate to the vector potential $A_j$. Confining our discussion to the classical theory, we can further suppose that a given charge density $J_0$ is localized in the sense that it has a compact support on the fixed spatial surface. The Gauss' law reads
\begin{equation}
G(\Lambda)=\int d^3x~\Lambda~\left( \partial_i E_i+ J_0\right) \approx 0,
\end{equation}
where $\Lambda$ belongs to an appropriate test function space. The requirement of differentiability of $G(\Lambda)$ with respect to variations of $E_i$ determines that elements of this test function space should fulfill
\begin{equation}
\Lambda(x)\to 0 ~\textrm{ as } r\equiv |\vec{x}| \to \infty.
\end{equation}
$G(\Lambda)$ are first class constraints. They generate the $U(1)$ (Gauss law) gauge transformations:
\begin{equation}
\left\{ G(\Lambda),A_i \right\} = \partial_i \Lambda.
\end{equation}
The associated charges are defined as
\begin{equation}
Q(\chi)=\int d^3 x~\left( -E_i \partial_i\chi + \chi J_0 \right),
\end{equation}
where the test functions $\chi$ go to a constant at spatial infinity. They are first class since they have vanishing Poisson brackets (PB) with the Gauss law
and therefore they are the observables of the theory. They generate the $U(1)$ gauge transformations with the PB's
\begin{equation}
\left\{Q(\chi_1),Q(\chi_2) \right\} = 0 \,.
\end{equation}
We emphasize that the distinction between charges $Q(\chi)$ and Gauss law constraints $G(\Lambda)$ may be cast in terms of the associated test function spaces. For the constraints this space is composed of test functions vanishing at spatial infinity, while for the charges it is composed of functions that are constant at spatial infinity and do not necessarily vanish.
Quantization lifts $G(\Lambda)$ and $Q(\chi)$ to operators
\begin{equation}
\widehat{G}(\Lambda) \quad \mbox{and} \quad \widehat{Q}(\chi)
\end{equation}
acting on a suitable Hilbert space. The PB's are promoted to commutators by the usual Dirac's quantization prescription.
\subsection{Direction-Dependent Test Functions}
Accumulation of infrared photons at spatial infinity, as discussed in the introduction, gives us good physical reasons to relax the condition on the test functions for charges being constant at spatial infinity. We observe that classically
\begin{equation}
\int d^3 x~E_i\partial_i \chi = \lim_{r\to \infty} \int d\Omega~r^2~\hat{n}_i E_i~\chi(r,\theta,\phi) - \int d^3 x \chi \partial_i E_i \,.
\label{eq:boundaryterm}
\end{equation}
For natural choices of $E_i$ -- for instance those that fall as $\frac{1}{r^2}$ when $r\to\infty$, the surface integral in the right hand side of the above expression converges for test functions $\chi(r, \theta, \phi)$, which have a direction-dependent limit as ${\bm r} \rightarrow \infty$:
\begin{equation}
\lim_{r \rightarrow \infty} \chi(r, \theta, \phi) : = \chi(\infty, \theta, \phi) \equiv \chi(\hat n) < \infty \,.
\label{eq:dirdep1}
\end{equation}
We henceforth accept the possibility (\ref{eq:dirdep1}).
Spatial infinity can be viewed as a two-sphere $S^2$ and we can make the mode expansion
\begin{equation}
\lim_{r\to\infty} \chi(r,\theta,\phi)\equiv \chi(\hat{n})=\sum \chi_{lm} Y_{lm},
\label{eq:testfunctions}
\end{equation}
in spherical harmonics $Y_{lm}$ with $l\in \mathbb{N}$ and $-l\leq m \leq l$ as usual. Now because of the Gauss law constraint in quantum states, the effect of
$Q(\chi)$ on quantum states depends only on $\chi(\hat n)$. Accordingly the associated ``charge" can also be mode expanded as
\begin{equation}
\label{angular-dep-charge-1}
Q(\chi)=\sum Q_{lm}Y_{lm}.
\end{equation}
Before we proceed it is worthwhile to remark that the choice of the asymptotic behaviour $\frac{x_i}{r^3}$ for the electric field is in fact too restrictive. Suppose that we try to compute the charges $Q_{lm}$ at spatial infinity in this case. Inserting $\chi(\hat n) = \sum \chi_{lm} Y_{lm}$ in (\ref{eq:boundaryterm}), we find by orthogonality of $Y_{lm}$ that only the $Y_{00}$ component contributes to the integral, therefore at spatial infinity we seem to have only $Q_{00} \neq 0$ and the rest of the charges vanishing at infinity. However, for a more general electric field, say that due to an accelerated charge, in general all $Q_{lm}$ survive. Then, the results of \cite{Frohlich197961, Bal-Sachin} show that the boost symmetry is in fact broken, and therefore such a boost cannot be performed in quantum theory.
\subsection{Poincar\'e Group as an Automorphism of Algebra of Observables}
Let us turn our attention to quantum states and briefly point out how the Lorentz symmetry is spontaneously broken under the physical setting described above. For a detailed account of these results we refer the reader to \cite{Bal-Sachin}.
A quantum state is labelled by eigenvalues of $\widehat{Q}(\chi)$, that is, the eigenvalues $Q_{lm}$ of all $\widehat{Q}_{lm}$, in addition to other relevant quantum numbers. It may be denoted as
\begin{equation}
\label{quantum-state-1}
|\cdot; Q_{\lbrace lm \rbrace } \rangle\equiv |\cdot;Q_{00},Q_{1,-1},Q_{1,0},Q_{11},...\rangle ,
\end{equation}
where $\cdot$ represents other quantum numbers (see below) and we regard $\{lm\}$ as a collective index from now on. Observe that
\begin{align}
\widehat{G}(\Lambda) |\cdot;Q_{\lbrace lm \rbrace } \rangle & = 0, \\
\widehat{Q}_{l'm'} |\cdot; Q_{\lbrace lm \rbrace } \rangle &= \delta_{l'l}\delta_{m'm} Q_{l'm'} |\cdot; Q_{\lbrace lm \rbrace }\rangle = Q_{lm} |\cdot; Q_{\lbrace lm \rbrace }\rangle
\end{align}
A given test function $\chi(\hat{n})$ defines a superselection sector in the nomenclature of \cite{Haag}. Its choice is equivalent to fixing all the eigenvalues of $\widehat{Q}(\chi)$, that is, the $Q_{\lbrace lm \rbrace }$ in $|\cdot; Q_{\lbrace lm \rbrace } \rangle$ at spatial infinity. A given superselection sector corresponds in algebraic language to a given irreducible representation (IRR) of the net of algebra of observables. In the context of QED, $\widehat{Q}(\chi)$ with $\chi$ in a fixed space of direction-dependent test functions at infinity gives a specific superselection sector for the algebra of observables of the theory.
It is also important in this context to distinguish between local and global observables. Other quantum numbers of a state vector $|\cdot;Q_{\lbrace lm \rbrace } \rangle$ are eigenvalues of some set of local observables. These local observables are defined within a region of compact support and as such they cannot change globally defined charge sectors, that is the values of $\{lm\}$; they are associated with the inner automorphisms of the algebra of observables. In contrast, all symmetries that transform states from a superselection sector to another superselection sector are said to be spontaneously broken. Such transformations can be interpreted as contained in the set of outer automorphisms. The Lorentz group (more generally the Poincar\'e group) is not a local observable, and is in fact an outer automorphism acting on the quantum states and the operators ${\widehat Q}_{lm}$. As we show below for the subgroup of rotations, the action of the Lorentz group changes the superselection sector, leading to the interpretation that the Lorentz group is spontaneously broken.
\subsection{Rotations}
For our purposes it is enough to restrict attention to the subgroup of rotations. For a more comprehensive discussion of the spontaneous breaking of the Poincar\'e group in QED and other related consequences due to infrared photons, we refer the reader to \cite{Frohlich1979241,Frohlich197961, Bal-Sachin}.
Rotations are implemented by the unitary irreducible representation (UIRR), $U(R)$, of the rotation group:
\begin{equation}
{\widehat Q}_{lm}^\prime = U(R) {\widehat Q}_{lm} U(R)^{-1} = \sum D_{m^\prime m}^l (R) {\widehat Q}_{l m^\prime} \,,
\end{equation}
where $D_{m^\prime m}^l(R)$ denotes the matrix elements of the rotation group element for the rotation $R$ in the (IRR) $l$.
Under rotations the state kets transform as
\begin{eqnarray}
U(R) | \cdot, Q_{\lbrace l m \rbrace } \rangle &\equiv& | \cdot, D_{m^\prime m}^l (R) Q_{\lbrace l m^\prime \rbrace }\rangle \nonumber \\
&\equiv& | \cdot, D_{m^\prime m}^{l_1} (R) Q_{l_1 m^\prime} \,, D_{m^\prime m}^{l_2} (R) Q_{l_2 m^\prime} \,, \cdots D_{m^\prime m}^{l_i} (R) Q_{l_i m^\prime}\,, \cdots \rangle \,,
\end{eqnarray}
and sum over relevant repeated indices are implied. We note again that $\{lm\}$ is a collective index running over all possible values of $l$ and $m$. We observe that under the action of the rotation group the state kets can be rotated to another superselection sector, with a whole set of generically new charges. Hence rotational symmetry and consequently Lorentz symmetry are generically spontaneously broken.
Note that $U(R)$ is unitary only in the direct sum of all superselection sectors it connects. Note also that if the $Q_{lm}$ in a superselection sector are all invariant under the rotation group, then the latter is not spontaneously broken in this sector.
In particular, if we make a rotation about the third axis, with
\begin{equation}
U(R) = e^{i \varphi J_3} \,, \quad D^l_{m m^\prime}(R) = \delta_{m m^\prime}e^{i m\varphi}
\end{equation}
we have on the state kets
\begin{equation}
e^{i \varphi J_3} | \cdot, Q_{\lbrace lm \rbrace } \rangle = | \cdot, e^{i m_1\varphi} Q_{l_1 m_1} \,, e^{i m_2\varphi} Q_{l_2 m_2} \,, \cdots e^{i m_i\varphi} Q_{l_i m_i}\,, \cdots \rangle
\end{equation}
The above discussion may be easily adapted to $(2+1)$-dimensions. In particular, direction-dependent test functions in $(2+1)$-dimensions may be written as
\begin{equation}
\chi(\varphi)\equiv \lim_{r\to\infty} \chi(r,\varphi) = \sum_{n} a_n e^{in\varphi}.
\label{eq:testf21}
\end{equation}
In this case, a finite rotation generated by $J_3$, takes the superselection sector labeled by $|\cdot; n\rangle$ to another superselection sector labeled by $|\cdot; n^\prime\rangle$, leading to the spontaneous breaking of the Lorentz group.
As a final remark in this section, we emphasize once more that there might be a subgroup of the Poincar\'e group surviving the breaking. Which subgroup survives depends on the choice of the superselection sector: the stability group of the associated vector states contained in the Lorentz group survives unbroken.
\section{Vortices and Vertex Operators}
In this section we discuss the consequences of spontaneously broken Lorentz symmetry for $(2+1)$-dimensional vortices.
The Abelian Higgs model has the Lagrangian density
\begin{equation}
\label{abelian-vortex-Lag}
\mathcal L = - \frac{1}{4}F_{\mu \nu} F^{\mu \nu} + |D_0 \Phi|^2 - |D_i \Phi|^2 - \lambda (|\Phi|^2 -a^2)^2,
\end{equation}
where $\Phi$ is the complex scalar field and $D_\mu=\partial_\mu-iq A_\mu$ is the covariant derivative, with $A_\mu$ the $U(1)$ gauge potential with the field strength $F_{\mu\nu}$ and $q$ the coupling constant. Under a $U(1)$ gauge transformation the fields transform as
\begin{equation}
\Phi \to e^{i \chi} \Phi \,, \quad A_\mu \to A_\mu + \frac{1}{q} \partial_\mu \chi.
\end{equation}
Static vortex solutions in this model are characterized by the winding number of the Higgs field $\Phi$ \cite{Nielsen-Olesen}. It is standard to work in the radial gauge $A_r=0$. For rotationally symmetric configurations, a field with winding number $n \in {\mathbb Z}$ behaves at spatial infinity ($r \rightarrow \infty$) as
\begin{equation}
\Phi = a e^{in \varphi} \,, \quad r \rightarrow \infty \,.
\end{equation}
Finiteness of energy in $(2+1)$-dimensions requires that $D_\mu \Phi \rightarrow 0$ as $r \rightarrow \infty$. Therefore we have,
\begin{equation}
A_\varphi = - \frac{1}{q} i \partial_\varphi \ln \Phi \quad \quad r \rightarrow \infty \,,
\end{equation}
and the magnetic flux is proportional to the winding number:
\begin{equation}
\frac{q}{2 \pi} \oint A_\mu dx^\mu = n \,.
\end{equation}
The winding number operator can be written as
\begin{align}
{\widehat N} &= - \frac{i}{2 \pi} \int d^2 x \, \varepsilon^{ij} \partial_i {\hat \Phi}^* \partial_j {\hat \Phi} \nonumber \\
&= - \frac{i}{2 \pi} \oint_{|x| \rightarrow \infty} {\hat \Phi}^* \partial_i {\hat \Phi} dx^i \,,
\label{eq:windingnumber}
\end{align}
where ${\hat \Phi} = \frac{\Phi}{|\Phi|}$.
The charge-current density is given by
\begin{equation}
J_\mu = iq (\Phi^* D_\mu \Phi - \Phi D_\mu \Phi^*) \,.
\end{equation}
We note that, at the classical level, the charge density $J_0$ vanishes identically for a static vortex. So the electric charge (i.e. $Q_{00}$ in the notation of (\ref{angular-dep-charge-1})) vanishes for static vortices. Static vortices do not carry electric charges and have only magnetic field ${\bf B}$. In order to study the consequences of the breaking of Lorentz symmetry we need non-vanishing electric charge. This is quite natural and expected, since as emphasized in the introduction and in section 2, spontaneous breaking of Lorentz symmetry occurs only in the nonzero charged sectors of QED. Electrically charged vortices may be used to induce this symmetry breaking. There appear to be several possible ways of associating an electric charge to a vortex. In what follows we consider some of these possibilities.
\subsection{Rotating Vortices}
We can study a dynamic instead of a static vortex. We still keep the $A_0 = 0$ gauge choice. The action contains the additional electric field term $E_i^2 = (\partial_0 A_i)^2$. The equal-time commutation relations are
\begin{equation}
\lbrack A_i(x) \,, E_j(y) \rbrack = i \delta_{ij } \, \delta^{(2)}(x-y) \,, \quad
\lbrack \Phi(x) \,, \partial_0 \Phi^*(y) \rbrack = i \delta^{(2)}(x-y) \,.
\label{eq:basiccom-1}
\end{equation}
In addition to the conserved energy and momentum, the vortex now has angular momentum which is conserved. In other words, we have rotating vortices \cite{Manton}.
The adjoint actions of ${\widehat V}(\chi) := e^{\frac{i}{q}\widehat{Q}(\chi)}$ on the quantum fields $A_i$ and $\Phi$,
\begin{equation}
e^{-\frac{i}{q}\widehat{Q}(\chi)} \Phi e^{\frac{i}{q}\widehat{Q}(\chi)} = e^{i\chi} \Phi \,, \quad
e^{-\frac{i}{q}\widehat{Q}(\chi)} A_i e^{\frac{i}{q}\widehat{Q}(\chi)} = A_i + \frac{1}{q} \partial_i \chi.
\label{eq:adjtr}
\end{equation}
follow from the basic commutation relations given in (\ref{eq:basiccom-1}). Infinitesimally these read
\begin{equation}
\left[\widehat{Q}(\chi),\Phi \right] = -q \chi \Phi \,, \quad \left[\widehat{Q}(\chi),A_i \right] = i \partial_i \chi \,.
\end{equation}
For the ensuing discussion, we assume that the spectrum of the conserved non-vanishing electric charge
\begin{equation}
{\widehat Q}_e := \frac{1}{e} \int d^2 x \, J_0 \,
\end{equation}
is quantized in units of a fundamental charge $e$. That is,
\begin{equation}
\textrm{Spec}~ {\widehat Q}_e = {\mathbb Z} \,.
\label{eq:chargequan}
\end{equation}
On quantum states ${\widehat Q}_e := \frac{1}{e} {\widehat Q}(\chi)$ where $\lim_{r \rightarrow \infty} \chi ({\hat n}) = 1$. It is also worth remarking that ${\widehat Q}_e - \frac{1}{e} {\widehat Q}_{00}$ is a Gauss law constraint.
Suppose that we consider $\widehat{Q}(\xi)$, where $\xi (\varphi) = k \varphi \,, k \in {\mathbb Z}$, which is clearly {\it not} in the space of functions (\ref{eq:testf21}), since it is multivalued on $S^1$. Consider the operator
\begin{eqnarray}
\label{vertex-op-def-1}
\widehat{V}(\xi) \equiv e^{\frac{i}{q} \widehat{Q}(\xi)} &=& e^{\frac{i}{q}\int d^2 x ~ (- \partial_i \xi E^i + \xi J_0 )} \nonumber \\
&=& e^{\frac{i k}{q}\int \frac{1}{r} dr d \varphi ~(-E_\varphi + \varphi J_0 )} \,,
\end{eqnarray}
where the second line is written in polar coordinates in the coordinate basis with the metric components $h_{rr} = 1 \,, h_{\varphi \varphi} = r^2 \,, h_{r \varphi} = 0$.
It appears to represent a singular gauge transformation, but we show below that it generates a well-defined finite gauge transformation. Hence local observables commute with it and its eigenvalues also serve to define superselection sectors. We will see, however, that rotations do not commute with it. Hence rotations, and consequently Lorentz transformations, get spontaneously broken.
In what follows we demonstrate that it is in fact a vertex operator for vortices.
Due to (\ref{eq:chargequan}) it is a well-defined operator with the property
\begin{eqnarray}
{\widehat V}(\xi + 2 \pi k ) &=& e^{\frac{i k}{q} \int \frac{1}{r} dr d \varphi \, (- E_\varphi + \varphi J_0) + \frac{2 \pi i k}{q} \int d^2 x \, J_0 } \nonumber \\
&=& e^{i 2 \pi k \frac{m e}{q}} {\widehat V}(\xi)
\label{eq:consistency}
\end{eqnarray}
The phase on the right hand side classifies the possible statistics associated with these operators, as will be discussed in the subsequent section.
For future reference we note that
\begin{eqnarray}
{\widehat V}(\xi + 2 \pi k ) &=& {\widehat V}(\xi) \quad \mbox{for} \quad k m \frac{e}{q} \in {\mathbb Z} \nonumber \\
{\widehat V}(\xi + 2 \pi k ) &=& - {\widehat V}(\xi) \quad \mbox{for} \quad k m \frac{e}{q} \quad \mbox {half odd integer} \,.
\end{eqnarray}
The operator (\ref{vertex-op-def-1}) is indeed gauge invariant since
\begin{equation}
\left[\widehat{G}(\Lambda),\widehat{Q}(\xi)\right]\approx 0.
\label{eq:gaugeinv}
\end{equation}
We now proceed to interpret ${\widehat V}(\xi) = e^{\frac{i}{q} {\widehat Q}(\xi)}$ as a vertex operator shifting the winding number of the vortex by $k$ units.
The action of the operator (\ref{vertex-op-def-1}) on $ \hat \Phi$ and $D_\mu \hat \Phi$ reads
\begin{align}
{\widehat V}(\xi)^{-1} \hat \Phi {\widehat V}(\xi) &= e^{i k \varphi} ~ \hat \Phi, \nonumber \\
{\widehat V}(\xi)^{-1}\left( D_\mu \hat \Phi \right) {\widehat V}(\xi) &= e^{i k \varphi}~D_\mu \hat \Phi.
\end{align}
Using the above formula we obtain
\begin{align}
{\widehat V}(\xi)^{-1} \left ( \partial_\mu {\hat \Phi} \right) {\widehat V}(\xi) &= {\widehat V}(\xi)^{-1} D_\mu {\hat \Phi} {\widehat V}(\xi) + i q{\widehat V}(\xi)^{-1} A_\mu {\hat \Phi} {\widehat V}(\xi) \nonumber \\
&= e^{i k \varphi} D_\mu {\hat \Phi}+ i q {\widehat V}(\xi)^{-1} A_\mu {\widehat V}(\xi) {\widehat V}(\xi)^{-1} {\hat \Phi} {\widehat V}(\xi) \, \nonumber \\
&= e^{i k \varphi} D_\mu {\hat \Phi} + i q (A_\mu + \frac{1}{q} k \partial_\mu \varphi) e^{i k \varphi} {\hat \Phi} \, \nonumber \\
&= e^{i k \varphi} (\partial_\mu {\hat \Phi} + i k (\partial_\mu \varphi ){\hat \Phi}) \,.
\end{align}
From (\ref{eq:windingnumber}) and (\ref{eq:adjtr}) we find
\begin{align}
{\widehat V}(\xi)^{-1} {\widehat N} {\widehat V}(\xi) &= {\widehat N} + \frac{1}{2 \pi} \oint_{|x| \rightarrow \infty} k \partial_i \varphi dx^i \, \nonumber \\
&= {\widehat N} + \frac{k }{2 \pi} \int_0^{2 \pi} \partial_\varphi \varphi d \varphi \, \nonumber \\
&= {\widehat N} + k \,.
\end{align}
which together with (\ref{eq:gaugeinv}) clearly indicate that ${\widehat V} (\xi)$ is a gauge invariant vertex operator shifting the winding number by $k$ units.
The above result may also be expressed as
\begin{equation}
\lbrack {\widehat V}(\xi) \,, {\widehat N} \rbrack = - k {\widehat V}(\xi) \,.
\end{equation}
Denoting the quantum state of the vortex with winding number $n$ as $| n \,, \cdot \rangle$, we have ${\widehat N} | n \,, \cdot \rangle = n | n \,, \cdot \rangle$ and also
\begin{equation}
\lbrack {\widehat V}(\xi) \,, {\widehat N} \rbrack | n \,, \cdot \rangle = - k {\widehat V}(\xi) | n \,, \cdot \rangle
\end{equation}
from which we obtain
\begin{equation}
{\widehat N} ({\widehat V}(\xi) | n \,, \cdot \rangle ) = (n + k ) {\widehat V}(\xi) | n \,, \cdot \rangle
\end{equation}
indicating that
\begin{equation}
{\widehat V}(\xi) | n \,, \cdot \rangle \equiv | n + k \,, \cdot \rangle.
\end{equation}
It is also worthwhile to remark that in the literature there are the so-called `` 't Hooft loop operators''; they are given as $ T (C^\prime) = e^{- i \int_{C^\prime} d^3 x \, \alpha_i E_i}$, for a suitable one-form $\alpha= \alpha_i dx^i$ \cite{tHooft, Nair}, and act as vortex creation operators in a manner similar to the operators introduced in this work. We are not going to make any detailed comparison of these two operators at present, however, it is readily observed that in our case for ${\widehat V}(\xi)$ the role of $\alpha_i$ is played by $ \partial_\varphi \xi$ which is closed in the integer cohomology ${\cal H}(S^1 \,, {\mathbb Z})$, whereas for the `` 't Hooft loop operators'' this is not the case; $\alpha$ is not closed, $d \alpha \neq 0$.
\subsection{Shift of Angular Momentum}
Let us consider a rotating vortex described by the quantum states $|\tilde{q},j\rangle$ satisfying
\begin{equation}
\widehat{Q}_e |\tilde{q} \,, j \rangle = \frac{\tilde{q}}{e} |\tilde{q} \,, j \rangle \,, \quad J_3 |\tilde{q} \,, j \rangle = j |\tilde{q} \,, j \rangle.
\label{eq:eigenv1}
\end{equation}
We are ignoring other possible quantum numbers irrelevant for our discussion.
Under a spatial rotation by $\theta$, that is $\varphi \rightarrow \varphi + \theta$, we have
\begin{eqnarray}
{\widehat V}(\xi) \rightarrow e^{i \theta J_3} {\widehat V}(\xi) e^{- i \theta J_3} &=& e^{\frac{i k}{q} \int \frac{1}{r} dr d \varphi (- E_{\varphi + \theta})} e^{\frac{i}{q} \theta k \int d^2 x J_0} \, \nonumber \\
&=& {\widehat V}(\xi) e^{i \frac{e}{q} \theta k \widehat{Q}_e} \,,
\label{eq:shift1}
\end{eqnarray}
where in passing from the first line to the second line, we have made use of a change of variables, to absorb the shift in the argument of $E_\varphi$, since $d (\varphi + \theta) = d \varphi$.
It is now easy to compute
\begin{equation}
{\widehat V}(\xi) |\tilde{q} \,, j \rangle = e^{\frac{i}{q} {\widehat Q}(\xi)} |\tilde{q} \,, j \rangle.
\end{equation}
We have from (\ref{eq:eigenv1}) and (\ref{eq:shift1}),
\begin{equation}
e^{i \theta J_3} {\widehat V}(\xi) |\tilde{q} \,, j \rangle
= e^{ i \theta (k \frac{\tilde{q}}{q} + j)} {\widehat V}(\xi) |\tilde{q} \,, j \rangle \,,
\label{J3-shifting-1}
\end{equation}
\begin{equation}
{\widehat V}(\xi) |\tilde{q} \,, j \rangle = |\tilde{q} \,, j+ k \frac{\tilde{q}}{q} \rangle \,.
\end{equation}
We conclude that the action of the vertex operator on the quantum state $|\tilde{q} \,, j \rangle$ shifts the angular momentum of the state by $k \frac{\tilde{q}}{q}$.
\subsection{Charged-Particle-Vortex Composites}
Suppose now that a charged particle say with charge $\tilde{q}$ is orbiting the vortex. According to Wilczek's results \cite{Wilczek}, the angular momentum of the charged particle orbiting around a vortex shifts from integer value $j$ (in the absence of the vortex) by an amount $- \frac{\tilde{q}}{q} n$, $n$ being the winding number of the vortex.
We can denote the quantum state of this composite system as $|\tilde{q}, J \rangle$ with
\begin{equation}
\widehat{Q}_e |\tilde{q} \,, J \rangle = \frac{\tilde{q}}{e} |\tilde{q} \,, J \rangle \,, \quad J_3 |\tilde{q} \,, J \rangle = J |\tilde{q} \,, J \rangle \,,
\end{equation}
where
\begin{equation}
J = j - \frac{\tilde{q}}{q} n \,, \quad j \in {\mathbb Z} \,.
\label{eq:specj1}
\end{equation}
Proceeding in a similar manner as before, we find
\begin{eqnarray}
e^{i \theta J_3} {\widehat V}(\xi) |\tilde{q} \,, J \rangle &=& e^{i \theta J_3} {\widehat V}(\xi) e^{- i \theta J_3} e^{ i \theta J_3} | \tilde{q} \,, J \rangle \nonumber \\
&=& {\widehat V}(\xi) e^{\frac{i}{q} \theta k \int d^2 x J_0} e^{ i \theta J} |\tilde{q} \,, J \rangle \nonumber \\
&=& {\widehat V}(\xi) e^{i \theta k \frac{\tilde{q}}{q}} e^{ i \theta J} |\tilde{q} \,, J \rangle \nonumber \\
&=& e^{ i \theta (k \frac{\tilde{q}}{q} + J)} {\widehat V}(\xi) |\tilde{q} \,, J \rangle \,,
\end{eqnarray}
so that we have
\begin{equation}
{\widehat V}(\xi) |\tilde{q} \,, J \rangle = |\tilde{q} \,, J + k \frac{\tilde{q}}{q} \rangle \,.
\end{equation}
The spectrum of angular momentum after the application of the vertex operator becomes
\begin{eqnarray}
J_{new} &=& j - \frac{\tilde{q}}{q} (n - k) \,, \nonumber \\
&=& J + \frac{\tilde{q}}{q} k \,.
\label{eq:specj2}
\end{eqnarray}
This result has very interesting consequences. We first note that $\tilde{q}$ and $q$ come in integer multiples of the fundamental charge $e$. Let us suppose then that the ratio $\frac{\tilde{q}}{q}$ is a half-odd integer:
\begin{equation}
\frac{\tilde{q}}{q} = \frac{2 \ell + 1}{2} \,, \quad \ell \in {\mathbb Z} \,.
\end{equation}
In this case we first observe that ${\widehat V}(\xi) $ is anti-periodic for any integer $k$, that is,
\begin{equation}
{\widehat V}(\xi + 2 \pi k ) = - {\widehat V} (\xi) \,.
\end{equation}
Then the spectrum of $J_{new}$ is
\begin{equation}
\mbox{Spec} \left ( J_{new} \right ) \equiv
\left \lbrace
\begin{array}{llll}
\mbox{integer} & \mbox{for} & n \,, k \in \mbox{even integers} \\
\mbox{integer} & \mbox{for} & n \,, k \in \mbox{odd integers} \\
\mbox{half-odd integer} & \mbox{for} & n \in \mbox{odd integers} & k \in \mbox{even integers} \\
\mbox{half-odd integer} & \mbox{for} & n \in \mbox{even integers} & k \in \mbox{odd integers} \\
\end{array}
\right.
\end{equation}
We infer from (\ref{eq:specj1}) and (\ref{eq:specj2}), where $n \in {\mathbb Z}$, that for any odd integer $k$, the action of ${\widehat V}(\xi)$ takes a half-integral angular momentum state to an integral angular momentum state, and an integral angular momentum state to a half-integral angular momentum state. For even $k$, no such shift occurs, that is, integral and half-integral angular momentum states remain as integral and half-integral after the action of ${\widehat V}(\xi)$ for even $k$.
Suppose that we interchange two identical charged particle-vortex composites. General results obtained by \cite{Wilczek} state that the statistics is normal (bosonic or fermionic depending on the statistics of the charged particle) for integral angular momentum eigenvalues and normal statistics is reversed for half-odd integral angular momentum eigenvalues. In the intermediate cases, the composites are neither bosons nor fermions; they are then anyons.
From our results, we see that for $\tilde{q}/q$ half-odd integral and odd $k$, the action of ${\widehat V} (\xi)$ on charged particle-vortex composites changes their angular momentum from integer values to half-odd integer values or vice versa. Therefore ${\widehat V} (\xi)$ flips the statistics of two such identical composites from fermions to bosons or vice versa. For example, if initially the two identical charged particle-vortex composites are fermions, then they behave as bosons after the application of ${\widehat V} (\xi)$ for odd $k$ on each composite, the winding number $n$ being integral.
We may also obtain anyonic charged particle-vortex composites starting from a bosonic or a fermionic identical pair. This can happen when $\tilde{q}/q$ is a generic rational number since
\begin{equation}
{\widehat V}(\xi + 2 \pi k ) = e^{i 2 \pi k \frac{{\tilde q}}{q}} {\widehat V}(\xi) \,.
\end{equation}
\subsection{Adding the Chern-Simons term}
Another interesting possibility is to switch on a Chern-Simons (CS) term. The details of the vortex models and the solutions are somewhat different in this case. There are several models with vortex solutions. Regardless of these details, let us briefly summarize the essential features in this case.
Adding the Chern-Simons term
\begin{equation}
L_{CS} = \frac{\kappa}{2} \varepsilon^{\mu \nu \rho} A_\mu \partial_\nu A_\rho \,,
\end{equation}
the Gauss law constraint becomes
\begin{equation}
\int d^2 x ~ \Lambda \left(\partial_i E_i - \kappa B + J_0 \right) \approx 0 \,, \quad B = \frac{1}{2} \varepsilon^{ij} F_{ij} \,,
\end{equation}
where $\Lambda$ are test functions vanishing at spatial infinity.
As shown in \cite{Manton, Khare}, when the Chern-Simons term $L_{CS}$ is present, there is a non-vanishing electric charge $Q = \int d^2 x J_0$ even for static vortices, which turns out to be proportional to the magnetic flux:
\begin{equation}
Q = \kappa \int d^2 x B = \kappa \frac{2 \pi n}{q} \,.
\end{equation}
Since this result incorporates the quantization of the electric charge into the theory, it provides an additional incentive in the present context.
The charges are
\begin{equation}
Q(\chi) = \int d^2 x~ \left( - E_i \partial_i \chi + \kappa \varepsilon^{ij} \partial_i \chi A_j + \chi J_0\right) \,,
\end{equation}
and they are gauge invariant.
For the operator ${\widehat V}(\xi)$ we find
\begin{equation}
{\widehat V}(\xi + 2 \pi k ) = e^{i 2 \pi k \frac{2 \pi \kappa}{q^2} n} {\widehat V}(\xi) \,.
\end{equation}
We therefore observe that ${\widehat V}(\xi)$ leads in general to anyonic charged particle-vortex composites when $L_{CS}$ is present. Let us also note that for a non-abelian theory it is well-known that the Chern-Simons level $\kappa$ is quantized as $\kappa = \frac{q^2 r}{4 \pi}$ where $r$ is an integer. In such a case appropriate non-Abelian generalisations of ${\widehat V} (\xi)$ could be periodic or anti-periodic depending on $k r n $ being an even integer or an odd integer.
\newpage
{\bf \large Acknowledgements}
\vskip 1em
APB was supported by DOE under grant number DE-FG02-85ER40231 and by the Institute of Mathematical Sciences, Chennai. S. K is supported by TUBiTAK under project No. 110T738 and TUBA-GEBIP program of The Turkish Academy of Sciences. ARQ is supported by CNPq under process number 307760/2009-0.
\vskip 1em
\providecommand{\href}[2]{#2}\begingroup\raggedright
\section{Introduction} \label{sec:intro}
The ability for robots to perform complex tasks is inherently linked to the richness of their environment models. Advances in sensor technology, machine perception, and natural language understanding provide a wealth of data that can be infused into these models. These innovations raise new questions with regards to how to assimilate, manage, and utilize this abundance of knowledge. A fundamental problem is how to reason over this rich information in a manner that enables robots to efficiently plan in diverse environments of varying scales and complexities. Consider the human-robot teaming scenario illustrated in Figure~\ref{fig:motivation}, in which a user instructs the mobile robot to ``navigate to the nearest red ball.''
If we assume that the robot has access to knowledge bases (e.g., campus-level maps) and various sensor measurements (e.g., images, laser scans, audio, etc.) that it has accumulated over time, the problem becomes one of situating or ``grounding'' the instruction in the context of the perceived environment. With a few exceptions~\cite{kuipers04, modayil04, beeson10, pronobis12, walter13}, contemporary methods attempt to fuse the knowledge bases and sensor measurements into a single, flat representation of the environment (i.e., the ``world model'') that expresses all metric~\cite{eustice05, olson06, durrant-whyte06, bailey06, walter07, kaess08, cummins09} as well as semantic~\cite{mozos07, zender08, pronobis12, walter13, hemachandra14, hemachandra15} knowledge gleaned from the observations. There are three fundamental limitations to this approach.
\begin{figure}[!t]
\centering
\subfigure[a mobile robot receiving a natural language instruction]{\includegraphics[width=0.95\linewidth]{figs/fig1.jpg}}
\mbox{
\subfigure[detailed world model]{\includegraphics[width=0.47\linewidth]{figs/fig11.png}}
\subfigure[compact world model]{\includegraphics[width=0.47\linewidth]{figs/fig7.png}}
}
\caption{Our framework learns to build a minimal representation of the environment sufficient to interpret a given natural language instruction. In this example, (a) a mobile robot is directed to ``navigate to the nearest ball in the lab.'' Traditional methods interpret the instruction in the context of (b) an exhaustive world model, whereas our method maintains (c) a compact world model sufficient to ground the provided instruction.}
\label{fig:motivation}
\end{figure}
First, a consistent, high fidelity model of the environment is expensive to maintain in terms of both compute and memory storage. Second, searching over dense models is computationally prohibitive in the context of both planning and natural language understanding~\cite{tellex11a, howard14, chung15}, with costs as high as exponential in the size of the model~\cite{tellex11a}. More generally, it is unnecessarily detailed for most tasks. Ideally, one would reason over the most compact representation of the environment necessary to understand the instruction. However, this representation can not be inferred until after the instruction is received. Third, in situations in which concepts are taught or evolve in-situ from human demonstrations, previous interpretations of the environment may become incorrect or deficient, necessitating a means of revisiting these models as needed.
We propose a framework that explicitly reasons over the relevance of the observations and perceptual classifiers available, so as to learn a task-relevant, scalable environment representation sufficient for planning and natural language understanding. Underlying this method is a learned probabilistic model that can be readily adapted based upon the difficulty of the task and the complexity of the environment. Importantly, the method infers an efficient environment representation online by leveraging a learned model of saliency. This model extracts characteristics of the representation from free-form utterances to ``lazily'' reason over the small subset of available knowledge pertinent to the task. Specifically, we build upon recent work on adapting perception pipelines from natural language instructions~\cite{patki18a} to infer subsets of observations that we use to construct instruction-specific representations of the environment. These induced representations are more efficient to search, yet still express the correct hierarchies and affordances necessary to perform the task. In scenarios where humans can interactively teach robots to classify objects in-situ, past observations of such objects could be added to the world model given utterances that reference the object.
The central contribution of this paper is a framework that exploits three probabilistic graphical models in the form of
Distributed Correspondence Graphs~\cite{howard14} to adaptively model the environment representation in a
task-specific manner. These models are trained from examples of how language maps to the relevant scene semantics, perceptual classifiers, and the symbols used to ground language-based instructions.
Experimental results demonstrate that the ability to dynamically adapt perception and observation models significantly improves the computational efficiency of natural language symbol grounding.
\section{Related Work} \label{sec:related}
Existing language understanding methods reason over a flat, unified symbolic model of the world that expresses the spatial,
semantic, and/or topologic properties of the environment through a representation that is assumed to be globally
consistent.
In practice, these models are typically constructed by running a state-of-the-art SLAM algorithm~\cite{walter07,
eustice05, olson06, kaess08, grisetti09}, which provides flat, globally metric models of the environment that are
limited to spatial information. Semantic and topologic properties are then manually injected to realize a representation
suitable for language grounding.
Localization and mapping methods that attempt to jointly reason over spatial, semantic, and topologic
properties of the environment have also been proposed~\cite{kuipers00, zender08, vasudevan08, pronobis12,
walter13, hemachandra14, duvallet14, hemachandra15}.
With few exceptions~\cite{kuipers00}, however, these methods still attempt to maintain a single globally consistent environment
representation, which is both unnecessarily detailed for language grounding and also resource (e.g., memory) intensive.
Given a natural language utterance, grounding methods~\cite{harnad90,tellex11,howard14} attempt to associate each word in the utterance with its corresponding referent in this environment model and the robot's symbolic action space. Semantic parsing-based methods~\cite{matuszek10,matuszek12a,thomason15} similarly map natural language to meaning representations, typically in the form of a lambda calculus. Early work in grounding~\cite{winograd71,roy03} employs manually engineered correspondences and features between words in a flat representation of the environment. Modern day methods~\cite{kollar10,tellex11a,tellex11,howard14,chung15} take a statistical approach to language grounding (and similarly for inverse grounding~\cite{tellex12,tellex14,gong18}) that employs probabilistic models that relate words to their corresponding referents according to the hierarchical structure of language, enabling the resolution of complex free-form language. These models are typically learned from annotated natural language corpora as well as through interaction with humans~\cite{thomason15,spranger15,she17}. Probabilistic grounding models have been shown to be effective at interpreting cooking instructions~\cite{bollini10}, learning spatial relations in semantic maps \cite{walter13,hemachandra14}, and directing mobile manipulators~\cite{walter14b}, among others.
These methods perform inference over the entire set of state and action symbols, resulting in a computational complexity that is proportional to the power set of objects, regions, and constraints. This limits inference to simple tasks with a few interchangeable constraints or requires access to a set of predefined environment-specific behaviors. To improve scalability, \citet{howard14} developed the Distributed Correspondence Graph (DCG) model that separates inference across conditionally independent constituents of the graph. In effect, this distributes inference across multiple factors in a graphical model, transforming the computational complexity from exponential to linear in the number of symbols. \citet{chung15} propose the Hierarchical Distributed Correspondence Graph (HDCG), which improves the efficiency of inference by learning to construct a more efficient approximation of the space of relevant symbols for probabilistic language grounding. \citet{paul16a} describe a method that partitions the joint distribution into concrete and abstract factors. The algorithm performs inference in two stages per phrase. In the first stage, distributions of concrete symbols are inferred and used to inform sparse approximations of the abstract symbolic representation that are more efficient to search. In the second stage, distributions of abstract symbols are inferred and joined with the concrete symbols to represent the meaning of each phrase.
\section{Technical Approach} \label{sec:approach}
The problem of natural language understanding is commonly framed as inference over a learned distribution that associates linguistic elements with their corresponding symbolic representation of the robot's state and action spaces. More specifically, inference involves reasoning over a representation $\Gamma_{s}$ that symbolizes objects, places, constraints, actions, trajectories, and others concepts expressed by the robot's world model. The set of symbols forms a discrete and finite space in which the instruction can be grounded. The distribution over groundings is conditioned over a parse of the utterance $\bm{\Lambda}$ as well as a world model $\bm{\Upsilon}_{t}$ expressing environment knowledge that may be known a priori $\bm{\Upsilon}_{0}$ or extracted from multimodal observations $\mathbf{z}_{1:t}$ using the classifiers in the robot's perception pipeline $\mathbf{P}$
\begin{equation}
\bm{\Upsilon}_{t} \approx f( \mathbf{z}_{1:t}, \mathbf{P}, \bm{\Upsilon}_{0} ).
\end{equation}
Natural language understanding then follows as maximum a posteriori (MAP) inference over $\Gamma_s$
\begin{equation}
\Gamma_{s}^* = \argmax_{ \gamma_1 ... \gamma_n \in \bm{\Gamma_{s}} } \; p \left( \Gamma_{s} \vert \bm{\Lambda}, \bm{\Upsilon}_t \right).\label{eqn:basic-2}
\end{equation}
Several contemporary approaches~\cite{tellex11a,
howard14, paul16a} formulate this problem as probabilistic inference in a factor graph with a hierarchical structure
dictated by the compositional nature of the utterance, symbolic representation, and environment. This enables the model
to reason about the meaning of particular phrases in terms of the symbolic grounding space based upon their child phrases, and a model of the environment. The parameters of the grounding model (e.g., feature weights in a log-linear model) are learned from annotated corpora that express the meaning of each phrase in the context of the child groundings and phrases.
In practical settings, the space of groundings $\Gamma_s$ is large, the environment $\bm{\Upsilon}_t$ is complex, and the free-form instructions $\Lambda$ may be complex and diverse, making exact inference computationally intractable. To address this, the Distributed Correspondence Graph~\cite{howard14} proposes an approximate factorization of the grounding distribution that affords an efficient inference
\begin{equation}
\Phi_{s}^* = \argmax_{ \phi_{ij} \in \Phi_{s} } \; \prod\limits_{ i = 1 }^{ \lvert \Lambda \rvert } \prod\limits_{ j = 1 }^{ \lvert \Gamma_{s} \rvert } p( \phi_{ij} \vert \gamma_{ij}, \lambda_i, \Phi_{ci}, \bm{\Upsilon}_{t} ).
\label{eqn:dcg_0}
\end{equation}
Formally, DCG inference involves searching for the most likely assignment of boolean correspondence variables $\Phi^*_{s}$~\cite{paul2018efficientplatforms} in the context of the groundings $\gamma_{ij} \in \Gamma_s$, phrases $\lambda_i \in \Lambda$, child correspondences $\Phi_{ci}$, and the world model $\bm{\Upsilon}_{t}$ by maximizing the factorization in Equation~\ref{eqn:dcg_0}. In such a model, a correspondence variable $\phi_{ij}$ being true expresses the fact that the corresponding grounding $\gamma_{ij}$ matches the associated phrase in the command.
The ability to ground free-form instructions is inherently linked to the richness of the robot's environment
representation $\bm{\Upsilon}_{t}$. However, building exhaustively detailed world models using all available knowledge
bases and observations $\mathbf{z}_{1:t}$ is computationally expensive, particularly in large-scale, unstructured
environments. The runtime of common language understanding models such as $\text{G}^3$ are exponential in the cardinality of the symbol space $\lvert \Gamma_{s}\rvert$~\cite{howard14}. DCG improves this complexity to being linear in the size of the world model, however the cost of inference still inhibits real-time human-robot interaction.
In practice, a large fraction of the objects and their corresponding symbols that comprise the inferred world model
are typically inconsequential to the meaning of the utterance. In such cases, there exists a compact environment representation $\bm{\Upsilon}_{t}^{*}$ that is sufficient to interpret the utterance, providing a significant improvement in the computational efficiency of inference
relative to the standard model (Equation~\ref{eqn:dcg_0}).
We propose a probabilistic model that exploits natural language in order to guide the generation of these compact world
models $\bm{\Upsilon}_{t}^{*}$. Integral to this approach is the ability to infer a small, succinct subset of perceptual
classifiers $\mathbf{P}^{*} \in \mathbf{P}$ in a manner that dynamically adapts the robot's perceptual capabilities
according to the current task
\begin{equation}
\mathbf{P}^{*} \approx f\left( \mathbf{P}, \bm{\Lambda} \right), \label{eqn:compact_P}
\end{equation}
resulting in the compact world model
\begin{equation}
\bm{\Upsilon_{t}}^{*} \approx f\left( \mathbf{z}_{1:t}, \mathbf{P}^{*}, \bm{\Upsilon}_{0} \right).
\end{equation}
We further observe that not all observations are necessary to produce this compact representation $\bm{\Upsilon}_{t}^{*}$. For instructions in which the context of the observation may be evident (e.g., ``drive to the nearest red ball in the hallway''), samples outside of these semantically classified regions (i.e., hallways) can be pruned from the space of observations.
As the robot drives through the environment, a real-time scene classifier produces a semantic label (i.e., a scene category) that will be associated with all of the observations (from all available sensors) and pose measurements. The ability to assign a label to the current region in real-time allows us to treat such information as an observation produced by a virtual sensor (i.e., the scene classifier).
We define a minimal set of observations $\mathbf{z}^{*} \in \mathbf{z}_{1:t}$ that, based on their semantic labels, are used to construct the compact representation that is sufficiently detailed to contain all symbols necessary to be expressed
by the natural language symbol grounding model
\begin{subequations}
\begin{align}
\mathbf{z}^{*} &\approx f\left( \mathbf{z}_{1:t}, \bm{\Lambda} \right) \label{eqn:compact_z}\\
%
\bm{\Upsilon_{t}}^{*} &\approx f\left( \mathbf{z}^{*}, \mathbf{P}^{*}, \bm{\Upsilon}_{0} \right). \label{eqn:compact_w}
\end{align}
\end{subequations}
Using the subsampled set of observations to construct a compact representation for symbol grounding transforms the expression for natural language inference (Eqn.~\ref{eqn:dcg_0}) to
\begin{equation}
\Phi_{s}^* = \argmax_{ \phi_{ij} \in \Phi_{s} } \; \prod\limits_{ i = 1 }^{ \lvert \Lambda \rvert } \prod\limits_{ j = 1 }^{ \lvert \Gamma_{s} \rvert } p( \phi_{ij} \vert \gamma_{ij}, \lambda_i, \Phi_{ci}, \bm{\Upsilon_{t}^{*}} ).
\label{eqn:fullmodel}
\end{equation}
This inference problem
requires that we learn three models (Fig.~\ref{fig:architecture}): an adaptive perception model, an observation filtering model, and a natural language
symbol grounding model.
The process for training these models begins with the natural language symbol grounding module, in which symbols that
represent objects, spatial relationships, containers, constraints, actions, and other types are associated with language
~\cite{howard14, paul16a}. The process of training the observation filtering and adaptive perception models requires
one to fit the minimum set of semantic labels and perceptual classifiers.
Such classifiers are the ones that extract the most compact environment representation for each example that will
not prune out any of the annotated ground-truth symbols from the corpus of instructions.
This process yields three separate corpora with common instructions, but different symbolic representations and
annotations that we use to train the three distinct models.
\section{Experimental Setup}
\label{sec:experiments}
Figure~\ref{fig:architecture} illustrates the software architecture that we implemented for experimental evaluation of the proposed algorithm. In this architecture, the robot stores the sensors measurements in the observation filtering module.
When the human provides a textual instruction, we convert the text into a parse tree $\bm{\Lambda}$ that is provided to the three
natural language understanding modules. The \textit{scene semantics} natural language understanding module extracts the salient
scene semantics $\bm{\Gamma_{z}}$ pertaining to the instruction. The observations filtering module then extracts a subset of observations $\mathbf{z}^{*}$ (Eqn.~\ref{eqn:compact_z}) based on the inferred scene semantic label(s). The \textit{perception} natural language understanding module extracts the symbols representing the classifiers (Eqn.~\ref{eqn:compact_P}) that are necessary to detect the objects that are relevant to the natural language instruction. This information is then passed to the adaptive perception node that extracts an approximation of the environment model $\bm{\Upsilon_{t}}^{*}$ (Eqn.~\ref{eqn:compact_w}) from $\mathbf{z}^{*}$ using the sub-sampled classifiers $\mathbf{P}^{*}$. The \textit{symbol grounding} natural language understanding module uses the parse tree and the world model approximation to extract a distribution of symbols that represents the robot behavior $\bm{\Gamma_{s}}$ (Eqn.~\ref{eqn:fullmodel}).
\begin{figure}[!t]
\centering
\vspace{6pt}
\begin{tikzpicture}
\node[rectangle,font=\footnotesize,draw=black, text=white, fill=black!40!cyan, line width=0.03cm, text width=1.4cm, align=center] at (3.65,-0.75) (ap) {adaptive perception};
\node[rectangle,font=\footnotesize,draw=black, line width=0.03cm, text width=1cm, align=center] at (-3.0,0) (parser) {parser};
\node[rectangle,font=\footnotesize,draw=black, text=white, fill=black!40!green, line width=0.03cm, text width=1.4cm, align=center] at (3.65,1.5) (sc) {observation filtering};
\node[rectangle,font=\footnotesize,draw=black, line width=0.03cm, text width=1cm, align=center] at (-3.0,1.5) (robot) {robot};
\node[rectangle,font=\footnotesize,draw=black, line width=0.03cm, text width=1cm, align=center] at (-3.0,-1.5) (human) {human};
\node[rectangle,font=\footnotesize,draw=black, text=white, fill=black!40!green, line width=0.06cm, text width=2.15cm, align=center] at (0.25,1.5) (nlsgsem) {natural language understanding\\ (scene semantic)};
\node[rectangle,font=\footnotesize,draw=black, text=white, fill=black!40!cyan, line width=0.06cm, text width=2.15cm, align=center] at (0.25,0) (nlsgper) {natural language understanding (perception)};
\node[rectangle,font=\footnotesize,draw=black, text=white, fill=black!30!orange, line width=0.06cm, text width=2.15cm, align=center] at (0.25,-1.5) (nlsggrd) {natural language understanding (grounding)};
\draw[->,line width=1pt] (nlsgsem) to (sc);
\draw[->,line width=1pt] (sc) to (ap);
\draw[->,line width=1pt] (human) to (parser);
\draw[->,line width=1pt] (parser) to (-3.0,1) to (-1.25,1) to (-1.25,-1.5) to (nlsggrd);
\draw[->,line width=1pt] (parser) to (-3.0,1) to (-1.25,1) to (-1.25,0) to (nlsgper);
\draw[->,line width=1pt] (parser) to (-3.0,1) to (-1.25,1) to (-1.25,1.5) to (nlsgsem);
\draw[->,line width=1pt] (-3.125,1.7) to (-3.125,2.25) to (3.65,2.25) to (sc);
\draw[->,line width=1pt] (nlsgper) to (3.3,0) to (3.3,-0.325);
\draw[->,line width=1pt] (ap) to (3.65,-1.5) to (nlsggrd);
\draw[->,line width=1pt] (nlsggrd) to (0.25,-2.5) to (-3.875,-2.5) to (-3.875,1.5) to (robot);
\node[font=\scriptsize,text width=2cm,align=center] at (-2.15,0.75) (parse tree) {parse tree $\left(\bm{\Lambda}\right)$};
\node[font=\scriptsize,text width=1.5cm] at (-2.15,-0.75) (instruction) {instruction};
\node[font=\scriptsize,text width=4.5cm] at (-1.5,-2.25) (command) {command symbols $\left(\bm{\Gamma_{s}}\right)$};
\node[font=\scriptsize,text width=1.625cm,align=left] at (2.5,1.0) (semantic) {semantic\\ symbols\\ $\left(\bm{\Gamma_{z}}\right)$};
\node[font=\scriptsize,text width=1.75cm,align=left] at (2.55,-0.5) (perception) {perception\\ symbols \\$\left(\bm{\Gamma_{p}}\right)$};
\node[font=\scriptsize,text width=3cm] at (-1.5,2.0) (observations) {observations $\left(\mathbf{z}_{1:t}\right)$};
\node[font=\scriptsize,text width=1.0cm] at (4.3,0.5) (filteredobservations) {filtered obs.\ $\left(\mathbf{z}^{*}\right)$};
\node[font=\scriptsize,text width=1.625cm,align=left] at (2.5,-2.0) (adapted environment) {environment model\\ $\left(\bm{\Upsilon}_{t}^{*}\right)$};
\end{tikzpicture}
\caption{The system architecture for language-guided observation filtering, adaptive perception, and natural language symbol grounding. The three natural language understanding models that are learned from the annotated instructions are highlighted in bold.}
\label{fig:architecture}
\end{figure}
All of the natural language understanding modules are implemented as Distributed Correspondence Graphs~\cite{howard14}
with symbolic representations and features adapted for each of the scene semantics, perception, and grounding domains.
We trained the natural language understanding modules with a synthetic corpus of annotated examples consistent with example robot instructions, such as ``navigate to the nearest cone in the parking lot'' or ``navigate to the farthest blue ball.'' Approximately 500 instructions were annotated for the scene semantic, perception, and grounding models in accordance with their symbolic representation. The software was integrated onto two Clearpath Robotics Husky A200 Unmanned Ground Vehicles (Fig.~\ref{fig:motivation}) and used for dataset collection at two distinct sites. Visual observations were collected using the RealSense D435 RGB-D sensor. Robot localization was performed using laser-scan matching with a planar LIDAR sensor.
In these experiments, we use eight semantic labels such as ``kitchen,'' ``laboratory,'' ``parking lot,'' etc., which are
associated with sensor observations. To detect the semantics of the scene, we use a YOLO object detector~\cite{redmon2017yolo9000} trained on the COCO dataset~\cite{lin2014microsoft}. Object detections are passed to a scene classifier. The scene classifier then assigns a semantic label to each observation based on an object co-occurrence model that relates objects and scene classes. Objects that are not characteristic of any particular scene (e.g., person, cat, or horse) are ignored. The perception pipeline within the adaptive perception node contains multiple elements, including a YOLO-based object detector, a noise removal filter that refines the segmented object clusters, a 3D bounding box detector, an LUV color space-based color detector, and a 3-DOF pose detector. We limit the sensing range to 3.5\,m to avoid processing noisy point cloud data.
The experiments were designed to explore the impact of observation filtering and adaptive perception on the task of mobile robot instruction following. We quantify the performance of the system using metrics of computational efficiency of perception for
symbol grounding under the assumption of lazy evaluation of the observations.
\section{Results} \label{sec:results}
This section presents results highlighting the performance of different aspects of the learned models in our proposed architecture. First, we highlight the computational efficiency of adaptive perception applied in the navigation domain. Second, we demonstrate how observation filtering reduces the number of observations we need to reason over in order to extract task-relevant objects. Finally, we demonstrate the efficiency gains achieved by combining these two strategies in order to generate compact world representations.
\subsection{Adaptive Perception}
\label{sec:results-adaptive-perception}
In previous experiments~\cite{patki18a}, we observed that language grounding was faster in environments inferred by adaptive perception than non-adaptive perception. Adaptive perception itself was also found to be faster than its non-adaptive counterpart. To verify the predicted behavior of the adaptive perception pipeline, we analyzed its impact on the runtime of perception by evaluating it on the datasets collected at two different sites for six different instructions. Table~\ref{table:results-1} presents the results demonstrating the impact of adaptive perception (AP) on the perception runtime against the standard baseline (B) that corresponds to the standard approach of invoking all classifiers and observations. Table~\ref{table:results-2} shows the impact of adaptive perception on the compactness of the approximated world representations. Consistent with previous evaluations~\cite{patki18a}, reducing the cardinality of the world model improves the runtime of language grounding.
\begin{figure}[!t]
\centering
\vspace{6pt}
\subfigure[exhaustive perception: detecting all objects]{\includegraphics[width=1.0\linewidth]{figs/fig3.png}\label{fig:exhaustive}}\\
%
\subfigure[adaptive perception: detecting only cups]{\includegraphics[width=1.0\linewidth]{figs/fig2.png}\label{fig:adaptive}}\\
%
\caption{Impact of adaptive perception for the command ``drive to the farthest cup in the kitchen.'' A standard approach requires generating and reasoning over \subref{fig:exhaustive} an exhaustive map generated using all of the available object detectors, resulting in a map with $37$ objects and a runtime of $408$\,s. In contrast, our adaptive method generates \subref{fig:adaptive} a more compact map only using detectors relevant to the command, resulting in a map with $11$ objects and a runtime of $225$\,s.}
\label{fig:adaptive-perception-navigation-results}
\end{figure}
Figure \ref{fig:adaptive-perception-navigation-results} demonstrates the impact of adaptive perception for the example instruction ``drive to the nearest cup in the kitchen.'' In this particular example, the model is able to independently evaluate which object detectors should be engaged to construct an instruction-specific world model. By using the information contained within the instructions, our method results in a $36\%$ reduction in the time required to build an environment representation for inferring the instruction ``go to the nearest cup in the kitchen.'' This demonstrates how inferring the classifiers useful for generating task-relevant compact representations can reduce the runtime requirements of robot perception. As we have seen~\cite{patki18a}, the reduction in runtime is proportional to the sparsity of classifiers necessary to extract a sufficiently detailed environment model that is suitable for the grounding of specific instructions.
As more complex detectors are considered (e.g., ICP-based point cloud matching), we expect to find that these differences will become increasingly significant. For example, an operator performing service on a truck may require a robot to ``turn the top-left screw on the back panel by forty-five degrees'' at one point during an activity, while it may also ask the same robot to ``unload the truck of all of the pallets'' at a later time. The computational requirements of the multitude of classifiers necessary to generate a consistent interpretation of the environment that is sophisticated enough to perform both of these tasks may be too burdensome for a robot to extract in real-time. We hypothesize that as the interactions approach such diversity and complexity, a model that extracts the salient information from the command and constructs a representation suitable for natural language symbol grounding will outperform non-adaptive representations of the environment.
\subsection{Observation Filtering}
\label{sec:results-observation-filtering}
\begin{table}[!t]
\centering
\vspace{6pt}
\caption{ Improvement in the perception runtime at sites 1 \& 2 }
\label{table:results-1}
\setlength{\tabcolsep}{4.0pt}
{\scriptsize
\begin{tabularx}{1.0\linewidth}{lccccc}
\toprule
& & \multicolumn{4}{c}{ (runtime in seconds) } \\
Instruction & Site & B & OF & AP & OF+AP \\
\midrule
``go to the farthest umbrella in the hallway'' & 1 & 401 & 60 & 242 & 55 \\
``go to the nearest suitcase in the parking lot'' & 2 & 306 & 136 & 220 & 99 \\
``go to the farthest cup in the kitchen'' & 1 & 401 & 146 & 225 & 75 \\
``go to the nearest keyboard in the office'' & 2 & 306 & 74 & 222 & 46 \\
``go to the nearest ball in the hallway'' & 1 & 401 & 59 & 217 & 38 \\
``go to the farthest ball in the lab'' & 2 & 306 & 67 & 206 & 48 \\
\bottomrule
\end{tabularx}}
\end{table}
\begin{table}[!t]
\centering
\caption{ Improvement in the representation compactness at sites 1 \& 2 }
\label{table:results-2}
\setlength{\tabcolsep}{4.5pt}
{\scriptsize
\begin{tabularx}{1.0\linewidth}{lccccc}
\toprule
& & \multicolumn{4}{c}{ (\# of detected objects) } \\
Instruction & Site & B & OF & AP & OF+AP \\
\midrule
``go to the farthest umbrella in the hallway'' & 1 & 37 & 4 & 2 & 2 \\
``go to the nearest suitcase in the parking lot'' & 2 & 36 & 3 & 3 & 2 \\
``go to the farthest cup in the kitchen'' & 1 & 37 & 29 & 11 & 9 \\
``go to the nearest keyboard in the office'' & 2 & 36 & 29 & 3 & 3 \\
``go to the nearest ball in the hallway'' & 1 & 37 & 4 & 1 & 1 \\
``go to the farthest ball in the lab'' & 2 & 36 & 7 & 7 & 7 \\
\bottomrule
\end{tabularx}}
\end{table}
To explore the impact of observation filtering, we evaluated the runtime performance of perception on the same six instructions explored for the adaptive perception experiment. Table~\ref{table:results-1} presents the results that reveal the impact of observation filtering (OF) against the standard baseline (B). This result demonstrates how removing observations inferred to be unnecessary to extract the meaning of the natural language instruction can improve the runtime performance of robot perception. The results demonstrate a $55\%$ reduction in runtime for the instruction ``go to the nearest suitcase in the parking lot'' over the baseline. The improvement is a function of the diversity of scene labels across all observations. Table~\ref{table:results-2} shows the impact of observation filtering on the compactness of the approximated world representation. In this case, the improvement is a function of the distribution of objects across different regions in the world.
\subsection{Observation Filtering with Adaptive Perception}
\begin{figure*}[!t]
\centering
\subfigure[exhaustive environment model]{\includegraphics[width=0.325\linewidth]{figs/fig8.png}\label{fig:exhaustive-map-ttic}}
\subfigure[semantic scene labels]{\includegraphics[width=0.325\linewidth]{figs/fig9.png}\label{fig:scene-labels-ttic}}
\subfigure[compact environment model inferred for the command ``drive to the farthest cup in the kitchen'']{\includegraphics[width=0.325\linewidth]{figs/fig10.png}\label{fig:compact-map-ttic}}
%
\subfigure[exhaustive environment model]{\includegraphics[width=0.325\linewidth]{figs/fig4.png}\label{fig:exhaustive-map-roc}}
\subfigure[semantic scene labels]{\includegraphics[width=0.325\linewidth]{figs/fig5.png}\label{fig:scene-labels-roc}}
\subfigure[compact environment model inferred for the command ``drive to the nearest ball in the lab'']{\includegraphics[width=0.325\linewidth]{figs/fig6.png}\label{fig:compact-map-roc}}\\
%
\caption{A visualization of environment representations for Site 1 (top) and Site 2 (bottom). The renderings in \subref{fig:scene-labels-ttic} and \subref{fig:scene-labels-roc} depict the scene labels. The standard approach of employing all observations and object classifiers results in \subref{fig:exhaustive-map-ttic}, \subref{fig:exhaustive-map-roc} an exhaustive representation of the environment. In contrast, inferring the set of observations and detectors relevant to the command yields \subref{fig:compact-map-ttic}, \subref{fig:compact-map-roc} compact environment models that afford more efficient grounding.}
\label{fig:observation-filtering-navigation-results}
\end{figure*}
The last model that we considered combines observation filtering with adaptive perception. The results in Table~\ref{table:results-1} show the improvement of observation filtering with adaptive perception (OF+AP) against the standard baseline (B). As expected, combining both of these approaches reduces the time required to extract a suitable world model for natural language symbol grounding in all six scenarios. An example is depicted in Figure~\ref{fig:observation-filtering-navigation-results}. In the best case, we observed a $90\%$ improvement in runtime performance for the instruction ``go to the nearest ball in the hallway.'' Table~\ref{table:results-2} lists the number of objects extracted by the perception pipeline. Reducing the number of objects significantly improves the runtime of symbol grounding, which is at best linear~\cite{howard14,chung15} and at worst exponential~\cite{tellex11} in the size of the world model.
\section{Conclusions} \label{sec:conclusions}
In this paper, we presented a novel framework that improves the efficiency of natural language understanding by generating and reasoning over a compact, instruction-specific world model. Underlying the framework are three primary methods that exploit the structure of language to facilitate inference. First, we use language to reduce the set of all observations available to the robot by extracting semantic labels for the context in which the salient observations occur. Second, language is used to infer a subset of perceptual classifiers that extract a compact but sufficiently complex environment model that is suitable for interpreting the meaning of the instruction. Third, language is used in the context of the compact environment representation to infer the symbolic meaning of the instruction. Experimental results demonstrate how adaptive perception and observation filtering improve the computational efficiency of inference without affecting the accuracy of language grounding. In ongoing work, we are exploring methods to improve the robustness of semantic label classification for observations, including per-pixel semantic classification approaches.
This work also presents a number of interesting areas of future research. In the examples considered here, we did not exploit prior knowledge about the environment. However, one can easily extrapolate how using past compact representations to seed future models might mitigate the need to re-classify all objects for every instruction. A model that does not discard the information, but incrementally builds a rich spatial-semantic environment model over time is likely to be highly effective and efficient for human-robot interaction in complex environments with diverse tasks. Training and evaluating the performance of language models that use corpora collected from studies involving human-robot interaction and more complex tasks, robots, and environments that exploit differences in scale remain as future work. Such additional experiments would further characterize the performance of the proposed model and enrich our understanding of how to best construct efficient, hierarchical representations of environments for multi-modal human-robot interaction.
\section{Acknowledgements} \label{sec:acknowledgements}
This work was supported in part by the National Science Foundation under grants IIS-1638072 and IIS-1637813, by the Robotics Consortium of the U.S. Army Research Laboratory under the Collaborative Technology Alliance Program Cooperative Agreement W911NF-10-2-0016, and by ARO grants W911NF-15-1-0402 and W911NF-17-1-0188.
\bibliographystyle{IEEEtranN}
{\small
|
1,314,259,995,048 | arxiv | \section{Introduction}
Data can be broadly categorized into two categories, i.e., Structured Data and Unstructured Data. Structured data can be defined as text which consists of certain patterns and is highly organized. Since structured data has a defined outline and framework, machines can search and navigate through it with ease. Examples of such data would be finance account numbers, date formats, etc. Unstructured data, as the name suggests, although present in abundance, is very difficult to process as it does not conform to a given set of rules. Examples of such data would be product reviews on e-commerce, emails, etc. Structured data analysis has become a mature industry today.
Analysis of unstructured data, which comprises 80\% of enterprise data, is where the actual challenge lies, and the latest trend concentrates on exploiting this resource. Unstructured text contains huge amounts of unrelated and diverse information with no framework or outline for machines to be able to identify any patterns or structure in order to locate the said information. As far as unstructured text on mobile devices is concerned, it turns out that users store even more random information in the form of such text, e.g., passwords, OTPs, blog texts, to-do lists, emails, drafts for speeches, etc. This results in data of manifold nature with varied forms and lengths of text.
Our proposed system draws on knowledge of concepts, encoded in a hierarchical common-sense knowledge database known as ConceptNet\footnote{http://conceptnet.io/} to provide enhanced tag extraction capabilities. Our approach uses Deep Learning to provide abstractive extraction of concepts by using knowledge graph embeddings to extract tags from keywords while ensuring on-device efficiency by keeping the entire pipeline computationally inexpensive. Before using the knowledge graph CNN we also use Part of Speech (POS) to extract words which are nouns and proper nouns which are further fed as input to our model. Apart from these, we have also proposed a custom ranking algorithm to extract the top n tags generated from the given data.
The remaining part of the paper is organized in the following
manner: Section II talks about the related works and how
our work differs from them; Section III describes the overall
pipeline model and the techniques employed; Section IV talks
about the datasets used to either evaluate the performance of this pipeline or used as a part of this pipeline; Section V provides the experiments conducted; Section VI talks about the methods with which our pipeline has been compared to; Section VII shows the results obtained after experimentation; Section VIII talks about the applications of this pipeline in real world scenarios and Section IX finally concludes the paper and lists down some improvements which could be researched in future.
\section{Related Work}
Keyword extraction is an important task in the area of text mining. Extracting a small set of keywords from a text or document can help in various tasks in understanding the document \cite{hulth2006study}.
None of the prior works, to the best of our knowledge, have shared results on a user Notes application, which is one of the most prominent sources of unstructured text on-device. Additionally, our work targets predicting results with an entirely on-device pipeline. We consider this necessary to maintain user privacy by not uploading personal data to any server.
\begin{figure}
\centering
\includegraphics[width=\linewidth]{Flow_Document_g1.png}
\caption{Proposed System}
\label{fig:method}
\end{figure}
Several previous works have approached keyword extraction from short text using various statistical approaches such as TF-IDF or Bag of Words on features extracted from text. Many of these works focus on methods of selecting alternative input features. These approaches mostly rely on word frequencies and the keywords extracted are not always relevant to the user. Furnkranz et al. (1998) \cite{furnkranz1998case} uses all noun phrases matching any of a number of syntactic heuristics as features. Aizawa (2001) \cite{aizawa2001linguistic} extracts POS entities, by matching pre-defined patterns. Their representation shows a small improvement in results. In these works, it is unclear how many keywords are extracted.
Witten et al. \cite{witten2005kea} use a key phrase extraction algorithm, called KEA, based on Naive Bayes algorithm. Their algorithm learns a model for identifying extracted keywords during training, which is then applied to finding keywords from new documents. Tang et al. (2004) \cite{tang2004loss} also apply Bayesian decision theory for keyword extraction using word linkage information and thus using context features. However, these methods limit themselves to extracting keywords present in the text, and cannot extract keywords or tags based on the concept(s) present in the text.
Another interesting approach is depicted in Sahlgren and Coster (2004) \cite{sahlgren2004using} where they compute a concept-based representation from word co-occurrence data, which is combined with full-text representation. They show that this combination improved performance for their task of text categorization. Some other approaches \cite{ko2004improving} also make use of text summarization methods to find sentences containing relevant keywords. Then they use a scoring mechanism to give these sentences higher weight in their feature vectors. In this paper, we propose a fast, novel system for on-device extraction of keywords and generation of tags for unstructured text which generates tags from entities and concepts present in the text, and ranks those in order to enhance user experience.
\section{Proposed System}\label{sec:PS}
Fig. \ref{fig:method} shows the pipeline of the proposed system. As we can see, an unstructured text is sent as input to a POS Tagger from which a set of entities are extracted. Depending on whether those set of entities are present in the knowledge base or not, a set of similar entities is obtained. Finally, these set of entities are passed to a graph CNN model to extract the relevant tags in the form of keywords and concepts. Once these tags have been extracted, it is passed to a custom ranking method which reorganizes these set of tags on the basis of their priority. The in depth details of each component of the pipeline are mentioned in the coming sub-sections.
\begin{figure}
\centering
\includegraphics[width=0.9\linewidth]{pos_g1.png}
\caption{Bi-LSTM + CRF with Character and Word (GloVe) embeddings}
\label{fig:pos2}
\end{figure}
\subsection{Part Of Speech Tagging}
For building a POS model, a model similar to Lample et al. \cite{lample2016neural} and Ma and Hovy \cite{ma2016end} is used. Firstly, a Bi-LSTM layer is trained to get character embeddings from the train data. This gives a character-based representation of each word. Next this is concatenated with standard GloVe (50 dimension vectors trained on 6 billion corpus of Wikipedia 2014 and Gigaword5) word vector representation. This gives us the contextual representation of each word. Then, a Bi-LSTM is run on each sentence represented by the above contextual representation. This final output from the model is decoded with a linear chain CRF using Viterbi algorithm. For on-device inference, the Viterbi decode algorithm is implemented in Java to be run on android devices and get the final output. The model is quantized to reduce its size and make it feasible for on-device requirements.
\begin{figure*}
\centering
\includegraphics[width=0.8\linewidth]{Architecture_g.png}
\caption{Architecture of the Knowledge Graph CNN}
\label{fig:Architecture}
\end{figure*}
We used the tagged dataset from the CoNLL-2003 shared task for training of the above neural network. The model uses word embeddings of dimension 100, character embeddings of dimension 50 and has 100 LSTM units. The final POS model used on-device had an accuracy of 97.21\% on the test dataset. When an input text is passed to the POS model, the extracted proper nouns are added to the final set of tags. The verbs are lemmatized and passed alongside the nouns to the neural network for inferencing concepts from the commonsense based knowledge graph. Fig. \ref{fig:pos2} shows the architecture of the POS model used.
\subsection{CNN-based Knowledge Graph Learning}\label{sub:CNNKB}
Our approach uses a CNN-based knowledge graph model as explained in Feipeng Zhao et al \cite{zhao2017convolutional}. In this architecture, both embedding and CNN based score function are unknown. The model develops the entity and relation embeddings whilst also learning the knowledge graph structure using triplets (h, r, t) where h and t are the head and tail entities and r is the relationship between them.
Given any (h, r, t), the three embedding vectors are stacked over one another and 3 x 3 kernels are used for convolution over the combined matrix of size 3 x [embedding dimension]. CNNs when applied on images go through rows and columns on the image pixels but in our case they go over the locally connected structure of the head, relation and tail together. The CNN output is then passed to the max-pooling layer to get subsamples. The max-pooling filter size is set to 1 x 2 with stride as 2. Dropout is added for regularization. The dropout probability is set to 0.5 during training. The final layer of the network is a logistic-regression layer. Positive (Correct) triplets have score 1 and Negative (Incorrect) triplets have score 0. The final negative score is a tanh activation of the regression layer. The loss function is given by the formula
\begin{equation}
\sum_{(h,r,t)\epsilon S}\sum_{(h',r,t')\epsilon S'_{(h,r,t)}}
[\gamma + cnn(h,r,t) - cnn(h',r,t')]_{+}
\end{equation}
where \emph{h} is the head entity, \emph{r} is the relation, \emph{t} is the tail entity, \emph{h'} is the corrupted head entity, \emph{t}' is the corrupted tail entity, \emph{S} is the set of golden triplets, \emph{S'} is the set of corrupted triplets, $\gamma$ is the margin hyperparameter of the network, \emph{cnn(h, r, t)} is the score of a golden triplet and \emph{cnn(h',r,t')} is the score of a corrupted triplet.
Mini-batch stochastic gradient descent is used as an optimizer for the loss function. Also, we require negative sampling in order to calculate the score for each positive triplet. The embedding and CNN parameters are initialized with random values. Training is fixed at a certain number of epochs based on the size of dataset used. The architecture is shown in Fig. \ref{fig:Architecture}
The training data (knowledge graph) provided to this model is filtered from the vast ConceptNet dataset as explained in Section \ref{sec:Datasets}. Our knowledge graph contains triplets of a summarizing nature and is specifically filtered for this task of generating concepts from unstructured text. Other methods use standard datasets for training and validation sets but this task required the creation of a hierarchical knowledge graph which we split in a 9:1 ratio during the model training phase.
The purpose of using a knowledge graph to generate tags is to ensure that the approach is not confined to the input text. The knowledge graph facilitates real world knowledge being applied to the extraction process to emulate human behaviour when trying to understand the same input text. Another reason for using a CNN based learning method is that this pipeline was designed for on-device inference where such models are feasible and efficient.
\subsection{Entity Similarity Module}\label{sub:Similar}
Due to the architecture being deployed on-device, the constraints of model size and inference time are strict. This results in restrictions on how deep the CNN architecture can be since heavy model sizes prevent on-device deployment. This necessitated the use of GloVe embeddings to find similar words to entities outside the knowledge graph in order to be able to incorporate such entities. If an entity outside the vocabulary of the knowledge graph is encountered, we extract words similar to the entity in question using cosine similarity.
Table \ref{tab:Metrics} in Section \ref{sub:Model Parameters} shows the various on-device metrics of the models that have been experimented with and establishes the need for this alternative approach to incorporating large knowledge graphs on-device.
\subsection{Concept Selection Module}\label{sub:Selection}
When a word from a given text is passed through ConceptNet,
it gives a number of concepts corresponding to
that word. For example, when we pass the word car through ConceptNet,
we get concepts such as \emph{artifact}, \emph{tool}, \emph{vehicle}, \emph{item}, \emph{machine}.
Most of these concepts are generally irrelevant with respect
to the general context of the text. Hence, in order to choose
the most appropriate concept, we calculate context factor. If
$c_i$ represents a concept from set of extracted concepts for a
word, $w_j$ represents an output of POS, \emph{tf($w_j$)} represents term
frequency of word $w_j$ and \emph{N} represents the length of text, then
context factor \emph{$L_{contx}(c_i)$} can be defined as
\begin{equation}
L_{contx}( c_{i}) =\frac{1}{N} \ \sum _{j} tf( w_{j}) \ *\ CosineSim( w_{j} ,c_{i})
\end{equation}
where \emph{CosineSim} is the cosine similarity between word $w_j$ and
concept $c_i$, calculated using GloVe embeddings.
We choose the concept $c_i$ with maximum \emph{$L_{contx}$} value
as the most appropriate concept for a given word $w_i$. This
context factor helps us in analysing the general context of the
entire text while selecting a concept for a word. For example,
consider the text “Typically, the driver is responsible for all
damage to the car during the tenure of the lease, even if they
are not at fault.” In this text, \emph{$L_{contx}$(vehicle)} is maximum with
value 0.70 for the word car. But for the sentence “Machine was a very popular term in my family but car was the first machine that actually caught my imagination and I can safely say that it is my favorite machine till date.”, \emph{$L_{contx}$(machine)} is maximum with value 0.55 and hence becomes the extracted concept for the word car.
\subsection{Ranking Of Tags}
There can be a scenario that for considerably long unstructured text, we may end up extracting large number of tags, say up to 20-30 tags which can in turn prove to be another form of clutter for the user. Hence, in order to enhance user experience it is utterly important to rank and select only a handful of extracted tags for any given text. In this section, we present a custom ranking algorithm and later we also present evidence in the form of results obtained on various datasets as a justification for the hypothesis on which this algorithm is based on.
The hypothesis on which the algorithm is based is that if the tag generating word is found in the vicinity of a large number of other tag generating words for an input text, it will be given a higher priority while ranking the tags. A tag generating word is simply a word from which a tag is extracted. On the basis of our hypothesis, we calculate a ranking factor RF given by the equation
\begin{equation}
RF_{t_{j}}( t_{i}) =\sum _{j}\frac{C( w_{i} ,w_{j})}{F( w_{i}) *F( w_{j}) *\alpha }
\end{equation}
where \emph{C($w_i, w_j$)} is the co occurrence of words from which tags $t_i$ and $t_j$ have been extracted for each sentence, \emph{F($w_i$)} and \emph{F($w_j$)} are the frequencies of words $w_i$ and $w_j$ in the unstructured text and $\alpha$ is the average number of words occurring in the unstructured text between $w_i$ and $w_j$ plus 1. $RF_{t_j}$\emph{($t_i$)} is the ranking factor of tag $t_i$ with respect to tag $t_j$ . The tags are then ranked in descending order of \emph{RF} values.
\begin{table}
\centering
\caption{Table for 5 $\times$ 5 Ranking matrix}
\resizebox{\columnwidth}{!}{\begin{tabular}{|c|c|c|c|c|c|}
\hline
\textbf{} & change & vehicle & contract & responsibility & payment \\ \hline
change & 1.0 & 0.11 & 0.04 & 0.02 & 0.06 \\ \hline
vehicle & 0.11 & 1.0 & 0.13 & 0.04 & 0.02 \\ \hline
contract & 0.04 & 0.13 & 1.0 & 0.07 & 0.08 \\ \hline
responsibility & 0.02 & 0.04 & 0.07 & 1.0 & 0 \\ \hline
payment & 0.06 & 0.02 & 0.08 & 0 & 1.0 \\ \hline
\end{tabular}}
\label{tab:Ranking}
\end{table}
In our custom ranking method, the co-occurrence value in the equation is determined by calculating the number of sentences in which both words $w_i$ and $w_j$ are found. The frequency for words $w_i$ and $w_j$ are calculated by taking the complete unstructured text into consideration. Another factor $\alpha$ is present which adds extra weightage to the extracted tags. This factor accounts for the distance between the words that generate tags $t_i$ and $t_j$ from the knowledge graph embeddings. The distance measure can be defined as the number of words between the words in the unstructured text from which tags $t_i$ and $t_j$ are generated. Since, our hypothesis is based on giving highest priority to a tag which occurs in the neighborhood of most of the other tags, this factor helps in achieving the same.
Here is a small example explaining the working of this algorithm. Consider the note “Typically, the driver is responsible for all damage to the car during the tenure of the lease, even if they are not at fault. Your own insurance may apply to pay for the damage. Also, the credit-card you used to pay for the lease may have supplemental insurance for damage to the car.” After this text is passed through our pipeline, the tags extracted are \emph{responsibility}, \emph{contract}, \emph{payment}, \emph{vehicle} and \emph{change}. For visualization, we construct a $5 \times 5$ ranking matrix, calculating the relatedness of these extracted tags as shown in Table~\ref{tab:Ranking}.
Finally, considering the values in the ranking matrix, the pairs $w_{i}\rightarrow t_{i}$ are ranked as
lease$\rightarrow$contract \textgreater \ car$\rightarrow$vehicle \textgreater \ damage$\rightarrow$change \textgreater \ pay$\rightarrow$payment \textgreater \ fault$\rightarrow$responsibility. Here $w_i$ is the word in the input text and $t_i$ is the extracted tag.
\section{Datasets}\label{sec:Datasets}
The dataset used for training the Convolutional Neural Knowledge Graph Learning model is ConceptNet.
The ConceptNet knowledge graph contains triplets\emph{(h, r, t)}
from various languages with a huge variety of concepts. Due
to on-device constraints, the entire ConceptNet dataset is too
vast to be inferred from. As a result of which we created
our own pruned ConceptNet dataset. We used a set of rules
in order to finally arrive at our filtered ConceptNet. The first
filter we added to select a smaller set of data was to only
select those triplets that were in the English language. Another
selection technique we used was to select relationships \emph{(r in
(h, r, t))} that were such that the head entity is a superset
or parent of the tail entity. In order to ensure that the tags
extracted from unstructured textual data are of a summarizing
nature, we added this constraint. The 4 relations we used to
extract the triplets were \emph{IsA}, \emph{DerivedFrom}, \emph{InstanceOf} and
\emph{PartOf}. Other relations in the knowledge graph that were
of a slightly less summarizing nature were ambiguous and
were dropped. The ConceptNet knowledge graph also incorporated some
DBpedia relations that were filtered out since they were not
that relevant with respect to our work. This narrows down
the dataset to a few hundred thousand triplets. But this is
still too vast to be inferred from an on-device perspective
due to the model being around 200 MB after quantization
and compression. Therefore, we decided to manually select a
smaller dataset of most commonly used and relevant concepts
from the knowledge graph. This results in a dataset of around
15K triplets which reduced the model size to 2 MB after
quantization and compression.
Apart from the dataset used for
training the Graph-CNN, we have used open source datasets
of Amazon Reviews\footnote{https://nijianmo.github.io/amazon/index.html\#files} and Enron Emails\footnote{https://www.kaggle.com/wcukierski/enron-email-dataset} for benchmarking
our proposed system. We also used a dataset of user Notes application to evaluate the feasibility of the proposed pipeline. The
Amazon Review dataset consists of short and long texts of
user reviews on various shopping categories. The Enron Email
dataset contains emails generated by employees of the Enron
Corporation. The Notes application dataset consists of Notes of variable lengths ranging from short to-do lists to
lengthy email drafts.
\section{Experiments}\label{sec:Experiments}
\subsection{Evaluation Metric for Quality of Tags}
Since the tags extracted from our text consist mostly of
concepts which are not the exact words present in our text,
we cannot use gold standard datasets to compare our method.
Another comparison method involves annotators judging the
most appropriate tags for a given piece of text but this ends
up incorporating a bias towards the authors’ own methodology
and we clearly wanted to avoid that. Inspired by Bellaachia et al. \cite{bellaachia2012ne}, we introduce
a new way to compare the quality of tags generated by various
methods. We use volume of Google search query to get an idea
about the popularity of a tag extracted.
The rationale behind using this approach is that if a keyword
is more frequently used by the masses, it must have more
significance while representing a piece of text. On average
our method generated 9--11 tags per test sample in the datasets
mentioned in the above section. We randomly selected 5
tags extracted by our method. We then sorted them according to their
popularity and compared their search volumes one on one with
that of 5 random tags extracted from top 10 tags generated
by the given 3 methods. For comparison purposes, we made
sure we were not comparing Proper Nouns which would be
nothing but some entity names. We use Word Tracker\footnote{https://www.wordtracker.com/} to get the volume of extracted keywords and thereafter go ahead with
comparisons.
Let $t_{correct}$ be the number of keywords for a given method
which has more popularity than keywords extracted by other
methods and $t_{extracted}$ be the total number of keywords extracted
(which in our case is 5 for each sample text), \emph{Precision}
can be defined as
\begin{equation}
\mathit{Precision} =\frac{t_{\mathit{correct}}}{t_{\mathit{extracted}}}
\end{equation}
The comparison results of our pipeline with respect to
methods discussed in the above section are shown in Table \ref{tab:Results}
\subsection{Evaluation Metric for Quality of Ranking}
We again use the volume of Google search query for the
extracted 5 tags as a measure to rank them. If a keyword
or tag is more widely searched on the internet, its word co-occurrence
factor on which most of the ranking algorithms
are based must be of high significance for any given piece
of text. As discussed in Bellaachia et al. \cite{bellaachia2012ne} we use Binary Preference
Measure \emph{(BPM)} for calculating rank of extracted keywords. The Binary Preference Measure or BPM can be calculated as
\begin{equation}
BPM=\frac{1}{|T|} \sum _{t\in T} \left(1-\frac{|n\ \text{ranked higher than}\ t|}{|M|}\right)
\end{equation}
where T is the set of correct tags within the set M of
tags extracted by a method and t is a correct tag
and n is an incorrect tag.
\subsection{Model Parameters}\label{sub:Model Parameters}
\begin{table}
\centering
\caption{GRAPH CNN MODEL METRICS}
\begin{tabular}{|l|l|l|l|l|}
\hline
\begin{tabular}[c]{@{}l@{}}\textbf{No. of Entities in} \\ \textbf{Knowledge Graph} \end{tabular} & \begin{tabular}[c]{@{}l@{}}\textbf{No. of} \\ \textbf{Triplets}\end{tabular} & \begin{tabular}[c]{@{}l@{}}\textbf{Model Size}\end{tabular} & \textbf{Parameters} \\ \hline
7077 & 13000 & 5 MB & \begin{tabular}[c]{@{}l@{}}Number of Nodes \\ in Final Layer = \\Number of Entities \\ in Knowledge Graph\end{tabular} \\ \hline
7077 & 13000 & 2 MB & \begin{tabular}[c]{@{}l@{}}Size of Fully \\ Connected Layer = \\ Half the size of \\ Pooling Layer\end{tabular} \\ \hline
166554 & 50000 & 188 MB & \begin{tabular}[c]{@{}l@{}}Two Convolutional \\ Layers\end{tabular} \\ \hline
\end{tabular} \\
\label{tab:Metrics}
\end{table}
Our Graph-CNN model uses Adam \cite{kingma2014adam} to optimize and
learn all the parameters. In our model, we can set the width
of convolutional kernels with different size, for simplicity we
fixed the kernel size as $3\times 3$. When using pairwise ranking loss
to learn CNN, we fixed the margin value as 1. The learning
rate in our model is fixed as 0.001. Epoch number is set as 500
for ConceptNet dataset of 13-15K triplets. We use the negative
sampling method as explained in Section \ref{sub:CNNKB}. The batch size
of triplets for mini batch Stochastic Gradient Descent is set to
500. The embedding dimension is set to 200. The dissimilarity
distance measure used is the L1 norm. The evaluation triplet
size is set to 500. The number of filters used for Convolution
is set to 8. The dropout keep probability is set to 0.5.
\begin{table}
\centering
\caption{ENTITY SIMILARITY MODULE IMPACT METRICS}
\begin{tabular}{|l|l|l|}
\hline
\textbf{Dataset} &
\begin{tabular}[c]{@{}l@{}}\textbf{Out of Vocabulary} \\ \textbf{Entities (per Test Sample)} \end{tabular} &
\begin{tabular}[c]{@{}l@{}}\textbf{Average Length} \\ \textbf{of Each Sample} \\ \textbf{(No. of Words)} \end{tabular} \\ \hline
Amazon Reviews & 2.2 & 39 \\ \hline
Enron Emails & 3.4 & 57 \\ \hline
Notes & 2.8 & 65 \\ \hline
\end{tabular}
\label{tab:EntitySim}
\end{table}
The on-device metrics for different graph CNN models
while experimenting in terms of number of triplets are listed
in Table \ref{tab:Metrics}. Model size and vocabulary length are essential
metrics that need to be taken into consideration when deploying
the model on mobile devices. As we can clearly see
from Table \ref{tab:Metrics}, the size of graph CNN model trained with
166554 entities is around 188 MB which is not at all feasible
from on-device perspectives. This is the reason we went ahead
with lightweight model along the Entity Similarity Module.
As mentioned in Section \ref{sub:Similar}, for developing a set of similar
entities in order to deal with entities outside the knowledge
graph, we optimally chose a similarity score threshold of 0.7
based on trial and error. Table \ref{tab:EntitySim} showcases the effectiveness
of our Entity Similarity Module. It shows the average number
of entities detected outside knowledge graph across all our chosen datasets.
\section{Methods For Comparison}
We used the following 3 methods for comparison with our
proposed system:
\subsection{Topic Modelling using Latent Dirichlet Allocation}
Latent Dirichlet Allocation (LDA) \cite{blei2003latent} is a generative statistical
model used in natural language processing. In Topic Modelling, it explains topics by using unobserved clusters of words which explain reasons behind some parts of data being
similar. It is an unsupervised learning model that clusters
similar groups of observations. It posits that each document is
a mixture of a small number of topics/concepts and that each
observation’s presence is attributable to one of topics of that
specific document. For our comparisons we set the number of topics to 1
and extract the top relevant keywords representing that topic.
\subsection{Automatic Summarization using Text Rank Algorithm}
Automatic Summarization is the process of computational
reduction/shortening of data in order to create a synopsis
containing highly relevant and important information whilst
abstracting the unnecessary aspects of the larger data. For
example, finding the most informative sentences from a news
article, the most representative images from a collection of
images or even the most important frames in a video fall under
the umbrella of automatic summarization.
Text Rank (TR) \cite{mihalcea2004textrank} is an unsupervised approach to automatic
summarization of text. It is a graph-based ranking
algorithm used in natural language processing. We use the
default parameters for candidate parts of speech and case of
input text and a window size of 4.
\subsection{Rapid Automatic Keyword Extraction (RAKE)}
RAKE \cite{rose2010automatic} is a popular keyword extraction technique in
natural language processing. It involves using lists of stopwords
and phrase delimiters to extract the most relevant
keywords in textual data. Python implementation of RAKE
in the rake nltk library was used with default parameters for
comparison experiments.
Common methods such as TF-IDF (Term Frequency-Inverse Document Frequency) or Bag of Words models have not been compared with due to the length of the input texts being relatively shorter. Generating an appropriate IDF score or vocabulary for comparison would require a substantial amount of relevant text. Therefore, taking into account the average length of input texts in our specific case, we choose to not compare with such methods.
\section{Results}
Tags are extracted by the model on a set of 1500 test
samples across 3 different datasets and the evaluation metrics
mentioned in Section \ref{sec:Experiments} are used to calculate results. The
precision and BPM of the conducted experiments are shown in
Table \ref{tab:Results} and the on-device inference times and model sizes are
shown in Table \ref{tab:ondevice}. The on-device metrics have been calculated
using Samsung’s Galaxy A51 with 4 GB RAM and a 2.7 GHz
octa-core processor.
The results clearly show an improvement in both \emph{Precision}
and \emph{BPM} on the serving data and give a quantitative perspective
to the outcomes of our proposed approach.
Apart from these results, our proposed system demonstrates
efficiency with respect to device based computational restrictions.
Our entire pipeline’s size is restricted to just around 30 MB with
inference time being as low as 670 ms. An important thing
to note here is that the overall pipeline’s size and inference
timing is more than the sum of components mentioned in
Table \ref{tab:ondevice} because of presence of additional resources like GloVe
embeddings which are used across multiple components.
\begin{table}
\centering
\caption{Results across the three datasets}
\resizebox{\columnwidth}{!}{\begin{tabular}{|l|l|l|l|l|l|l|}
\hline
& \multicolumn{2}{|l|}{\textit{Enron Email}} & \multicolumn{2}{|l|}{\textit{Amazon Reviews}} & \multicolumn{2}{|l|}{\textit{Notes}} \\ \hline
\textbf{Methods} & \textbf{Precision} & \textbf{BPM} & \textbf{Precision} & \textbf{BPM} & \textbf{Precision} & \textbf{BPM} \\ \hline
LDA & 0.13 & 0.32 & 0.19 & 0.26 & 0.18 & 0.24 \\ \hline
TR & 0.26 & 0.43 & 0.29 & 0.41 & 0.33 & 0.44 \\ \hline
RAKE & 0.24 & 0.38 & 0.31 & 0.33 & 0.27 & 0.38 \\ \hline
Proposed System & 0.47 & 0.57 & 0.42 & 0.46 & 0.49 & 0.43 \\ \hline
\end{tabular}}
\label{tab:Results}
\end{table}
\begin{table}
\centering
\caption{On-Device Inference Times and Model Sizes}
\begin{tabular}{|l|l|l|}
\hline
\textbf{Component} & \textbf{Size (MB)} & \textbf{Inference Time per sample (ms)} \\ \hline
POS & 8 & 60 \\ \hline
Graph CNN & 2 & 500 \\ \hline
Proposed System & 31 & 670 \\ \hline
\end{tabular}
\label{tab:ondevice}
\end{table}
\begin{figure*}
\centering
\includegraphics[width=0.95\linewidth]{app_flow_color.png}
\caption{Application Content Presentation}
\label{fig:Appflow}
\end{figure*}
\section{Applications}
\label{sec:App}
An arbitrary search for on-device Note taking applications
on the web will list down 25 to 30 such applications, thus
providing strong evidence about the utility and significance of Notes application in the modern world smartphones. From To-Do lists to email drafts, key conversations and blog snapshots, everything can be stored as a Note. Thus, Notes are a form of text that cannot be expected to have any kind of structure whatsoever and are bound to have enormous variations depending on multiple factors associated with the user. Unstructured text in Notes may or may not have punctuation marks, correct sentence formation or correct grammar.
Recently, there have been many developments in the field of Notes applications ranging from automatic detection of list type of Notes to Notes containing images but none of these new features actually address the problem of cluttering of data. In this section we show one of the ways in which this cluttering of data in Notes application can be handled using our proposed work. Fig. \ref{fig:Appflow} shows on-device screenshots which can significantly enhance user experience while navigating through Notes application. Initially in step 1, the user uses one of the querying keywords to search for the desired Note. Then in step 2, all the Notes which are indexed by the search made by the user are displayed with their set of tags extracted using our pipeline in a summarized manner. All the Notes which are indexed in step 2 are selected if there is a match between the querying keyword in step 1 and tags being displayed for each Note in step 2. Finally in step 3, user can select its desired Note out of all the indexed Notes in step 2. This is just one of the ways in which content presentation with our pipeline running in the background can be done but there can be other better ways to render the content as well.
\section{Conclusion and Future Work}
Unstructured text is a special type of text having no defined
format or pattern. Generating relevant tags in the form of concepts and keywords
from unstructured text, therefore, cannot involve the use
of contextual semantics usually associated across entire text. Thus
our proposed pipeline uses word level dynamics to extract concepts and keywords from unstructured textual data. Also, because of
disorganized nature of unstructured data, extracted tags can prove really helpful while navigating through
such text. The most popular application targeted for on-device
usage of the proposed pipeline is Notes application. With recent
developments in device based Note taking applications, our
proposed pipeline with on-device feasibility can play a vital
role in enhancing user experience as we have seen in Section \ref{sec:App}.
One of the areas where
we can significantly improve is by analysing multiple input
data formats such as images, texts, audio etc. at the same
time. Multiple input data formats can give us a better context
and thus extracting a more subtle set of tags. Analysing these
multiple input formats would require techniques such as OCR
or speech recognition depending on the input provided by the
user.
\section{Acknowledgement}
The authors would like to thank all the users who contributed
in Notes application data collection. The authors would
like to express their gratitude towards all the reviewers who
have given constructive feedback to improve the paper.
\bibliographystyle{IEEEtran}
\section{Introduction}
The operator homology\let\thefootnote\relax\footnotetext{2010 \e{Mathematics Subject Classification:} Primary 46L07 46H25 46M10, Secondary 46M18 43A15.} of the Fourier algebra $A(G)$ of a locally compact group $G$ has been a topic of interest in abstract harmonic analysis since Ruan's seminal work \cite{Ru3}, where, among other things, he established the equivalence of amenability of $G$ and operator amenability of $A(G)$. From the perspective of Pontryagin duality, this result is the dual analogue of Johnson's celebrated equivalence of amenability of $G$ and (operator) amenability of $L^1(G)$ \cite{John}. In much the same spirit, dual analogues of various homological properties of $L^1(G)$ were established within the category of operator $A(G)$-modules, including the operator weak amenability of $A(G)$ \cite{Sp}, and the equivalence of discreteness of $G$ and relative operator biprojectivity of $A(G)$ \cite{Ar2,Wo}.
Continuing in this spirit, Ruan and Xu (implicitly) showed that $A(G)$ is relatively operator 1-projective whenever $G$ is an IN group (see also \cite{FLS}), and that $A(G)$ is relatively operator 1-flat whenever $G$ is inner amenable \cite{RX}. In this paper, we establish the converse of both of these results, and exhibit the first known class of groups for which $A(G)$ is not relatively $C$-flat for any $C\geq1$. Along the way, we show that inner amenability passes to closed subgroups, and answer an open question of Lau and Paterson \cite{LP1}.
The relative operator biflatness of $A(G)$ has been studied by Ruan and Xu \cite{RX} and Aristov, Runde, and Spronk \cite{ARS}, where it was shown (by different methods) that $A(G)$ is relatively operator biflat whenever $G$ is QSIN, meaning $L^1(G)$ has a quasi-central bounded approximate identity (see \cite{ARS,LR,St2}). The approach of Aristov, Runde, and Spronk is via approximate indicators, where they show that $A(G)$ is relatively operator $C$-biflat whenever the diagonal subgroup $G_\Delta\leq G\times G$ has a bounded approximate indicator in $B(G\times G)$ of norm at most $C$. One of the main results of this paper establishes the converse when $C=1$, that is, $A(G)$ is relatively operator 1-biflat if and only if $G_\Delta$ has a contractive approximate indicator in $B(G\times G)$. Recalling that $A(G)$ is operator amenable precisely when $A(G\times G)$ has a bounded approximate diagonal \cite{Ru3}, we see that $A(G)$ is relatively operator 1-biflat precisely when $A(G\times G)$ has a contractive approximate diagonal in the \e{Fourier--Stieltjes algebra} $B(G\times G)$, a result which elucidates the relationship between operator amenability and relative operator biflatness for $A(G)$, and for completely contractive Banach algebras more generally.
We conjecture that relative operator $1$-biflatness of $A(G)$ is equivalent to the QSIN condition, and we verify the conjecture in many special cases. For a discrete group $H$ acting ergodically by automorphisms on a compact group $K$, we also establish a connection between relative operator biflatness of $A(K\rtimes H)$ and the existence of $H$-invariant means on $L^{\infty}(K)$ distinct from the Haar integral.
Combining results of Leptin \cite{Lep} and Ruan \cite{Ru3}, we see that $A(G)$ has a bounded approximate identity precisely when it is operator amenable. It is known that $G$ is weakly amenable if and only if the algebra $A_{cb}(G)$ has a bounded approximate identity \cite{Fo}, and it was suggested in \cite{FRS} that $A_{cb}(G)$ may be operator amenable exactly when $G$ is weakly amenable. Another major result of the paper (see Theorem \ref{t:OA} for details) shows that operator amenability of $A_{cb}(G)$ is fundamentally linked to the existence of bounded approximate indicators for $G_\Delta$ \textit{in addition} to
weak amenability. As a corollary, we show that for the class of QSIN groups, operator amenability of $A_{cb}(G)$ is equivalent to weak amenability of $G$, and we provide several examples of non-QSIN weakly amenable groups for which $A_{cb}(G)$ fails to be operator amenable.
\section{Preliminaries}
Let $\mc{A}$ be a completely contractive Banach algebra. We say that an operator space $X$ is a right \e{operator $\mc{A}$-module} if it is a right Banach $\mc{A}$-module such that the module map $m_X:X\widehat{\otimes}\mc{A}\rightarrow X$ is completely contractive, where $\widehat{\otimes}$ denotes the operator space projective tensor product. We say that $X$ is \e{faithful} if for every non-zero $x\in X$, there is $a\in\mc{A}$ such that $x\cdot a\neq 0$, and we say that $X$ is \e{essential} if $\langle X\cdot\mc{A}\rangle=X$, where $\langle\cdot\rangle$ denotes the closed linear span. We denote by $\mathbf{mod}-\mc{A}$ the category of right operator $\mc{A}$-modules with morphisms given by completely bounded module homomorphisms. Left operator $\mc{A}$-modules and operator $\mc{A}$-bimodules are defined similarly, and we denote the respective categories by $\mc{A}-\mathbf{mod}$ and $\mc{A}-\mathbf{mod}-\mc{A}$.
\begin{remark} Regarding terminology, in what follows we will often omit the term ``operator'' when discussing homological properties of operator modules as we will be working exclusively in the operator space category.
\end{remark}
Let $\mc{A}$ be a completely contractive Banach algebra, $X\in\mathbf{mod}-\mc{A}$ and $Y\in\mc{A}-\mathbf{mod}$. The \e{$\mc{A}$-module tensor product} of $X$ and $Y$ is the quotient space $X\widehat{\otimes}_{\mc{A}}Y:=X\widehat{\otimes} Y/N$, where
$$N=\langle x\cdot a\otimes y-x\otimes a\cdot y\mid x\in X, \ y\in Y, \ a\in\mc{A}\rangle,$$
and, again, $\langle\cdot\rangle$ denotes the closed linear span. It follows that
$$\mc{CB}_{\mc{A}}(X,Y^*)\cong N^{\perp}\cong(X\widehat{\otimes}_{\mc{A}} Y)^*,$$
where $\mc{CB}_{\mc{A}}(X,Y^*)$ denotes the space of completely bounded right $\mc{A}$-module maps $\Phi:X\rightarrow Y^*$.
If $Y=\mc{A}$, then clearly $N\subseteq\mathrm{Ker}(m_X)$ where $m_X:X\widehat{\otimes}\mc{A}\rightarrow X$ is the multiplication map. If the induced mapping $\widetilde{m}_X:X\widehat{\otimes}_{\mc{A}}\mc{A}\rightarrow X$ is a completely isometric isomorphism we say that $X$ is an \e{induced $\mc{A}$-module}. A similar definition applies for left modules. In particular, we say that $\mc{A}$ is \e{self-induced} if $\widetilde{m}_\mc{A}:\mc{A}\widehat{\otimes}_{\mc{A}}\mc{A}\cong\mc{A}$ completely isometrically.
Let $\mc{A}$ be a completely contractive Banach algebra and $X\in\mathbf{mod}-\mc{A}$. The identification $\mc{A}^+=\mc{A}\oplus_1\mathbb{C}$ turns the unitization of $\mc{A}$ into a unital completely contractive Banach algebra, and it follows that $X$ becomes a right operator $\mc{A}^+$-module via the extended action
\begin{equation*}x\cdot(a+\lambda e)=x\cdot a+\lambda x, \ \ \ a\in\mc{A}^+, \ \lambda\in\mathbb{C}, \ x\in X.\end{equation*}
Let $C\geq1$. We say that $X$ is \e{relatively $C$-projective} if there exists a morphism $\Phi^+:X\rightarrow X\widehat{\otimes}\mc{A}^+$ satisfying $\norm{\Phi^+}_{cb}\leq C$ which is a right inverse to the extended module map $m_X^+:X\widehat{\otimes}\mc{A}^+\rightarrow X$. When $X$ is essential, this is equivalent to the existence of a morphism $\Phi:X\rightarrow X\widehat{\otimes}\mc{A}$ satisfying $\norm{\Phi}_{cb}\leq C$ and $m_X\circ\Phi=\textnormal{id}_{X}$ by the operator analogue of \cite[Proposition 1.2]{DP}.
Given a completely contractive Banach algebra $\mc{A}$ and $X\in\mathbf{mod}-\mc{A}$, there is a canonical completely contractive morphism $\Delta_X^+:X\rightarrow\mc{CB}(\mc{A}^+,X)$ given by
\begin{equation*}\Delta_X^+(x)(a)=x\cdot a, \ \ \ x\in X, \ a\in\mc{A}^+,\end{equation*}
where the right $\mc{A}$-module structure on $\mc{CB}(\mc{A}^+,X)$ is defined by
\begin{equation*}(\Psi\cdot a)(b)=\Psi(ab), \ \ \ a\in\mc{A}, \ \Psi\in\mc{CB}(\mc{A}^+,X), \ b\in\mc{A}^+.\end{equation*}
An analogous construction exists for objects in $\mc{A}-\mathbf{mod}$. For $C\geq 1$, we say that $X$ is \e{relatively $C$-injective} if there exists a morphism $\Phi^+:\mc{CB}(\mc{A}^+,X)\rightarrow X$ such that $\Phi^+\circ\Delta_X^+=\textnormal{id}_{X}$ and $\norm{\Phi^+}_{cb}\leq C$. When $X$ is faithful, this is equivalent to the existence of a morphism $\Phi:\mc{CB}(\mc{A},X)\rightarrow X$ such that $\Phi\circ\Delta_X=\textnormal{id}_{X}$ and $\norm{\Phi}_{cb}\leq C$ by the operator analogue of \cite[Proposition 1.7]{DP}, where $\Delta_X(x)(a):=\Delta_X^+(x)(a)$ for $x\in X$ and $a\in\mc{A}$.
We say that $X$ is \e{$C$-injective} if for every $Y,Z\in\mathbf{mod}-\mc{A}$, every completely isometric morphism $\Psi:Y\hookrightarrow Z$, and every morphism $\Phi:Y\rightarrow X$, there exists a morphism $\widetilde{\Phi}:Z\rightarrow X$ such that $\norm{\widetilde{\Phi}}_{cb}\leq C\norm{\Phi}_{cb}$ and $\widetilde{\Phi}\circ\Psi=\Phi$.
For a completely contractive Banach algebra $\mc{A}$, we say that $X\in\mathbf{mod}-\mc{A}$ is \e{relatively $C$-flat} (respectively, \e{$C$-flat}) if its dual $X^*$ is relatively $C$-injective (respectively, $C$-injective) in $\mc{A}-\mathbf{mod}$ with respect to the canonical module structure given by
$$\langle a\cdot f,x\rangle = \langle f, x\cdot a\rangle, \ \ \ f\in X^*, \ x\in X, \ a\in\mc{A}.$$
Similar definitions apply to left operator $\mc{A}$-modules. In the case of operator bimodules, we say that $X\in\mc{A}-\mathbf{mod}-\mc{A}$ is \e{relatively $C$-biflat} (respectively, \e{$C$-biflat}) if its dual $X^*$ is relatively $C$-injective (respectively, $C$-injective) in $\mc{A}-\mathbf{mod}-\mc{A}$. Viewing $\mc{A}$ as an operator $\mc{A}$-bimodule via
$$a\cdot(b\otimes c)=ab\otimes c, \ \ (b\otimes c)\cdot a=b\otimes ca, \ \ \ a,b,c\in\mc{A},$$
we say that $\mc{A}$ is \e{operator amenable} if it is relatively $C$-biflat in $\mc{A}-\mathbf{mod}-\mc{A}$ for some $C\geq1$, and has a bounded approximate identity. By \cite[Proposition 2.4]{Ru3} this is equivalent to the existence of a bounded approximate diagonal in $\mc{A}\widehat{\otimes}\mc{A}$, that is, a bounded net $(A_\alpha)$ in $\mc{A}\widehat{\otimes}\mc{A}$ satisfying
$$a\cdot A_\alpha - A_\alpha\cdot a, \ m_{\mc{A}}(A_\alpha)\cdot a \rightarrow 0, \ \ \ a\in\mc{A}.$$
We let $OA(\mc{A})$ denote the \textit{operator amenability constant of $\mc{A}$}, the infimum of all bounds of approximate diagonals in $\mc{A}\widehat{\otimes}\mc{A}$.
For a locally compact group $G$, the left and right regular representations $\lambda,\rho:G\rightarrow\mc{B}(L^2(G))$ are given by
$$\lambda(s)\xi(t)=\xi(s^{-1}t), \ \ \rho(s)\xi(t)=\xi(ts)\Delta(s)^{1/2}, \ \ \ s,t\in G, \ \xi\in L^2(G).$$
The von Neumann algebra generated by $\lambda(G)$ is called the \e{group von Neumann algebra} of $G$ and is denoted by $VN(G)$. It is known that $VN(G)$ is \e{standardly represented} on $L^2(G)$ (cf. \cite{H}), so that every normal state $\omega\in VN(G)_*$ is the restriction of a vector state $\omega_\xi$ to $VN(G)$ for a unique unit vector $\xi\in\mc{P}:=\overline{\{\eta\ast J\eta\mid \eta\in C_c(G)\}}$ \cite[Lemma 2.10]{H}, where $C_c(G)$ denotes the continuous functions on $G$ with compact support, and $J$ is the conjugate linear isometry given by
$$J\eta(s)=\overline{\eta(s^{-1})}\Delta(s^{-1})^{1/2}, \ \ \ s\in G, \ \eta\in L^2(G).$$
The set of coefficient functions of the left regular representation,
\begin{equation*}A(G)=\{u:G\rightarrow\mathbb{C} : u(s)=\langle\lambda(s)\xi,\eta\rangle, \ \xi,\eta\in L^2(G), \ s\in G\},\end{equation*}
is called the \e{Fourier algebra} of $G$. It was shown by Eymard that, endowed with the norm
$$\norm{u}_{A(G)}=\text{inf}\{\norm{\xi}_{L^2(G)}\norm{\eta}_{L^2(G)} : u(\cdot)=\langle\lambda(\cdot)\xi,\eta\rangle\},$$
$A(G)$ is a Banach algebra under pointwise multiplication \cite[Proposition 3.4]{E}. Furthermore, it is the predual of $VN(G)$, where the duality is given by
\begin{equation*}\langle u,\lambda(s)\rangle=u(s),\ \ \ u\in A(G), \ s\in G.\end{equation*}
Eymard also showed that the space of functions $\varphi:G\rightarrow\mathbb{C}$ for which there exists a strongly continuous unitary representation $\pi:G\rightarrow\mc{B}(H_\pi)$ and $\xi,\eta\in H_\pi$ such that $\varphi(s)=\langle\pi(s)\xi,\eta\rangle$, $s\in G$, is a unital Banach algebra (with pointwise multiplication) under the norm
$$\norm{\varphi}_{B(G)}=\text{inf}\{\norm{\xi}_{H_\pi}\norm{\eta}_{H_\pi} : \varphi(\cdot)=\langle\pi(\cdot)\xi,\eta\rangle\},$$
called the \e{Fourier-Stieltjes algebra} of $G$ \cite[Proposition 2.16]{E}, denoted by $B(G)$. We denote the convex subset of continuous positive definite functions of norm one by $\mc{P}_1(G)$.
The adjoint of the multiplication $m:A(G)\widehat{\otimes} A(G)\rightarrow A(G)$ defines a co-associative co-multiplication $\Gamma:VN(G)\rightarrow VN(G\times G)$, where we have used the fact that $VN(G\times G)=VN(G)\overline{\otimes} VN(G)=(A(G)\widehat{\otimes} A(G))^*$ \cite[Theorem 7.2.4]{ER}, and $\overline{\otimes}$ denotes the von Neumann algebra tensor product. This co-multiplication is symmetric in the sense that $\Gamma=\Sigma\circ\Gamma$, where $\Sigma:VN(G\times G)\rightarrow VN(G\times G)$ is the flip map; it satisfies $\Gamma(\lambda(s))=\lambda(s)\otimes\lambda(s)$, $s\in G$, and can be written as
$$\Gamma(x)=V(x\otimes 1)V^*, \ \ \ x\in VN(G),$$
where $V$ is the unitary in $L^{\infty}(G)\overline{\otimes} VN(G)$ given by
$$V\xi(s,t)=\xi(s,s^{-1}t), \ \ \ s,t\in G, \ \xi\in L^2(G\times G).$$
The co-associativity of $\Gamma$ translates into the following \e{pentagonal relation} for $V$:
\begin{equation}\label{e:pentagonal} V_{12}V_{13}V_{23}=V_{23}V_{12},\end{equation}
where $V_{12}=V\otimes 1$, $V_{23}=1\otimes V$, $V_{13}=(\sigma\otimes 1)V_{23}(\sigma\otimes 1)$, and $\sigma$ is the flip map on $L^2(G\times G)$.
The group von Neumann algebra $VN(G)$ becomes an operator $A(G)$-bimodule in the canonical fashion, and the bimodule actions can be written in terms of the co-multiplication:
$$u\cdot x = x\cdot u = (\textnormal{id}\otimes u)\Gamma(x) = (u\otimes\textnormal{id})\Gamma(x), \ \ \ u\in A(G), \ x\in VN(G).$$
It follows that $VN(G)$ is faithful as a left/right operator $A(G)$-module (respectively, $A(G)$-bimodule), and that under the isomorphism $\mc{CB}(A(G),VN(G))\cong VN(G\times G)$, the canonical morphism $\Delta_{VN(G)}=\Gamma$.
Given a closed subgroup $H\leq G$, we let $I(H)=\{u\in A(G)\mid u|_{H}\equiv 0\}$ denote the closed ideal of functions in $A(G)$ which vanish on $H$. By the proof of \cite[Proposition 1.7]{ARS} $I(H)$ is an essential ideal. It follows from \cite{Herz} that the restriction $r:A(G)\twoheadrightarrow A(H)$ is a complete quotient map with kernel $I(H)$, therefore $A(H)\cong A(G)/I(H)$.
\section{Relative flatness and inner amenability}
If $G$ is a locally compact group and $p\in[1,\infty]$, then $G$ acts by conjugation on $L^p(G)$ via
$$\beta_p(s)f(t)=f(s^{-1}ts)\Delta(s)^{1/p}, \ \ \ s,t\in G, \ f\in L^p(G).$$
When $p=2$, we obtain a strongly continuous unitary representation $\beta_2:G\rightarrow\mc{B}(L^2(G))$ satisfying $\beta_2(s)=\lambda(s)\rho(s)$ for $s\in G$, and when $p=\infty$, the conjugation action becomes
$$\beta_\infty(s)f(t)=f(s^{-1}ts), \ \ \ s,t\in G, \ f\in L^{\infty}(G).$$
Following Paterson \cite[2.35.H]{Pat2}, we say that $G$ is \e{inner amenable} if there exists a state $m\in L^{\infty}(G)^*$ satisfying
\begin{equation}\label{e:PatIA}\langle m,\beta_{\infty}(s)f\rangle=\langle m,f\rangle, \ \ \ s\in G, \ f\in L^{\infty}(G).\end{equation}
\begin{remark} In \cite{Effros}, Effros defined a discrete group $G$ to be ``inner amenable'' if there exists a conjugation invariant mean $m\in\ell^{\infty}(G)^*$ such that $m\neq\delta_e$. In what follows, inner amenability will always refer to the definition (\ref{e:PatIA}) given above.\end{remark}
The class of inner amenable locally compact groups forms a large, interesting class of groups containing all amenable groups and IN groups, where a locally compact group $G$ is IN if there exists a compact neighbourhood of the identity which is invariant under conjugation. For example, compact, abelian and discrete groups are IN, and therefore inner amenable.
A strongly continuous unitary representation $\pi:G\rightarrow\mc{B}(H_\pi)$ of a locally compact group $G$ is said to be \e{amenable} if there exists a state $m_\pi\in\mc{B}(H_\pi)^*$ such that
$$\langle m_\pi,\pi(s)^*T\pi(s)\rangle=\langle m_\pi,T\rangle, \ \ \ \forall \ s\in G, \ T\in\mc{B}(H_\pi).$$
This concept was introduced by Bekka \cite{Bekka}, who showed, among other things, that $G$ is inner amenable precisely when $\beta_2$ is an amenable unitary representation \cite[Theorem 2.4]{Bekka}. By \cite[Proposition 3.1]{St1}, inner amenability is equivalent to the existence of a $\beta_2$-invariant state on $\beta_2(G)''\subseteq\mc{B}(L^2(G))$, the von Neumann subalgebra generated by the conjugate representation. We now show that inner amenability is equivalent to the existence of a $\beta_2$-invariant state on $VN(G)$, i.e., a $G$-invariant state under the canonical $G$-action:
$$x\lhd s=\lambda(s)^*x\lambda(s), \ \ \ x\in VN(G), \ s\in G.$$
In turn, we answer a question raised by Lau and Paterson in \cite[Example 5]{LP2}.
\begin{prop}\label{p:IA} A locally compact group $G$ is inner amenable if and only if there exists a $G$-invariant state on $VN(G)$.\end{prop}
\begin{proof} If $G$ is inner amenable, then by \cite[Theorem 2.4]{Bekka} there exists a $\beta_2$-invariant state $m\in\mc{B}(L^2(G))^*$, whose restriction to $VN(G)$ is necessarily $G$-invariant, as
$$\langle m,\lambda(s)^*x\lambda(s)\rangle=\langle m,\lambda(s)^*\rho(s)^*x\rho(s)\lambda(s)\rangle=\langle m,\beta_2(s)^*x\beta_2(s)\rangle=\langle m,x\rangle$$
for all $x\in VN(G)$, $s\in G.$
Conversely, suppose $m\in VN(G)^*$ is a $G$-invariant state. Since $VN(G)$ is standardly represented on $L^2(G)$, there exists a net of unit vectors $(\xi_\alpha)$ in $\mc{P}$ such that $(\omega_{\xi_\alpha})$ converges to $m$ in the weak* topology of $VN(G)^*$. By $G$-invariance, it follows that
\begin{equation*}\beta_2(s)\cdot\omega_{\xi_\alpha}\cdot\beta_2(s)^*-\omega_{\xi_\alpha}=\omega_{\beta_2(s)\xi_\alpha}-\omega_{\xi_\alpha}\rightarrow 0\end{equation*}
weakly in $A(G)=VN(G)_*$ for all $s\in G$. By the standard convexity argument, we obtain a net of unit vectors $(\eta_\gamma)$ in $\mc{P}$ satisfying
\begin{equation*}\norm{\beta_2(s)\cdot\omega_{\eta_\gamma}\cdot\beta_2(s)^*-\omega_{\eta_\gamma}}_{A(G)}=\norm{\omega_{\beta_2(s)\eta_\gamma}-\omega_{\eta_\gamma}}_{A(G)}\rightarrow 0, \ \ \ s\in G.\end{equation*}
However, since $\beta_2(s)=\lambda(s)\rho(s)=\lambda(s)J\lambda(s)J$ we have $\beta_2(s)\mc{P}\subseteq\mc{P}$ for any $s\in G$ by \cite[Theorem 1.1]{H}. Then \cite[Lemma 2.10]{H} entails
\begin{equation*}\norm{\beta_2(s)\eta_\gamma-\eta_\gamma}_{L^2(G)}^2\leq\norm{\omega_{\beta_2(s)\eta_\gamma}-\omega_{\eta_\gamma}}_{A(G)}\rightarrow0, \ \ \ s\in G.\end{equation*}
Letting $f_\gamma:=|\eta_\gamma|^2$, we obtain a net of states in $L^1(G)$ satisfying
$$\norm{\beta_1(s)f_\gamma - f_\gamma}_{L^1(G)}=\norm{\omega_{\beta_2(s)\eta_\gamma}-\omega_{\eta_\gamma}}_{L^1(G)}\leq 2\norm{\beta_2(s)\eta_\gamma - \eta_\gamma}_{L^2(G)}\rightarrow0, \ \ \ s\in G.$$
Any weak* cluster point $M\in L^{\infty}(G)^*$ of $(f_\gamma)$ will therefore be conjugation invariant, and $G$ is inner amenable.
\end{proof}
As an immediate corollary, we obtain the following hereditary property of inner amenability, which appears to be new.
\begin{cor}\label{c:subgroup} Let $G$ be a locally compact group and let $H$ be a closed subgroup of $G$. If $G$ is inner amenable, then $H$ is inner amenable.
\end{cor}
\begin{proof} Let $VN_H(G):=\{\lambda_G(s)\mid s\in H\}''\subseteqVN(G)$. Then the map $i_H:VN(H)\rightarrow VN_H(G)$ given by
$$i_H(\lambda_H(s))=\lambda_G(s), \ \ \ s\in H,$$
is a *-isomorphism of von Neumann algebras. Thus, if $m\inVN(G)^*$ is a $G$-invariant state then $m_H:=m|_{VN_H(G)}\circ i_H\in VN(H)^*$ is an $H$-invariant state on $VN(H)$, so $H$ is inner amenable by Proposition \ref{p:IA}.
\end{proof}
In \cite[Corollary 3.2]{LP1}, Lau and Paterson proved the following equivalence for a locally compact group $G$:
\begin{enumerate}
\item $G$ is amenable;
\item $G$ is inner amenable and $VN(G)$ is $1$-injective in $\mathbb{C}-\mathbf{mod}$.
\end{enumerate}
The following theorem will allow us to describe the above equivalence from a purely homological perspective, elucidating the relationship between amenability and inner amenability.
\begin{thm}\label{t:IA=relinj} A locally compact group $G$ is inner amenable if and only if $A(G)$ is relatively $1$-flat in $\mathbf{mod}-A(G)$.
\end{thm}
\begin{proof} If $G$ is inner amenable, then by \cite[Proposition 1.13]{St2} there exists a net of states $(f_\alpha)$ in $L^1(G)$ satisfying
$$\norm{\beta_1(s)f_\alpha-f_\alpha}_{L^1(G)}\rightarrow0, \ \ \ s\in G,$$
uniformly on compact sets. The square roots $\xi_\alpha:=f_\alpha^{1/2}\inL^2(G)$ then satisfy
$$\norm{\beta_2(s)\xi_\alpha - \xi_\alpha}^2_{L^2(G)}\leq\norm{\beta_1(s)f_\alpha-f_\alpha}_{L^1(G)}\rightarrow0, \ \ \ s\in G,$$
uniformly on compact sets. Thus, combining \cite[Lemma 3.1, Lemma 4.1]{RX}, it follows that $\Gamma:VN(G)\rightarrow VN(G\times G)$ has a completely contractive left inverse $\Phi$ which is a left $A(G)$-module map. Since $VN(G)$ is faithful in $A(G)-\mathbf{mod}$, this entails the relative 1-injectivity of $VN(G)$ in $A(G)-\mathbf{mod}$, and hence, the relative 1-flatness of $A(G)$ in $\mathbf{mod}-A(G)$.
Conversely, relative $1$-flatness of $A(G)$ in $\mathbf{mod}-A(G)$ implies the existence of a completely contractive morphism $\Phi: VN(G\times G)\rightarrow VN(G)$ satisfying $\Phi\circ\Gamma=\textnormal{id}_{VN(G)}$.
It follows that $\Gamma\circ\Phi: VN(G\times G)\rightarrow VN(G\times G)$ is a projection of norm one onto the image of $\Gamma$. Thus, by \cite{To}, $\Gamma\circ\Phi$ is a $\Gamma(VN(G))$-bimodule map, which by injectivity of $\Gamma$ yields the identity
\begin{equation}\label{2} x\Phi(T)y=\Phi(\Gamma(x)T\Gamma(y))\end{equation}
for all $x,y\inVN(G)$ and $T\in VN(G\times G)$.
For $x\in VN(G)$, the module property of $\Phi$ implies $u\cdot\Phi(x\otimes 1)=\Phi(x\otimes u\cdot1)=u(e)\Phi(x\otimes 1)$ for all $u\in A(G)$. The standard argument then shows $\Phi(x\ten1)\in\C1$, so that $m:VN(G)\rightarrow\mathbb{C}$ defined by $\langle m,x\rangle=\Phi(x\otimes 1)$, $x\in VN(G)$, yields a state on $VN(G)$. Moreover, by equation (\ref{2}) we obtain
\begin{align*}\langle m,\lambda(s)x\lambda(s)^*\rangle&=\Phi(\lambda(s)x\lambda(s)^*\otimes 1)=\Phi((\lambda(s)\otimes\lambda(s))(x\otimes 1)(\lambda(s)^*\otimes\lambda(s)^*))\\
&=\Phi(\Gamma(\lambda(s))(x\otimes 1)\Gamma(\lambda(s)^*))=\lambda(s)\Phi(x\otimes 1)\lambda(s)^*=\Phi(x\otimes 1)\\
&=\langle m,x\rangle\end{align*}
for any $x\in VN(G)$ and $s\in G$. Thus, $m$ is a $G$-invariant state on $VN(G)$, which by Proposition \ref{p:IA} implies that $G$ is inner amenable.
\end{proof}
Combining Theorem \ref{t:IA=relinj} with \cite[Corollary 5.3]{C}, we can now recast the Lau--Paterson equivalence in purely homological terms:
\begin{enumerate}
\item $VN(G)$ is $1$-injective in $A(G)-\mathbf{mod}$;
\item $VN(G)$ is relatively $1$-injective in $A(G)-\mathbf{mod}$ and $1$-injective in $\mathbb{C}-\mathbf{mod}$.
\end{enumerate}
At present, we believe but have been unable to show that inner amenability of $G$ is equivalent to relative $C$-flatness of $A(G)$ in $\mathbf{mod}-A(G)$ for $C>1$. We can, however, provide a number of examples which support the conjecture based on the following proposition.
\begin{prop}\label{p:C-inj} Let $G$ be a locally compact group and let $H$ be a closed subgroup. If $VN(G)$ is $C$-injective in $A(G)-\mathbf{mod}$ for $C\geq 1$, then $VN(H)$ is $C$-injective in $A(H)-\mathbf{mod}$.
\end{prop}
\begin{proof} Let $r:A(G)\twoheadrightarrow A(H)$ be the complete quotient map given by restriction. Then $\mc{B}(L^2(H))$ becomes a left $A(G)$-module via
$$u\cdot T=(\textnormal{id}\otimes r(u))\Gamma^r(T), \ \ \ u\in A(G), \ T\in\mc{B}(L^2(H)),$$
where $\Gamma^r:\mc{B}(L^2(H))\rightarrow\mc{B}(L^2(H))\overline{\otimes} VN(H)$ is the canonical lifting of the co-multiplication on $VN(H)$, given by
$$\Gamma^r(T)=V(T\otimes 1)V^*, \ \ \ T\in\mc{B}(L^2(H)).$$
Clearly, $VN(H)$ is a closed $A(G)$-submodule of $\mc{B}(L^2(H))$. Hence, the inclusion $VN(H)\hookrightarrowVN(G)$ extends to a morphism $E:\mc{B}(L^2(H))\rightarrowVN(G)$ with $\norm{E}_{cb}\leq C$. We show that $E(\mc{B}(L^2(H)))=VN(H)$. To this end, fix $T\in \mc{B}(L^2(H))$. Then for $u\in A(G)$ and $v\in I(H)$, we have
$$\langle E(T),u\cdot v\rangle=\langle v\cdot E(T),u\rangle=\langle E(v\cdot T),u\rangle=0$$
as $r(v)=0$. Since $I(H)$ is essential it follows that $E(T)\in I(H)^{\perp}=VN(H)$. Thus, $E:\mc{B}(L^2(H))\rightarrow VN(H)$ is a completely bounded $A(H)$-module projection with $\norm{E}_{cb}\leq C$. Since $VN(H)$ has an $A(H)$-invariant state $m\in VN(H)^*$ satisfying
$$\langle m,u\cdot x\rangle=u(e)\langle m,x\rangle, \ \ \ u\in A(H), \ x\in VN(H),$$
it follows that $VN(H)$ is an amenable quantum group, and the proof of \cite[Theorem 5.5]{CN} implies that $\mc{B}(L^2(H))$ is $1$-injective in $A(H)-\mathbf{mod}$. Thus, $VN(H)$ is $C$-injective in $A(H)-\mathbf{mod}$.
\end{proof}
\begin{cor}\label{c:C-inj} Let $G$ be a locally compact group such that $VN(G)$ is $C$-injective in $A(G)-\mathbf{mod}$ for some $C\geq 1$. Then every closed inner amenable subgroup of $G$ is amenable.
\end{cor}
\begin{proof} By Proposition \ref{p:C-inj} we know that $VN(H)$ is $C$-injective in $A(H)-\mathbf{mod}$ for any closed subgroup $H$. Hence, there exists a completely bounded projection $E:\mc{B}(L^2(H))\rightarrow VN(H)$, which, by \cite[Theorem 3.1]{CS} (see also \cite{Pi}) implies that $VN(H)$ is an injective von Neumann algebra. If $H$ is inner amenable, then by \cite[Corollary 3.2]{LP1} it is necessarily amenable.
\end{proof}
\begin{cor}\label{c:F_2} Let $G$ be a locally compact group containing $\mathbb{F}_2$ as a closed subgroup and for which $VN(G)$ is $1$-injective in $\mathbb{C}-\mathbf{mod}$. Then $VN(G)$ is not relatively $C$-injective in $A(G)-\mathbf{mod}$ for any $C\geq1$.
\end{cor}
\begin{proof} If $VN(G)$ were relatively $C$-injective in $A(G)-\mathbf{mod}$, then it would be $C$-injective in $A(G)-\mathbf{mod}$ by \cite[Proposition 2.3]{C}. Since $\mathbb{F}_2$ is inner amenable, Corollary \ref{c:C-inj} would imply that it is amenable, which is absurd.
\end{proof}
Since almost connected groups have injective von Neumann algebras (see \cite{Pat3} and the references therein), and are non-amenable precisely when they contain $\mathbb{F}_2$ as a closed subgroup \cite[Theorem 5.5]{Rick}, Corollary \ref{c:F_2} implies that any non-amenable almost connected group $G$ cannot have a relatively $C$-flat (and hence $C$-biflat) Fourier algebra for any $C\geq 1$. In particular, $A(SL(n,\mathbb{R}))$, $A(SL(n,\mathbb{C}))$ and $A(SO(1,n))$ are not relatively flat (or biflat) for $n\geq 2$. This result builds on the analysis of \cite[\S4]{ARS}, where it was suspected that $A(SL(3,\mathbb{C}))$ would fail to be relatively biflat.
Regarding the relative projectivity of $A(G)$, we now establish the converse to \cite[Lemma 3.2]{RX}, providing a partial solution to the open question of relative projectivity of $A(G)$ \cite[\S4]{FLS}.
\begin{prop} Let $G$ be a locally compact group. Then $A(G)$ is relatively $1$-projective in $\mathbf{mod}-A(G)$ if and only if $G$ is an IN group.
\end{prop}
\begin{proof} Assuming relative $1$-projectivity of $A(G)$ in $\mathbf{mod}-A(G)$, there exists a normal completely contractive left $A(G)$-module map $\Phi:VN(G\times G)\rightarrowVN(G)$ such that $\Phi\circ\Gamma=\textnormal{id}_{VN(G)}$. By the proof of Theorem \ref{t:IA=relinj} we obtain a normal $G$-invariant state on $VN(G)$, which, by \cite[Proposition 4.2]{Tay} implies that $G$ is IN. The converse follows from \cite[Lemma 3.2]{RX}.
\end{proof}
\section{Relative biflatness of $A(G)$}\label{s:relbiflat}
Given a locally compact group $G$ and a closed subgroup $H$, a bounded net $(\varphi_\alpha)$ in $B(G)$ is called an \e{approximate indicator} for $H$ \cite[Definition 2.1]{ARS} if
\begin{enumerate}
\item $\lim_{\alpha} (\varphi_{\alpha}|_{H})\cdot u=u$ for all $u\in A(H)$;
\item $\lim_{\alpha} \varphi_{\alpha}\cdot v=0$ for all $v\in I(H)$.
\end{enumerate}
If $\norm{\varphi_\alpha}_{B(G)}\leq 1$ for all $\alpha$ we say that $(\varphi_\alpha)$ is a \e{contractive approximate indicator} for $H$.
In \cite[Proposition 2.3]{ARS} it was shown that $A(G)$ is relatively $C$-biflat if the diagonal subgroup $G_\Delta\leq G\times G$ has an approximate indicator $(\varphi_{\alpha})$ with $\norm{\varphi_{\alpha}}_{B(G\times G)}\leq C$. We now establish the converse when $C=1$, which is one of the main results of the paper.
\begin{thm}\label{t:biflat} Let $G$ be a locally compact group. Then $A(G)$ is relatively 1-biflat if and only if $G_\Delta$ has a contractive approximate indicator.
\end{thm}
\begin{proof} We need only establish necessity. Consider the right $L^1(G)$-action on $VN(G)$ given by
$$x\lhd f=\int_G\lambda(s)^*x\lambda(s)f(s)ds, \ \ \ x\inVN(G), \ f\inL^1(G).$$
For $f\inL^1(G)$, we let $\h{\Theta}(f):VN(G)\rightarrowVN(G)$ and $\h{\theta}_f:VN(G\times G)\rightarrow VN(G\times G)$ be the normal completely bounded maps given respectively by $\h{\Theta}(f)(x)=x\lhd f$, $x\inVN(G)$, and
$$\h{\theta}_f(X)=\int_G (\lambda(s)^*\otimes\lambda(s)^*)X(\lambda(s)\otimes\lambda(s))f(s)ds, \ \ \ X\in VN(G\times G).$$
Relative 1-biflatness of $A(G)$ implies the existence of a completely contractive $A(G)$-bimodule left inverse $\Phi:VN(G\times G)\rightarrowVN(G)$ to $\Gamma$. It follows as in Theorem \ref{t:IA=relinj} that $\Gamma\circ\Phi$ is a $\Gamma(VN(G))$-bimodule map. By Wittstock's bimodule extension theorem \cite{Witt2}, this map extends to an $\Gamma(VN(G))$-bimodule map $\Psi:\mc{B}(L^2(G\times G))\rightarrow\mc{B}(L^2(G\times G))$. Moreover, \cite[Lemma 2.3]{MNW} allows us to approximate $\Psi$ in the point weak* topology by a net $(\Psi_\alpha)$ of normal completely bounded $\Gamma(VN(G))$-bimodule maps. Thus, for any $X\in VN(G\times G)$, we have
\begin{align*}\Gamma\circ\Phi(\h{\theta}_f(X))&=\Psi(\h{\theta}_f(X))=\Psi\bigg(\int_G (\lambda(s)^*\otimes\lambda(s)^*)X(\lambda(s)\otimes\lambda(s))f(s)ds\bigg)\\
&=\lim_\alpha\Psi_\alpha\bigg(\int_G (\lambda(s)^*\otimes\lambda(s)^*)X(\lambda(s)\otimes\lambda(s))f(s)ds\bigg)\\
&=\lim_\alpha\bigg(\int_G \Psi_\alpha((\lambda(s)^*\otimes\lambda(s)^*)X(\lambda(s)\otimes\lambda(s)))f(s)ds\bigg)\\
&=\lim_\alpha\bigg(\int_G \Psi_\alpha(\Gamma(\lambda(s)^*)X\Gamma(\lambda(s)))f(s)ds\bigg)\\
&=\lim_\alpha\bigg(\int_G \Gamma(\lambda(s)^*)\Psi_\alpha(X)\Gamma(\lambda(s))f(s)ds\bigg)\\
&=\lim_\alpha\h{\theta}_f(\Psi_\alpha(X))\\
&=\h{\theta}_f(\Psi(X))\\
&=\h{\theta}_f(\Gamma\circ\Phi(X)),
\end{align*}
where we used normality of $\Psi_\alpha$ and $\h{\theta}_f$ in the fourth and eighth equality, respectively. By definition of $\h{\theta}_f$, we have $\h{\theta}_f\circ\Gamma=\Gamma\circ\h{\Theta}(f)$, so the above calculation entails $\Gamma\circ\Phi\circ\h{\theta}_f=\Gamma\circ\h{\Theta}(f)\circ\Phi$, which, by injectivity of $\Gamma$, implies $\Phi\circ\h{\theta}_f=\h{\Theta}(f)\circ\Phi$.
As in the proof of Theorem \ref{t:IA=relinj}, the restriction $\Phi|_{VN(G)\otimes 1}$ defines a state $m\inVN(G)^*$. The bimodule property of $\Phi$ ensures that $m$ is invariant for the $A(G)$-action on $VN(G)$, that is,
$$\langle m,u\cdot x\rangle=u(e)\langle m,x\rangle, \ \ \ x\inVN(G), \ u\in A(G).$$
Moreover, for $f\inL^1(G)$ and $x\inVN(G)$ we have
\begin{align*}\langle m,x\lhd f\rangle&=\Phi\bigg(\int_G (\lambda(s)^*\otimes\lambda(s)^*)(x\ten1)(\lambda(s)\otimes\lambda(s))f(s)ds\bigg)\\
&=\Phi(\h{\theta}_f(x\otimes 1))\\
&=\h{\Theta}(f)(\Phi(x\otimes 1))\\
&=\langle f,1\rangle\langle m,x\rangle.
\end{align*}
Approximating $m\in VN(G)^*$ in the weak* topology by a net of states $(u_\beta)$ in $A(G)$, it follows that
$$u_\beta\cdot v-v(e)u_\beta\rightarrow 0 \ \ \textnormal{and} \ \ f\lhd u_\beta-\langle f,1\rangle u_\beta\rightarrow 0$$
weakly in $A(G)$ for all $v\in A(G)$ and $f\inL^1(G)$, where $f\lhd u_\beta=(\h{\Theta}(f))_*(u_\beta)$. By the standard convexity argument, we obtain a net of states $(u_\gamma)$ in $A(G)$ satisfying
\begin{equation}\label{e:L1inv}\norm{u_\gamma\cdot v-v(e)u_\gamma}_{A(G)}, \ \norm{f\lhd u_\gamma-\langle f,1\rangle u_\gamma}_{A(G)}\rightarrow0, \ \ \ v\in A(G), \ f\inL^1(G).\end{equation}
For $s\in G$ and $v\in A(G)$ we define $s\lhd v\in A(G)$ by $s\lhd v(t)=v(s^{-1}ts)$, $t\in G$. Then by left invariance of the Haar measure it follows that
\begin{equation}\label{e:inv}s\lhd(f\lhd v)=(l_sf)\lhd v, \ \ \ s\in G, \ f\inL^1(G), \ v\in A(G),\end{equation}
where $l_sf(t)=f(st)$, $s,t\in G$. Fix a state $f_0\inL^1(G)$, and consider the net $(f_0\lhd u_\gamma)$. For $\varepsilon>0$, take a neighbourhood $U$ of the identity $e\in G$ such that
$$\norm{l_sf_0-f_0}_{L^1(G)}<\frac{\varepsilon}{2}, \ \ \ s\in U.$$
Then for any compact set $K\subseteq G$, there exist $s_1,...,s_n\in K$ such that $K\subseteq\cup_{i=1}^n Us_i$. Take $\gamma_\varepsilon$ such that for $\gamma\geq\gamma_\varepsilon$
\begin{equation*}\norm{(l_{s_i}f_0)\lhd u_\gamma-u_\gamma}_{A(G)}<\frac{\varepsilon}{4}, \ \ \ 1\leq i\leq n.\end{equation*}
Applying (\ref{e:inv}) together with the $L^1(G)$-invariance in (\ref{e:L1inv}), it follows by the standard argument (see \cite[Lemma 7.1.1]{R1}) that
$$\norm{k\lhd(f_0\lhd u_\gamma)-f_0\lhd u_\gamma}_{A(G)}<\varepsilon, \ \ \ k\in K.$$
Hence, the net $(f_0\lhd u_\gamma)$ satisfies
$$\norm{s\lhd(f_0\lhd u_\gamma)-f_0\lhd u_\gamma}_{A(G)}\rightarrow 0, \ \ \ s\in G,$$
uniformly on compact sets. Using both the $A(G)$ and $L^1(G)$-invariance from equation (\ref{e:L1inv}), a $3\varepsilon$-argument also shows that
$$\norm{(f_0\lhd u_\gamma)\cdot v-v(e)f_0\lhd u_\gamma}_{A(G)}\rightarrow 0, \ \ \ v\in A(G).$$
Forming $|f_0\lhd u_\gamma|^2$, we may further assume $f_0\lhd u_\gamma(s)\geq0$ for all $s\in G$, as one may easily verify using boundedness and multiplicativity of the $G$-action that
$$\norm{u\cdot|f_0\lhd u_\gamma|^2-u(e)|f_0\lhd u_\gamma|^2}_{A(G)}, \ \norm{s\lhd|f_0\lhd u_\gamma|^2-|f_0\lhd u_\gamma|^2}_{A(G)}\rightarrow0$$
for all $u\in A(G)$ and for all $s\in G$, uniformly on compact sets.
Now, since $VN(G)$ is standardly represented on $L^2(G)$, there exist unit vectors $\xi_\gamma\in\mc{P}$ satisfying
$$\omega_{\xi_\gamma}|_{VN(G)}=f_0\lhd u_\gamma.$$
Note that $J\xi_\gamma=\xi_\gamma$ and that $\xi_\gamma$ is necessarily real-valued by uniqueness. For any $s\in G$ we have $s\lhd\omega_{\xi_\gamma}=\omega_{\beta_2(s)\xi_\gamma}$ and $\beta_2(s)\mc{P}\subseteq\mc{P}$. Thus \cite[Lemma 2.10]{H} implies
\begin{equation}\label{e:uni}\norm{\beta_2(s)\xi_\gamma-\xi_\gamma}_{L^2(G)}^2\leq\norm{\omega_{\beta_2(s)\xi_\gamma}-\omega_{\xi_\gamma}}_{A(G)}= \norm{s\lhd(f_0\lhd u_\gamma)- f_0\lhd u_\gamma}_{A(G)}\rightarrow0\end{equation}
for all $s\in G$, uniformly on compact sets.
Define the function $\varphi_\gamma\in\mc{P}_1(G\times G)\subseteq B(G\times G)$ by
$$\varphi_\gamma(s,t)=\langle\lambda(s)\rho(t)\xi_\gamma,\xi_\gamma\rangle, \ \ \ s,t\in G,$$
and consider the associated normal completely positive map $\Theta(\varphi_\gamma)\in\mc{CB}_{A(G\times G)}(VN(G\times G))$ given by
$$\Theta(\varphi_\gamma)(\lambda(s)\otimes\lambda(t))=\varphi_\gamma(s,t)\lambda(s)\otimes\lambda(t), \ \ \ s,t\in G.$$
We claim that the bounded net $(\Theta(\varphi_\gamma))$ clusters to a completely positive $A(G\times G)$-module projection $VN(G\times G)\rightarrow VN(G_\Delta)$.
To verify the claim, first consider the net $(\omega_{\xi_\gamma})$ in $\mc{T}(L^2(G))=\mc{B}(L^2(G))_*$. By passing to a subnet we may assume that $(\omega_{\xi_\gamma})$ converges weak* to a state $M\in\mc{B}(L^2(G))^*$. For each $\gamma$ define the unital completely positive map $\Phi_\gamma:VN(G\times G)\rightarrow VN(G)$ by
$$\Phi_\gamma(X)=(\textnormal{id}\otimes\omega_{\xi_\gamma})V(1\otimes U)X(1\otimes U)V^*, \ \ \ X\in VN(G\times G),$$
where $U$ is the self-adjoint unitary given by $U=\h{J}J$, and $\h{J}$ is complex conjugation on $L^2(G)$. Since $\Gamma(x)=V(x\otimes 1)V^*$, $x\in VN(G)$, and $UVN(G)U=VN(G)'$, one easily sees that the range of $\Phi_\gamma$ is indeed contained in $VN(G)$.
For every $\gamma$ and $s,t\in G$, we have
\begin{align*}\Theta(\varphi_\gamma)(\lambda(s)\otimes\lambda(t))&=\langle\lambda(s)\rho(t)\xi_\gamma,\xi_\gamma\rangle\lambda(s)\otimes\lambda(t)\\
&=(\textnormal{id}\otimes\textnormal{id}\otimes\omega_{\xi_\gamma})(\lambda(s)\otimes\lambda(t)\otimes\lambda(s)\rho(t))\\
&=(\textnormal{id}\otimes\textnormal{id}\otimes\omega_{\xi_\gamma})(\lambda(s)\ten1\otimes\lambda(s))(1\otimes\lambda(t)\otimes\rho(t))\\
&=(\textnormal{id}\otimes\textnormal{id}\otimes\omega_{\xi_\gamma})(\lambda(s)\ten1\otimes\lambda(s))(1\otimes(1\otimes U)V(\lambda(t)\otimes 1)V^*(1\otimes U))\\
&=(\textnormal{id}\otimes\textnormal{id}\otimes\omega_{\xi_\gamma})(\lambda(s)\ten1\ten1)(1\otimes(1\otimes U)V(\lambda(t)\otimes\rho(s))V^*(1\otimes U))\\
&=(\textnormal{id}\otimes\textnormal{id}\otimes\omega_{\xi_\gamma})(\lambda(s)\ten1\ten1)(1\otimes V(\lambda(t)\otimes\rho(s))V^*) \ \ \ \ (\textnormal{as $U\xi_\gamma=\xi_\gamma$})\\
&=(\textnormal{id}\otimes\textnormal{id}\otimes\omega_{\xi_\gamma})(\lambda(s)\ten1\ten1)(1\otimes V((1\otimes U)(\lambda(t)\otimes\lambda(s))(1\otimes U))V^*)\\
&=\lambda(s)\otimes\Phi_\gamma(\lambda(t)\otimes\lambda(s))\\
&=\lambda(s)\otimes\Phi_\gamma(\Sigma(\lambda(s)\otimes\lambda(t)))\\
&=(\textnormal{id}\otimes\Phi_\gamma\circ\Sigma)(\lambda(s)\otimes\lambda(s)\otimes\lambda(t)))\\
&=(\textnormal{id}\otimes\Phi_\gamma\circ\Sigma)(\Gamma\otimes\textnormal{id})(\lambda(s)\otimes\lambda(t)).\end{align*}
By normality we see that $\Theta(\varphi_\gamma)=(\textnormal{id}\otimes\Phi_\gamma\circ\Sigma)(\Gamma\otimes\textnormal{id})$. Since $(\Phi_\gamma)$ is bounded, it follows that $(\Phi_\gamma)$ converges in the stable point weak* topology to the map $\Phi_M\in\mc{CB}(VN(G\times G),VN(G))$ given by
$$\Phi_M(X)=(\textnormal{id}\otimes M)V(1\otimes U)X(1\otimes U)V^*, \ \ \ X\in VN(G\times G).$$
Hence, the net $(\Theta(\varphi_\gamma))$ converges weak* to a map $\Theta\in\mc{CB}(VN(G\times G))$ satisfying $$\Theta=(\textnormal{id}\otimes\Phi_M\circ\Sigma)(\Gamma\otimes\textnormal{id}).$$
If $\Phi_M$ were a left $A(G)$-module left inverse to $\Gamma$, it would follow that $\Theta=\Gamma\circ\Phi_M\circ\Sigma$, hence the claim. We therefore turn to the required properties of $\Phi_M$.
First, let $\h{V}$ be the unitary in $VN(G)'\otenL^{\infty}(G)$ given by
$$\h{V}\zeta(s,t)=\zeta(st,t)\Delta(t)^{1/2}, \ \ \ \zeta\in L^2(G\times G), \ s,t\in G.$$
Then, for $\eta\inL^2(G)$, the compact convergence (\ref{e:uni}) entails
$$\norm{V\sigma \h{V}\sigma\eta\otimes\xi_\gamma - \eta\otimes\xi_\gamma}^2_{L^2(G\times G)}=\int_G\int_G |\eta(s)|^2|\beta_2(s)\xi_\gamma(t)-\xi_\gamma(t)|^2dsdt\rightarrow 0.$$
Noting that $\h{V}=\sigma(1\otimes U)V(1\otimes U)\sigma$, for $X\in VN(G\times G)$ we therefore have
\begin{align*}\langle\Phi_M(X),\omega_{\eta}\rangle&=\lim_\gamma\langle V(1\otimes U)X(1\otimes U)V^*\eta\otimes\xi_\gamma,\eta\otimes\xi_\gamma\rangle\\
&=\lim_\gamma\langle(1\otimes U)V(1\otimes U)X(1\otimes U)V^*(1\otimes U)\eta\otimes\xi_\gamma,\eta\otimes\xi_\gamma\rangle\\
&=\lim_\gamma\langle\sigma \h{V}\sigma X\sigma \h{V}^*\sigma\eta\otimes\xi_\gamma,\eta\otimes\xi_\gamma\rangle\\
&=\lim_\gamma\langle V^*XV\eta\otimes\xi_\gamma,\eta\otimes\xi_\gamma\rangle\\
&=\langle (\textnormal{id}\otimes M)V^*XV,\omega_\eta\rangle.\end{align*}
Since $\eta\inL^2(G)$ was arbitrary, by linearity we obtain
$$\Phi_M(X)=(\textnormal{id}\otimes M)(V^*XV), \ \ \ X\in VN(G\times G),$$
from which it follows that $\Phi_M\circ\Gamma=\textnormal{id}_{VN(G)}$. The $A(G)$-module property can be deduced from the proof of \cite[Theorem 5.5]{CN}, but we provide the details for the convenience of the reader.
For $X\in VN(G\times G)$ and $u\in A(G)$, we have
\begin{align*}\Phi_M(u\cdot X)&=\Phi_M((\textnormal{id}\otimes\textnormal{id}\otimes u)(V_{23}X_{12}V_{23}^*))\\
&=(\textnormal{id}\otimes M)(\textnormal{id}\otimes\textnormal{id}\otimes u)(V_{12}^*V_{23}X_{12}V_{23}^*V_{12})\\
&=(\textnormal{id}\otimes M)(\textnormal{id}\otimes\textnormal{id}\otimes u)(V_{13}V_{23}V_{12}^*X_{12}V_{12}V_{23}^*V_{13}^*) \ \ \ \ \textnormal{(by equation (\ref{e:pentagonal}))}\\
&=(\textnormal{id}\otimes u)(V(\textnormal{id}\otimes M\otimes\textnormal{id})(V_{23}V_{12}^*X_{12}V_{12}V_{23}^*)V^*). \end{align*}
Denoting by $\pi:\mc{T}(L^2(G))\twoheadrightarrow A(G)$ the canonical restriction map, and recalling that $M|_{VN(G)}$ is $A(G)$-invariant, for $\tau,\omega\in\mc{T}(L^2(G))$, we have
\begin{align*}\langle(\textnormal{id}\otimes M\otimes\textnormal{id})(V_{23}V_{12}^*X_{12}V_{12}V_{23}^*),\tau\otimes\omega\rangle&=\langle(M\otimes\textnormal{id})V((\tau\otimes\textnormal{id})(V^*XV)\ten1)V^*,\omega\rangle\\
&=\langle M,\pi(\omega)\cdot((\tau\otimes\textnormal{id})V^*XV)\rangle\\
&=\langle\omega,1\rangle\langle M,((\tau\otimes\textnormal{id})V^*XV)\rangle\\
&=\langle M\otimes\omega,(\tau\otimes\textnormal{id})(V^*XV)\otimes 1\rangle\\
&=\langle(\textnormal{id}\otimes M\otimes\textnormal{id})(V^*XV\otimes 1),\tau\otimes\omega\rangle\\
&=\langle\Phi_M(X)\otimes 1,\tau\otimes\omega\rangle.
\end{align*}
Since $\tau$ and $\omega$ in $\mc{T}(L^2(G))$ were arbitrary, it follows that
\begin{align*}\Phi_M(u \cdot X)&=(\textnormal{id}\otimes u)(V(\textnormal{id}\otimes M\otimes\textnormal{id})(V_{23}V_{12}^*X_{12}V_{12}V_{23}^*)V^*)\\
&=(\textnormal{id}\otimes u)(V(\Phi_M(X)\otimes 1)V^*)\\
&= u\cdot \Phi_M(X).\end{align*}
Our original claim is therefore established, and $\Theta(\varphi_\gamma)$ converges weak* in $\mc{CB}(VN(G\times G))$ to an $A(G\times G)$-module projection $\Theta$ from $VN(G\times G)$ onto $VN(G_\Delta)=\Gamma(VN(G))$. Then
$$\varphi_{\gamma}|_{G_\Delta}\cdot u-u\rightarrow 0$$
weakly for $u\in A(G_\Delta)$, and using the fact that $\Gamma(VN(G))=\{X\in VN(G\times G)\mid (\Gamma\otimes\textnormal{id})(X)=(\textnormal{id}\otimes\Gamma)(X)\}$ \cite[Theorem 6.5]{D4}, together with the essentiality $I(G_\Delta)=\langle I(G_\Delta)\cdot A(G\times G)\rangle$, we also have
$$\varphi_{\gamma}\cdot v\rightarrow 0$$
weakly for $v\in I(G_\Delta)$. Passing to convex combinations, and noting that $(\varphi_\gamma)\subseteq\mc{P}_1(G\times G)$, we obtain a contractive approximate indicator for $G_\Delta$ in $\mc{P}_1(G\times G)$.
\end{proof}
We conjecture that $A(G)$ is relatively $1$-biflat if and only if $G$ is QSIN, meaning $L^1(G)$ has a bounded approximate identity $(f_\alpha)$ satisfying
$$\norm{\beta_1(s)f_\alpha - f_\alpha}_{L^1(G)}\rightarrow0, \ \ \ s\in G.$$
By Theorem \ref{t:IA=relinj} and \cite[Corollary 3.2]{LP1}, for any locally compact group $G$ such that $VN(G)$ is 1-injective in $\mathbb{C}-\mathbf{mod}$, relative 1-biflatness of $A(G)$ implies that $G$ is amenable, and therefore QSIN by \cite[Theorem 3]{LR}. Hence, the conjecture is valid for all $G$ such that $VN(G)$ is an injective von Neumann algebra, in particular, for any type I or almost connected group (cf. \cite{Pat3}). We now establish the conjecture for totally disconnected groups.
\begin{prop}\label{p:totallydisconn} Let $G$ be a totally disconnected locally compact group. Then $A(G)$ is relatively 1-biflat if and only if $G$ is QSIN.\end{prop}
\begin{proof} Sufficiency follows from \cite[Theorem 2.4]{ARS}, so suppose that $A(G)$ is relatively 1-biflat. Proceeding as in the proof of Theorem \ref{t:biflat}, we obtain a net of states $(u_\gamma)$ in $A(G)$ satisfying
$$\norm{v\cdot u_\gamma-v(e)u_\gamma}_{A(G)}, \ \norm{s\lhd u_\gamma-u_\gamma}_{A(G)}\rightarrow0$$
for all $v\in A(G)$ and for all $s\in G$.
Now, let $\mathcal{H}$ be a neighbourhood basis of the identity consisting of compact open subgroups. By \cite[Lemme 4.13]{E} for each $H\in\mc{H}$ there exists a state $\varphi_H\in A(G)$ satisfying $\mathrm{supp}(\varphi_H)\subseteq H^2\subseteq H$ and
$$\norm{\varphi_H\cdot v-v(e)\varphi_H}_{A(G)}\rightarrow 0, \ \ \ v\in A(G).$$
For each $H\in\mc{H}$, a standard $3\varepsilon$-argument shows
$$\norm{s\lhd(\varphi_H\cdot u_\gamma)-\varphi_H\cdot u_\gamma}_{A(G)}\rightarrow0, \ \ \ s\in G.$$
Denoting the index set of $(u_\gamma)$ by $\mc{C}$, we form the product $\mc{I}:=\mc{H}\times\mc{C}^{\mc{H}}$. For each $\alpha=(H,(\gamma_H)_{H\in\mc{H}})\in\mc{I}$, letting $u_\alpha:=\varphi_H\cdot u_{\gamma(H)}$, we obtain a net of states in $A(G)$ satisfying the iterated convergence
$$\lim_{\alpha\in\mc{I}}\norm{s\lhd u_\alpha- u_\alpha}_{A(G)}=\lim_{H\in\mc{H}}\lim_{\gamma\in\mc{C}}\norm{s\lhd(\varphi_H\cdot u_{\gamma})-\varphi_H\cdot u_{\gamma}}_{A(G)}=0$$
for all $s\in G$ by \cite[pg. 69]{Kelley}. Moreover, $\mathrm{supp}(u_\alpha)\rightarrow\{e\}$, in the sense that for every neighbourhood $U$ of the identity, there exists $\alpha_U$ such that $\mathrm{supp}(u_\alpha)\subseteq U$ for $\alpha\geq\alpha_U$.
Let $(\xi_\alpha)$ be the unique representing vectors from $\mc{P}$ for the net $(u_\alpha)$. For each $\alpha=(H,(\gamma_H)_{H\in\mc{H}})\in\mc{I}$, $u_\alpha$ is supported in the open subgroup $H$, i.e., $u_\alpha\in A(H)\subseteq A(G)$. Under the canonical subspace inclusion $L^2(H)\hookrightarrow L^2(G)$ we have $\mc{P}_H=\overline{\{f\ast Jf\mid f\in C_c(H)\}}\subseteq \mc{P}_G$, so by uniqueness of representing vectors \cite[Lemma 2.10]{H}, we may assume $\mathrm{supp}(\xi_\alpha)\subseteq H$.
Applying Haagerup's Powers--St\o rmer inequality \cite[Lemma 2.10]{H} once again, we obtain
\begin{equation*}\norm{\omega_{\beta_2(s)\xi_\alpha}-\omega_{\xi_\alpha}}^2_{L^1(G)}\leq 4\norm{\beta_2(s)\xi_\alpha-\xi_\alpha}^2_{L^2(G)}\leq 4\norm{s\lhd u_\alpha-u_\alpha}_{A(G)}\rightarrow0, \ \ \ s\in G.\end{equation*}
Letting $f_\alpha:=|\xi_\alpha|^2$, we obtain a net of states in $L^1(G)$ satisfying
$$\norm{\beta_1(s)f_\alpha - f_\alpha}_{L^1(G)}=\norm{\omega_{\beta_2(s)\xi_\alpha}-\omega_{\xi_\alpha}}_{L^1(G)}\rightarrow0, \ \ \ s\in G,$$
and $\mathrm{supp}(f_\alpha)\rightarrow\{e\}$. Hence, $G$ is QSIN.
\end{proof}
For the semidirect product of an infinite compact group $K$ by a discrete group
$H$, we now show that relative $1$-biflatness of $A(K\rtimes H)$
entails that the unitary representation \[
\pi_{K}:H\rightarrow\mathcal{B}(L_0^2(K)):h\mapsto[\xi\mapsto h\cdot\xi]\]
weakly contains the trivial representation. Here, $L_0^2(K)=\{ \xi\in L^2(K):\int_{K}\xi=0\} $
and $h\cdot\xi(k)=\xi(h^{-1}kh)$
for $h\in H$ and $\xi\in L^2(K)$, where $h^{-1}kh$
is the product in $K\rtimes H$, i.e. the action of $h^{-1}$ on $k$.
If, moreover, the action of $H$ on $K$ is ergodic, we show that
the Haar integral on $K$ is not the unique $H$-invariant mean on
$L^{\infty}(K)$. \emph{Ergodicity} of the $H$-action
on $K$ is the assertion that if $E\subseteq K$ is Borel with $E\triangle h\cdot E$
null for all $h\in H$, then $E$ must be null or co-null, and is equivalent
to the non-existence of normal $H$-invariant means on $L^{\infty}(K)$
other than $1_{K}$.
\begin{prop}\label{p:semidirect}
Let $K\rtimes H$ be the semidirect product of an infinite compact group $K$
by a discrete group $H$. If $A(K\rtimes H)$ is relatively
$1$-biflat, then $\pi_{K}$ weakly contains the trivial representation.\end{prop}
\begin{proof}
Let $G$ denote $K\rtimes H$. As in the proof of Theorem \ref{t:biflat},
relative $1$-biflatness of $A(G)$ yields a net of states
$(\omega_{\xi_{\alpha}})$ in $A(G)$ with $\xi_{\alpha}\in\mathcal{P}_{G}$
satisfying\[
\Vert v\cdot\omega_{\xi_{\alpha}}-v(e)\omega_{\xi_{\alpha}}\Vert _{A(G)}, \ \Vert s\vartriangleleft\omega_{\xi_{\alpha}}-\omega_{\xi_{\alpha}}\Vert _{A(G)}\rightarrow0, \ \ \ v\in A(G), \ s\in G.\]
Arguing as in the proof of Proposition \ref{p:totallydisconn}, we may assume $\mbox{supp}(\omega_{\xi_{\alpha}})\rightarrow\{ e\} $
and, since $K$ is an open subgroup of $G$, we may identify $A(K)$
with a subspace of $A(G)$ and further assume that $\mbox{supp}(\xi_{\alpha})\subseteq K$.
Viewing $L^2(K)$ as a subspace of
$L^{2}(G)$ via extension by zero, we have $\beta_{2}^{G}(h)\xi=h\cdot\xi$ for $\xi\in L^2(K)$ and $h\in H$
by unimodularity of $G$, and, noting once again that $\beta_{2}^{G}(G)\mathcal{P}_{G}\subseteq\mathcal{P}_{G}$,
\cite[Lemma 2.10]{H} implies\[
\Vert h\cdot\xi_{\alpha}-\xi_{\alpha}\Vert _{L^2(K)}^{2}=\norm{ \beta_{2}^{G}(h)\xi_{\alpha}-\xi_{\alpha}} _{L^{2}(G)}^{2}\leq\norm{ \omega_{\beta_{2}^{G}(h)\xi_{\alpha}}-\omega_{\xi_{\alpha}}} _{A(G)}=\Vert h\vartriangleleft\omega_{\xi_{\alpha}}-\omega_{\xi_{\alpha}}\Vert _{A(G)}\rightarrow0\]
for all $h\in H$. Let $\xi_{\alpha}=\xi_{\alpha}^{0}+c_{\alpha}1_{K}$
correspond to the decomposition $L^2(K)=L_0^2(K)\oplus_{2}\mathbb{C}1_{K}$,
so that $1=\Vert \xi_{\alpha}^{0}\Vert _{L_0^{2}(K)}^{2}+|c_{\alpha}|^{2}$
and $\Vert h\cdot\xi_{\alpha}^{0}-\xi_{\alpha}^{0}\Vert _{L_0^{2}(K)}\rightarrow0$ for all $h\in H$.
Fix a neighbourhood $U$ of the identity in $K$ with $|K\setminus U|>0$.
If it were the case that $\Vert \xi_{\alpha}^{0}\Vert _{L_0^{2}(K)}\rightarrow0$,
then, for $\alpha$ large enough that $|c_{\alpha}|^{2}>\frac{1}{2}$
and $\mbox{supp}(\omega_{\xi_{\alpha}})\subseteq U$, we
have for $k\in K\setminus U$ that \[
0=\omega_{\xi_{\alpha}}(k)=\omega_{\xi_{\alpha}^{0}}(k)+\omega_{\xi_{\alpha}^{0},c_{\alpha}1_{K}}(k)+\omega_{c_{\alpha}1_{K},\xi_{\alpha}^{0}}(k)+|c_{\alpha}|^{2}=\omega_{\xi_{\alpha}^{0}}(k)+|c_{\alpha}|^{2}\]
because $\xi_{\alpha}^{0}\in L_0^2(K)$, whence\[
-\frac{1}{2}|K\setminus U|>\int_{K\setminus U}\omega_{\xi_{\alpha}^{0}}(k)dk=\langle \omega_{\xi_{\alpha}^{0}},\lambda_{K}(1_{K\setminus U})\rangle _{A(K),VN(K)}\rightarrow0,\]
a contradiction. Therefore, passing to a subnet if necessary, we may
assume $\Vert \xi_{\alpha}^{0}\Vert _{L_0^{2}(K)}$ is bounded away
from zero, in which case the vectors $\xi_{\alpha}^{0}$ may be normalized
while retaining the property that $\Vert h\cdot\xi^0_{\alpha}-\xi^0_{\alpha}\Vert _{L_0^{2}(K)}\rightarrow0$
for all $h\in H$. Thus $\pi_{K}$ weakly contains the trivial representation.\end{proof}
A locally compact group $G$ is said to have \emph{Kazhdan's
property (T)} if whenever a strongly continuous unitary representation
of $G$ weakly contains the trivial representation it must contain
the trivial representation.
\begin{cor}
Let $K\rtimes H$ be the semidirect product of an infinite compact group $K$
by a discrete group $H$ such that the action of $H$ on $K$ is ergodic.
If $A(K\rtimes H)$ is relatively $1$-biflat, then $H$
does not have Kazhdan's property (T).\end{cor}
\begin{proof}
If $H$ had Kazhdan's property (T), then $\pi_{K}$
would contain the trivial representation and we would obtain a nonzero
vector $\xi\in L_0^2(K)$ such that $h\cdot\xi=\xi$
for all $h\in H$, contradicting the ergodicity of the $H$-action
on $K$.
\end{proof}
This shows, for example, that if $K$ is an infinite
compact group with an ergodic action of $SL(n,\mathbb{Z})$
by automorphisms and $n\geq3$, then the Fourier algebra of $K\rtimes SL(n,\mathbb{Z})$
is not relatively 1-biflat.
The QSIN condition on a locally compact group $G$ is equivalent to
the existence of a conjugation invariant mean on $L^{\infty}(G)$
extending evaluation at the identity on $C_{0}(G)$. In
\cite{LR} it is established that for $n\geq2$ the group $\mathbb{T}^{n}\rtimes SL(n,\mathbb{Z})$
fails to be QSIN by appealing to the fact that the Haar integral on
$\mathbb{T}^{n}$ is the unique mean on $L^{\infty}(\mathbb{T}^{n})$
that is invariant under the $SL(n,\mathbb{Z})$-action.
Indeed, the restriction to $L^{\infty}(\mathbb{T}^{n})$
of any conjugation invariant mean on $L^{\infty}(\mathbb{T}^{n}\rtimes SL(n,\mathbb{Z}))$
is clearly invariant under the action of $SL(n,\mathbb{Z})$.
For semidirect products associated to ergodic actions as above, we
have the following.
\begin{cor}
Let $K\rtimes H$ be the semidirect product of an infinite compact group $K$
by a discrete group $H$ such that the action of $H$ on $K$ is ergodic.
If $A(K\rtimes H)$ is relatively $1$-biflat, then there
is an $H$-invariant mean on $L^{\infty}(K)$ distinct
from the Haar integral on $K$.\end{cor}
\begin{proof}
By \cite[Theorem 1.6]{FS}, $L^{\infty}(K)$ admits an
$H$-invariant mean distinct from the Haar measure when $\pi_{K}$,
considered as a representation on $L_{0}^{2}(K,\mathbb{R})$,
weakly contains the trivial representation. We may ensure that the
almost invariant vectors for $\pi_{K}$ produced in Proposition \ref{p:semidirect}
are real valued by replacing the states $\omega_{\xi_{\alpha}}$ with
$\omega_{\xi_{\alpha}}\overline{\omega_{\xi_{\alpha}}}$, in which
case we have $\omega_{\xi_{\alpha}}\overline{\omega_{\xi_{\alpha}}}=\omega_{\xi_{\alpha}^{\prime}}$
for $\xi_{\alpha}^{\prime}\in\mathcal{P}_{G}$ that are then real-valued
by uniqueness.
\end{proof}
Since the $SL(2,\mathbb{Z})$-action on $\mathbb{T}^{2}$
is ergodic, this confirms that $A(\mathbb{T}^{2}\rtimes SL(2,\mathbb{Z}))$
fails to be relatively 1-biflat. More examples of groups $H$ and $K$ and conditions on these pairs for which there
is a unique $H$-invariant mean on $L^{\infty}(K)$ may be found in \cite{Bekka2} and \cite{FS}.
\section{Operator amenability of $A_{cb}(G)$}
For a locally compact group $G$, let $M_{cb}A(G)$ denote
the completely bounded multiplier algebra of $A(G)$ and
$A_{cb}(G)$ the norm closure of $A(G)$ in
$M_{cb}A(G)$. Given a closed subgroup $H$ of $G$, we
may consider approximate indicators for $H$ consisting of completely
bounded multipliers by replacing $B(G)$ with $M_{cb}A(G)$
in the definition of Section \ref{s:relbiflat}. The existence of an approximate indicator
for $G_{\Delta}$ in the larger algebra $M_{cb}A(G\times G)$
still yields relative biflatness of $A(G)$, the proof
of \cite[Proposition 2.3]{ARS} carrying over mutatis mutandis.
For the algebra $A_{cb}(G)$, the existence
of a bounded approximate identity is equivalent to weak amenability
of $G$ \cite{Fo} and it was suggested in \cite{FRS} that $A_{cb}(G)$
may be operator amenable exactly when $G$ is weakly amenable. In Theorem \ref{t:OA} we show that operator amenability of $A_{cb}(G)$ is fundamentally linked to the existence of bounded approximate indicators for $G_\Delta$, \textit{in addition} to weak amenability of $G$. As a corollary, we establish the suggested equivalence from \cite{FRS} for the class of QSIN groups. In general, however, Theorem \ref{t:OA} in combination with Corollary \ref{c:F_2}
yields a large class of counter-examples, as we shall see.
In what follows we let $\Lambda_{cb}(G)$ denote the Cowling--Haagerup constant of $G$, the infimum of bounds of approximate identities for $A_{cb}(G)$. Similarly, we let $AI_{B(G\times G)}(G_{\Delta})$ and $AI_{cb}(G_{\Delta})$ denote the infimum of bounds of approximate indicators for $G_\Delta$ in $B(G\times G)$ and $M_{cb}A(G\times G)$, respectively.
The tensor square of the completely isometric inclusion $A_{cb}(G)\hookrightarrow M_{cb}A(G)$ induces a canonical complete contraction (cf. \cite[Corollary 7.1.3]{ER})
$$A_{cb}(G)\hten A_{cb}(G)\rightarrow M_{cb}A(G)\hten M_{cb}A(G).$$
For simplicity, below we will use the same notation for an element of $A_{cb}(G)\hten A_{cb}(G)$ as well as its image in $M_{cb}A(G)\hten M_{cb}A(G)$.
\begin{lem}\label{l:hten} Let $G$ be a locally compact group. Then for $u\in A_{cb}(G)\widehat{\otimes} A_{cb}(G)$, we have
$$\norm{u}_{M_{cb}A(G)\hten M_{cb}A(G)}\leq\norm{u}_{A_{cb}(G)\hten A_{cb}(G)}\leq\Lambda_{cb}(G)^2\norm{u}_{M_{cb}A(G)\hten M_{cb}A(G)}.$$
\end{lem}
\begin{proof} The first inequality is immediate from above. If $G$ is not weakly amenable (i.e., $\Lambda_{cb}(G)=\infty$), the second inequality is trivially satisfied. Thus, for the remainder of the proof we assume that $G$ is weakly amenable, witnessed by an approximate identity $(u_\alpha)$ in $A(G)$ with $\norm{u_\alpha}_{A_{cb}(G)}\leq\Lambda_{cb}(G)$.
First, suppose that $u\in A_{cb}(G)\ten A_{cb}(G)$. Viewing $u\in M_{cb}A(G)\ten M_{cb}A(G)$ canonically, for any $\varepsilon>0$ there exist $p,q\in\mathbb{N}$, $\beta\in M_{1,p\times q}(\mathbb{C})$, $\gamma\in M_{p\times q,1}(\mathbb{C})$, $v\in M_p(M_{cb}A(G))$, $w\in M_q(M_{cb}A(G))$ such that $u=\beta(v\otimes w)\gamma$, and
$$\norm{\beta}\norm{v}_{M_p(M_{cb}A(G))}\norm{w}_{M_q(M_{cb}A(G))}\norm{\gamma}<\norm{u}_{M_{cb}A(G)\hten M_{cb}A(G)}+\varepsilon.$$
Then for any $\alpha$,
$$\frac{1}{\norm{u_\alpha}_{A_{cb}(G)}^2}\norm{\beta}\norm{(u_\alpha)^p\cdot v}_{M_p(M_{cb}A(G))}\norm{(u_\alpha)^q\cdot w}_{M_q(M_{cb}A(G))}\norm{\gamma}
\leq\norm{u}_{M_{cb}A(G)\hten M_{cb}A(G)}+\varepsilon.$$
But $(u_\alpha\otimes u_\alpha)\cdot u=\beta((u_\alpha)^p\cdot v\otimes (u_\alpha)^q\cdot w)\gamma\in A_{cb}(G)\ten A_{cb}(G)$, and since $A_{cb}(G)\hookrightarrow M_{cb}A(G)$ is a complete isometry, we have
\begin{align*}\norm{(u_\alpha\otimes u_\alpha)\cdot u}_{A_{cb}(G)\hten A_{cb}(G)} &\leq\norm{\beta}\norm{(u_\alpha)^p\cdot v}_{M_p(A_{cb}(G))}\norm{(u_\alpha)^q\cdot w}_{M_q(A_{cb}(G))}\norm{\gamma}\\
&=\norm{\beta}\norm{(u_\alpha)^p\cdot v}_{M_p(M_{cb}A(G))}\norm{(u_\alpha)^q\cdot w}_{M_q(M_{cb}A(G))}\norm{\gamma}.\end{align*}
Hence,
$$\frac{1}{\norm{u_\alpha}_{A_{cb}(G)}^2}\norm{(u_\alpha\otimes u_\alpha)\cdot u}_{A_{cb}(G)\hten A_{cb}(G)}<\norm{u}_{M_{cb}A(G)\hten M_{cb}A(G)}+\varepsilon.$$
Since $\varepsilon>0$ was arbitrary, it follows that
$$\norm{(u_\alpha\otimes u_\alpha)\cdot u}_{A_{cb}(G)\hten A_{cb}(G)}<\norm{u_\alpha}_{A_{cb}(G)}^2\norm{u}_{M_{cb}A(G)\hten M_{cb}A(G)}\leq\Lambda_{cb}(G)^2\norm{u}_{M_{cb}A(G)\hten M_{cb}A(G)}.$$
As $(u_\alpha\otimes u_\alpha)\cdot u\rightarrow u$, we obtain the desired inequality. The density of $A_{cb}(G)\ten A_{cb}(G)$ in $A_{cb}(G)\hten A_{cb}(G)$ then yields the claim.
\end{proof}
\begin{thm}\label{t:OA} Let $G$ be a locally compact group. Consider the following conditions:
\begin{enumerate}
\item $G$ is weakly amenable and $G_{\Delta}$ has a bounded approximate indicator in $B(G\times G)$;
\item $A_{cb}(G)$ is operator amenable;
\item $G$ is weakly amenable and $G_{\Delta}$ has a bounded approximate indicator in $A_{cb}(G\times G)$.
\end{enumerate}
Then $(1)\Rightarrow(2)\Rightarrow(3)$, and, moreover,
$$AI_{cb}(G_{\Delta})\leq OA(A_{cb}(G))\leq AI_{B(G\times G)}(G_{\Delta})\Lambda_{cb}(G)^4.$$
\end{thm}
\begin{proof} $(1)\Rightarrow(2)$: Let $(u_\alpha)$ be an approximate identity for $A(G)$ with $\norm{u_\alpha}_{A_{cb}(G)}\leq\Lambda_{cb}(G)$ and $(\varphi_\beta)$ be a bounded approximate indicator for $G_{\Delta}$ with $\norm{\varphi_\beta}_{B(G\times G)}\leq AI_{B(G\times G)}(G_{\Delta})$. As the predual of the universal group von Neumann algebra $W(G\times G)=W(G)\overline{\otimes} W(G)$, it follows from \cite[Theorem 7.2.4]{ER} that
$$B(G\times G)=W(G\times G)_*=(W(G)\overline{\otimes} W(G))_*=B(G)\widehat{\otimes} B(G).$$
The tensor square of the completely contractive inclusion $B(G)\rightarrow M_{cb}A(G)$ then yields a canonical complete contraction
$$B(G\times G)\rightarrow M_{cb}A(G)\hten M_{cb}A(G).$$
We may therefore view $\varphi_\beta$ inside $M_{cb}A(G)\hten M_{cb}A(G)$ with
$$\norm{\varphi_\beta}_{M_{cb}A(G)\hten M_{cb}A(G)}\leq AI_{B(G\times G)}(G_{\Delta}).$$
It follows from Lemma \ref{l:hten} that $\varphi_\beta\cdot(u_\alpha\otimes u_\alpha)\in A_{cb}(G)\hten A_{cb}(G)$ with
$$\norm{\varphi_\beta\cdot(u_\alpha\otimes u_\alpha)}_{A_{cb}(G)\hten A_{cb}(G)}\leq\Lambda_{cb}(G)^2\norm{\varphi_\beta\cdot(u_\alpha\otimes u_\alpha)}_{M_{cb}A(G)\hten M_{cb}A(G)}\leq AI_{B(G\times G)}(G_{\Delta})\Lambda_{cb}(G)^4.$$
Write $\Delta:A_{cb}(G)\widehat{\otimes}A_{cb}(G)\rightarrow A_{cb}(G)$
for the product map, $r:A_{cb}(G\times G)\rightarrow A_{cb}(G)$
for restriction to the diagonal $G_{\Delta}$ in $G\times G$, and
$\Lambda:A_{cb}(G)\widehat{\otimes}A_{cb}(G)\rightarrow A_{cb}(G\times G)$
for the complete contraction defined on elementary tensors by $\Lambda(u\otimes v)=u\times v$,
so that $\Delta=r\Lambda$. Then for $u\in A(G)$,
\begin{equation}\label{e:OA1}\lim_\alpha\lim_\beta\Delta(\varphi_\beta\cdot(u_\alpha\otimes u_\alpha))\cdot u=
\lim_\alpha\lim_\beta \varphi_\beta|_{G_{\Delta}}\cdot u_\alpha^2\cdot u=
\lim_\alpha u_\alpha^2\cdot u=u.\end{equation}
In addition, $uu_\alpha\otimes u_\alpha-u_\alpha\otimes u_\alpha u\in I(G_{\Delta})$ for every $\alpha$, so
$$\lim_\beta u\cdot(\varphi_\beta\cdot(u_\alpha\otimes u_\alpha))-(\varphi_\beta\cdot(u_\alpha\otimes u_\alpha))\cdot u
=\lim_\beta\varphi_\beta\cdot(uu_\alpha\otimes u_\alpha-u_\alpha\otimes uu_\alpha)=0.$$
Thus,
\begin{equation}\label{e:OA2}\lim_\alpha\lim_\beta u\cdot(\varphi_\beta\cdot(u_\alpha\otimes u_\alpha))-(\varphi_\beta\cdot(u_\alpha\otimes u_\alpha))\cdot u=0.\end{equation}
By density of $A(G)$ in $A_{cb}(G)$ and boundedness of $\varphi_\beta\cdot(u_\alpha\otimes u_\alpha)$ in $A_{cb}(G)\hten A_{cb}(G)$, the equations (\ref{e:OA1}) and (\ref{e:OA2}) are valid for all $u\in A_{cb}(G)$. Denoting the index sets of $(u_\alpha)$ and $(\varphi_\beta)$ by $A$ and $B$, respectively, we form the product $\mc{I}:=A\times B^{A}$ as in Proposition \ref{p:totallydisconn}, and for $i=(\alpha,(\beta_\alpha)_{\alpha\in A})\in\mc{I}$ we let $v_i:=\varphi_{\beta_{\alpha}}\cdot(u_\alpha\otimes u_\alpha)$. By \cite[pg. 69]{Kelley} and the above analysis, the resulting net $(v_i)$ in $A_{cb}(G)\hten A_{cb}(G)$ is a bounded approximate diagonal with $\norm{v_i}_{A_{cb}(G)\hten A_{cb}(G)}\leq AI_{B(G\times G)}(G_{\Delta})\Lambda_{cb}(G)^4$.
$(2)\Rightarrow(3)$: Let $(X_{\alpha})$ be an approximate
diagonal for $A_{cb}(G)$ of bound $OA(A_{cb}(G))$ and set $m_{\alpha}=\Lambda(X_{\alpha})$.
We show that the net $(m_{\alpha})$ is an approximate
indicator for $G_{\Delta}$. Let $u\in A(G)$ have compact
support and choose $v\in A(G)$ with $v\equiv1$ on $\mbox{supp}(u)$
\cite[Lemme 3.2]{E}, so that $u=uv$ and \[
\Vert ur(m_{\alpha})-u\Vert _{A(G)}=\Vert u\Delta(X_{\alpha})-u\Vert _{A(G)}\leq\Vert u\Vert _{A(G)}\Vert v\Delta(X_{\alpha})-v\Vert _{A_{cb}(G)}\rightarrow0.\]
As $A(G)$ is Tauberian and the net $(r(m_{\alpha}))$
is bounded in $\Vert \cdot\Vert _{A_{cb}(G)}$,
a routine estimate shows that the above holds for all $u\in A(G)$.
We claim that the elements of $I(G_{\Delta})$ of the form
$(a\times1_{G}-1_{G}\times a)v$ for $a\in A(G)$
and $v\in A(G\times G)$ have dense span. Recall that $A(G)$
is self-induced \cite{D4}, in particular \[
\ker\Delta_{A(G)}=\langle ab\otimes c-a\otimes bc:a,b,c\in A(G)\rangle ,\]
and that the map $a\otimes b\mapsto a\times b$ induces a completely
isometric isomorphism $A(G)\widehat{\otimes}A(G)\rightarrow A(G\times G)$
taking $\ker\Delta_{A(G)}$ onto $I(G_{\Delta})$,
from which it follows that \[
I(G_{\Delta})=\langle ab\times c-a\times bc:a,b,c\in A(G)\rangle .\]
Since $\{ a\times c:a,c\in A(G)\} $ has dense
span in $A(G\times G)$, \begin{eqnarray*}
I(G_{\Delta}) & = & \langle b\cdot(a\times c)-(a\times c)\cdot b:a,b,c\in A(G)\rangle \\
& = & \langle b\cdot v-v\cdot b:b\in A(G)\mbox{ and }v\in A(G\times G)\rangle \\
& = & \langle (b\times1_{G}-1_{G}\times b)v:b\in A(G)\mbox{ and }v\in A(G\times G)\rangle .\end{eqnarray*}
For such elements of $I(G_{\Delta})$,\begin{eqnarray*}
\Vert (b\times1_{G}-1_{G}\times b)vm_{\alpha}\Vert _{A(G\times G)} & \leq & \Vert v\Vert _{A(G\times G)}\Vert b\cdot m_{\alpha}-m_{\alpha}\cdot b\Vert _{A_{cb}(G\times G)}\\
& \leq & \Vert v\Vert _{A(G\times G)}\Vert b\cdot X_{\alpha}-X_{\alpha}\cdot b\Vert _{A_{cb}(G)\widehat{\otimes}A_{cb}(G)}\rightarrow0,\end{eqnarray*}
where the second inequality uses that $\Lambda$ is a contractive $A(G)$-bimodule
map. The density claim above and the boundedness of $(m_{\alpha})$ imply that $\Vert um_{\alpha}\Vert _{A(G\times G)}\rightarrow0$
for all $u\in I(G_{\Delta})$.
The chain of inequalities $AI_{cb}(G_{\Delta})\leq OA(A_{cb}(G))\leq AI_{B(G\times G)}(G_{\Delta})\Lambda_{cb}(G)^4$ is clear from the above arguments.\end{proof}
When the operator amenability constant of $A_{cb}(G)$ is 1, we obtain an equivalence in Theorem \ref{t:OA}.
\begin{cor} Let $G$ be a locally compact group. The algebra $A_{cb}(G)$ is operator amenable with $OA(A_{cb}(G))=1$ if and only if $G$ is weakly amenable with $\Lambda_{cb}(G)=1$ and $G_{\Delta}$ has a contractive approximate indicator in $B(G\times G)$.\end{cor}
\begin{proof} Sufficiency follows immediately from the proof $(1)\Rightarrow(2)$ in Theorem \ref{t:OA}. Conversely, if $OA(A_{cb}(G))=1$ then $G$ is necessarily weakly amenable with $\Lambda_{cb}(G)=1$, and $A(G)$ is relatively 1-biflat by the implication $(2)\Rightarrow(3)$ and the completely bounded multiplier analogue of \cite[Proposition 2.3]{ARS}. The converse then follows from Theorem \ref{t:biflat}.
\end{proof}
Since $G_{\Delta}$ has a contractive approximate indicator in $B(G\times G)$ for any QSIN group \cite[Theorem 2.4]{ARS}, we immediately obtain the following.
\begin{cor}\label{c:OA} Let $G$ be QSIN. Then $A_{cb}(G)$ is operator amenable if and only if $G$ is weakly amenable.\end{cor}
As discrete groups are QSIN, Corollary \ref{c:OA} subsumes the main result of \cite{FRS} which showed that $A_{cb}(G)$ is operator amenable for any weakly amenable discrete group whose $C^*$-algebra $C^*(G)$ is residually finite-dimensional.
Beyond the class of QSIN groups, there can be a distinction between weak amenability and the operator amenability of $A_{cb}(G)$.
\begin{cor}\label{c:Acbopamen}
Let $G$ be a locally compact group containing $\mathbb{F}_{2}$ as
a closed subgroup and for which $VN(G)$ is $1$-injective
in $\mathbb{C}-\mathbf{mod}$. Then $A_{cb}(G)$ is not
operator amenable.\end{cor}
\begin{proof}
If $A_{cb}(G)$ were operator amenable then by Theorem \ref{t:OA} an approximate
indicator for $G_{\Delta}$ would exist, implying that $VN(G)$
is relatively $C$-injective in $A(G)-\mathbf{mod}$ for some $C\geq 1$ by
the completely bounded multiplier analogue of \cite[Proposition 2.3]{ARS}, in contradiction
to Corollary \ref{c:F_2}.
\end{proof}
Any weakly amenable, non-amenable, almost connected group $G$ satisfies
the hypotheses of Corollary \ref{c:Acbopamen} by \cite{Pat3} and \cite[Theorem 5.5]{Rick}. For example, $SL(2,\mathbb{R})$, $SL(2,\mathbb{C})$, and $SO(1,n)$, $n\geq 2$. Since weak amenability is preserved under compact extensions \cite[Proposition 1.3]{CH}
and almost connected groups have injective group
von Neumann algebras, if $K$ is any compact group
with an action of $G$ by automorphisms, then $K\rtimes G$ is weakly
amenable and $A_{cb}(K\rtimes G)$ fails to be operator
amenable.
\section*{Acknowledgements}
This work contains results from the doctoral thesis of the first author, who would like to thank Matthias Neufang for helpful discussions, and was partially supported by an NSERC Canada Graduate Scholarship. A portion of this project was completed at the Fields Institute during the Thematic Program on Abstract Harmonic Analysis, Banach and Operator Algebras in 2014, as well as the retrospective meeting in 2015. We are grateful to the Institute for its kind hospitality.
|
1,314,259,995,050 | arxiv | \section{Introduction}
Nowadays, personal computers and laptops are often equip\-ped with
multicore architectures,
as well as with more and more powerful graphic cards.
The latter ones can be easily programmable for a general purpose
computing usage (Nvidia Cuda, Ati Stream, OpenCL).
Graphic processors nowadays quite frequently offer
superior performance on the same budget as their CPU counterparts.
However, programmers can also efficiently use many-core CPUs for
parallelization e.g. with the OpenMP standard.
On the numerical side, several libraries automatically tune the sparse
matrix kernels \cite{Vuduc05oski,VuducM:05,Tvrdik:06} and recently
some kernels
have been proposed e.g. for GPU's
\cite{ellr,buluc09,Bell:SpMV:SC:2009}.
In this paper we want to adapt those techniques for exact
computations, and we first focus mostly on $\Zb/m\Zb$ rings, with $m$
smaller than a machine word.
The first idea is to use the numerical methods in an exact way as has
been done for dense matrix operations \cite{flas}. For sparse matrices,
however, the extraction of sparse matrices is slightly
different. Also, over small fields some more dedicated optimizations
(such as a separate format for ones and minus ones) can be useful.
Finally, we want to be able to use both multi-cores and GPU's at the
same time and the best format for a given matrix depends on the
underlying architecture.
Therefore, we propose an architecture with hybrid data formats,
user-specified or heuristically discovered dynamically.
The idea is that a given matrix will have different parts in different
formats adapted to its data or the resources. Also we present a
``just-in-time'' technique that allows to compile on the fly some
parts of the matrix vector product directly with the values of the
matrix.
We have efficiently
implemented\footnote{\url{https://ljkforge.imag.fr/projects/ffspmvgpu/}}
``\texttt{Sp}arse \texttt{M}atrix-\texttt{V}ector multiplication''
({\sc SpMV}\xspace{}) on finite rings, together with the transpose product and
iterative process to compute the power of a matrix times a vector, or
a sequence of matrix products.
We also make use of this library to improve the efficiency of the
block Wiedemann algorithm's of the
{\sc LinBox}\xspace\footnote{\url{http://linalg.org}} library.
Indeed, this kind of algorithm uses block ``black
box''~\cite{Kaltofen:1994:FHP} techniques:
the core operation is a matrix-vector multiplication and the matrix
is never modified. We use the new matrix-vector multiplication
library, together with a new parallel version of the sigma-basis
algorithm, used to compute minimal polynomials
\cite{Giorgi:2003:issac,jgd:2007:pasco}.
In section~\ref{sec:spmv} we present different approaches to the
parallelization of the {\sc SpMV}\xspace operation, with the adaptation of
numerical libraries (section \ref{ssec:num}), new formats adapted to
small finite rings (section \ref{ssec:newf}) together with our new hybrid
strategy and their iterative versions (section \ref{ssec:block}).
Then in section~\ref{sec:rank} we propose a new parallelization of
the block Wiedemann rank algorithm in {\sc LinBox}\xspace, via the parallelization
of the matrix-sequence generation (section \ref{ssec:parseq}) and the
parallelization of the matrix minimal polynomial computation (section
\ref{ssec:parSB}).
\section{Sparse-Vector Matrix multiplication}\label{sec:spmv}
\input{part1}
\section{Parallel block Wiedemann algorithm}\label{sec:rank}
\input{part2}
\section{Conclusion}
We have proposed a new {\sc SpMV}\xspace library providing good results on
$\Zb/m\Zb$ rings.
To attain this efficiency it has been mandatory to augment the
complexity of the {\sc SpMV}\xspace algorithms, since OpenMP, Cuda et al. all
manage differently the parallelization. Nonetheless, we provide new hybrid formats that improve the performance.
Moreover we have also specialized it to the computation of a sequence of
matrix-vector products together with a new parallelization of the
sigma-basis algorithm in order to enhance e.g. rank computations of
very large sparse matrices.
As seen in \ref{ssec:sigmabase}, a first parallelization of the
$\sigma$-basis~ computation has been achieved. Its efficiency does not yet match
the expected scalability, and a lot of work needs to be done to circumvent this problem. First, a deeper study of the parallelization of
$\sigma$-basis~ computation has to be done.
Beside the parallelization of {\sf PM-Basis} and {\sf M-Basis}
algorithms themselves, we need to design new algorithms to avoid the
numerous task dependencies,
inherent to the existing methods.
This will also enable an easier parallelization of early termination
strategies (requiring to interleave the generation sequence and the
$\sigma$-basis~ computation).
Another important task is to extend the sigma-basis algorithm to work
on polynomial matrices over extension fields.
Indeed the use of random projections $U$ and $V$ over extension fields might improve the probability of getting the full
minimal polynomial of the matrix \cite{Kaltofen:1995:ACB,Gilles:study,Chen01efficientmatrix}.
As shown in this paper and in \cite{jgd:2007:pasco}, $\sigma$-basis~ needs only a polynomial matrix multiplication implementation to work.
In order to adapt current LinBox's implementation to extension field, we will use the same technique as \cite{flas}: first use Kronecker substitution to transform the extension field polynomial representation to an integer representation ; then use a Chinese remaindered version of the polynomial matrix multiplication to recover the resulting matrix polynomial over Z ; and finally convert back the integers using e.g. the REDQ inverse operation of \cite{jgd:2008:issac}.
The {\sc SpMV}\xspace implementation also needs further work and other directions
to be explored.
For instance, we need to have dedicated implementations in $\Zb/2\Zb$ where $\xb$ and $\yb$ can be compressed.
More formats, including dense submatrices, have yet to be explored, which is linked to spending some more time on pre-processing the matrix: for instance the use of Metis\footnote{\url{http://glaros.dtc.umn.edu/gkhome/metis/metis/overview}} for partitioning and reordering $A$ would also improve the performance.
It will be interesting to deal with matrices such that $A$ and $\transpo{A}$ cannot be simultaneously stored~(\cite{buluc09}).
This problem indeed occurs on GPU's where on-chip memory is very limited.
Finally, we will also provide multi-GPU and hybrid GPU/CPU implementations.
\bibliographystyle{abbrv}
\subsection{Sparse Matrix Formats and Multiplication}\label{ssec:mul}
Sparse matrices arise from various domains and their shapes can
be very specific.
Taking into consideration the structure of a sparse matrix can dramatically improve the performance of {\sc SpMV}\xspace{}.
However, there is no general storage format that is efficient for all kinds of sparse matrices.
Among the most important storage formats is the \texttt{COO}\xspace (coordinate) format which stores triples.
It consists of three vectors of size \texttt{nbnz}\xspace, named \texttt{data}\xspace, \texttt{colid}\xspace and \texttt{rowid}\xspace, such that \texttt{data[k]=} $A$ \texttt{[rowid[k],colid[k]]}.
The \texttt{CSR}\xspace (compressed storage row) stores more efficiently the
previous representation: the \texttt{rowid}\xspace field is replaced by a $(\texttt{row}\xspace+1)$ long \texttt{start}\xspace vector such that if \texttt{start[$i$] $\leq k <$ start[$i+1$]}, then \texttt{data[$k$]$ = A $[$i$,colid[$k$]]}.
In other words, \texttt{start}\xspace indicates where a line starts and ends in the other two ordered fields.
The \ell (ELLpack) format stores data in a denser way: it has \texttt{data}\xspace and \texttt{colid}\xspace fields such that \texttt{data[$i,j_0$]$=A$[$i,$colid[$i,j_0$]]}, where $j_0$ varies between $0$ and the maximum number of non zero elements on a line of $A$.
One notices that these fields can be stored in row-major or column-major order.
A variant is the \texttt{ELL\_R}\xspace format that adds a $\texttt{row}\xspace$ long \texttt{rownb}\xspace vector that indicates how many non zero entries there are per line.
The \texttt{DIA}\xspace (DIAgonal) is used to store matrices with non zero elements grouped along diagonals.
It stores these diagonals in an array along with offsets where they start.
We refer to~\cite{Bell:SpMV:SC:2009},\cite{ellr} for more details on these formats.
This very schematic description of a few well-known formats shows that each of them has pros and cons.
Our aim is to produce a more efficient implementation of the {\sc SpMV}\xspace operation on finite fields than the one present in {\sc LinBox}\xspace, taking first advantage of this variety of formats.
\subsection{Finite field representation}\label{ssec:ff}
We present now how the data is stored.
We use data types such as \texttt{float}, \texttt{int}.
Firstly, when doing modular linear algebra, we try to minimize the number of costly $fmod$ (reduction) operation calls.
For instance, we prefer if possible the left loop to the right one in the next figure:
\begin{multicols}{2}
\begin{verbatim}
for (i=0 ; i<n ; ++i){
y += a[i] * b[i] ;
}
y = fmod(y,m);
\end{verbatim}
\begin{verbatim}
for (i=0 ; i<n ; ++i){
y += a[i] * b[i] ;
y = fmod(y,m);
}
\end{verbatim}
\end{multicols}
In this case, suppose $y=0$ and $a[i]$, $b[i]$ are reduced modulo $m$ at first, and $M$ is the largest representable integer.
Say that on $\Zb/m\Zb$, we represent the ring on $\left\llbracket 0, m-1\right\rrbracket$.
Then we can do at most $M/m^2$ such accumulations before reducing.
We can also represent the ring on $\left\llbracket-\left\lfloor\frac{m-1}{2}\right\rfloor,\left\lceil\frac{m-1}{2}\right\rceil\right\rrbracket$.
The latter representation enables us to perform twice as many operations
before a reduction, but this reduction is slightly more expensive.
Another trade-off consists in choosing a \texttt{float} representation
instead of \texttt{double} (on the architectures that support
\texttt{double}).
Indeed, operations can be much faster on
\texttt{float} than on \texttt{double}, but the \texttt{double}
representation lets us do more operations before reduction.
This is
particularly true on some GPU's.
\begin{figure}[H]
\begin{center}
\includegraphics[width=\textwidth]{flt-dbl}
\end{center}
\vspace{-1em}
\caption{\texttt{float}--\texttt{double} trade-off for different sizes of $m$, on the CPU and GPU}
\label{fig:dbl}
\end{figure}
In figure~\ref{fig:dbl}, we present variations in efficiency due to the storage data type and the size of $m$ on one core of a 3.2GHz Intel Xeon CPU and a Nvidia GTX280 GPU.
The timings correspond to the average of 50 {\sc SpMV}\xspace operations, where $\xb$ and $\yb$ are randomly generated on the CPU (which also takes into account every data transfer between the CPU and GPU).
The measures correspond to the number of millions of floating point operations per second (Mflops); a {\sc SpMV}\xspace operation requires $2*\texttt{nbnz}\xspace$ such operations.
The performances correspond to the best ones achieved on the
matrices\footnote{matrices available
at~\url{http://www-ljk.imag.fr/membres/Jean-Guillaume.Dumas/simc.html}} presented in table~\ref{tab:mats}.
\begin{table}[H]
\centering
{\small
\newcolumntype{L}{>{\!\small}l<{\!}}
\newcolumntype{C}{>{\!\small}c<{\!}}
\begin{tabular}{C|LLLLL}
name & mat1916 & bibd\_81\_3 & EX5 & GL7d15 & mpolyout2 \\
\hline
$\texttt{row}\xspace$& 1916 & 3240 & 6545 & 460261 & 2410560\\
$\texttt{col}\xspace$& 1916 & 85320 & 6545 & 171375 & 2086560 \\
\texttt{nbnz}\xspace &195985 & 255960 & 295680&
6080381 &15707520\\
rank& 1916 & 3240 & 4740 & 132043 & 1352011\\
\end{tabular}
}
\caption{Matrices overview}
\label{tab:mats}
\end{table}
For further information about these techniques on these rings and fast arithmetic in
Galois extensions, see e.g.~\cite{flas}.
\subsection{Adapting numerical libraries}\label{ssec:num}
Another speed-up consists in using existing numerical libraries.
The ideas behind using them on the rings
$\Zb/m\Zb$, is twofold.
Firstly, we delay the modular reduction, secondly we can use highly optimized popular libraries and get instant speed-ups as compared to more naive self-written routines.
Just like BLAS libraries can be used to speed up modular linear algebra~\cite{Fflaflas}, we can use numerical libraries for our purposes, or get inspiration for our algorithms from their techniques.
For instance, there is the {\sc Oski}\xspace library~\cite{Vuduc05oski} for sequential numerical {\sc SpMV}\xspace{}, or the GPU implementation of {\sc SpMV}\xspace{} by Nathan Bell \emph{et al.} in~\cite{Bell:SpMV:SC:2009}.
The BLAS specifications include sparse BLAS\footnote{\url{www.netlib.org/blas/blast-forum/chapter3.pdf}} but they are seldom fully implemented on free BLAS implementations.
Unfortunately, they usually cannot be used as-is.
We need to extract submatrices from the sparse matrices, which
is more complicated than for its dense counterpart, where the use of strides and dimensions suffices.
For instance, suppose one can do $b$ accumulations on $\yb[i]$ before reducing and that line $i$ of $A$ has $r_i$ non zero elements.
Then we want to split this line between $\left\lceil r_i/b\right\rceil$ matrices.
Finally, we can use the numerical libraries on these submatrices we have created.
The general algorithm reads like follows:
\begin{figure}[H]
\begin{verbatim}
spmv(y,A,x){
foreach submatrix Ai in A do{
spmv_num(y,Ai,x);
reduce(y,m);
}
}
\end{verbatim}
\vspace{-1em}
\caption{Using numerical routines}
\label{fig:num}
\end{figure}
\subsection{New formats}\label{ssec:newf}
Most of the formats implemented show a row-level parallelism, except \texttt{COO}\xspace that has element-wise parallelism.
The \texttt{COO}\xspace case is not obvious to implement and generally much slower.
The parallel efficiency of other formats will then depend on the length of the rows as well as the data regularity.
Unbalanced rows on a GPU architecture will produce many idle threads.
Two solutions exist: the vector approach of Bell (they split the rows into shorter chunks) or the rearranging of rows with permutations to sort the row according to their length.
The latter will not work when, e.g., the row lengths follow a power-law distribution.
The \ell format addresses this problem very well because each row has the same length.
Another way to parallelize the {\sc SpMV}\xspace operation is to split the matrix $A$ along rows to get smaller submatrices and treat them in parallel.
We took this approach on the CPU \texttt{COO}\xspace algorithm.
Also, we are dealing with large matrices, used many times as black-boxes.
Therefore there is a trade-off between the time spent on optimizing
the matrix and how much faster these optimizations will make {\sc SpMV}\xspace run.
Things to consider during preprocessing may include for instance:
reordering row-columns to create denser parts, choosing best-fitting
formats, cutting the matrix into efficient sub-matrices~(\cite{VuducM:05},\cite{Tvrdik:06})\dots
The preprocessing approach is taken by {\sc Oski}\xspace: if the expected number of {\sc SpMV}\xspace
is very high, optimizing the matrix deeper will prove efficient.
\subsubsection{Base case: JIT}
One idea to improve {\sc SpMV}\xspace on a given matrix is to hard code this
operation in a static library.
We read the matrix file and create a library that will apply this matrix to input vectors.
For instance the $\yb \gets \yb + A \xb$ operation on the matrix $
\begin{pmatrix}
2 & 1 \\
0 & 3
\end{pmatrix}$ would be translated to (if $m=27$)
\begin{verbatim}
void spmv(float * y, const float * x) {
y[0] += 2*x[0] ;
y[0] += x[1] ;
y[0] = fmod(y[0],27);
y[1] += 3*x[1] ;
y[1] = fmod(y[1],27);
}
\end{verbatim}
Then we compile this generated file as a static library and use \texttt{dlopen} to access its functions.
As we can see in this example, one can implement various optimizations: rearranging the rows so that the work is more even (non implemented yet), replacing the occurrences of $\pm 1$ in the matrix by less costly additions or subtractions.
We have better control on what the compiler will produce.
However, large matrices take extremely long to compile.
\texttt{gcc} cannot compile the library if the whole source is in one huge file, so we divide the matrix into parts of 1000 non zero elements and compile them.
Only then for instance, we could compile \texttt{bibd\_81\_3} but it takes 63s on the same Xeon machine.
Once it is compiled, the CPU version runs at 620 Mflops, which is
reasonably fast.
\subsubsection{Taking into account the $\pm1$}
The example of JIT and the observation that many matrices arising from different applications have a lot of $\pm 1_{F}$ tends to draw our attention to this special case.
Moreover, many matrices over small fields also share this property.
Thus we can extract two submatrices corresponding to the $1$ and $-1$ from the rest of the matrix and
replace multiplications by usually less expensive additions.
Besides, the \texttt{data} field in most formats (except \ell, \texttt{DIA}\xspace) can be forgotten as we
know they only consist of $1$ or $-1$: this reduces the memory usage.
Doing only additions as opposed to \texttt{axpy} also hugely delays reduction.
\begin{figure}[H]
\begin{center}
\includegraphics[width=0.7\textwidth]{pm1}
\end{center}
\vspace{-1em}
\caption{Speed improvement on one 3.2GHz Intel Xeon CPU and a Nvidia GTX280 GPU when segregating or not the $\pm1$}
\label{fig:pm1}
\end{figure}
Figure~\ref{fig:pm1} shows a maximum 20\% improvement on a matrix with only 1s and 15\% on a matrix with 50\% of $\pm1$.
\subsubsection{Basic Formats}
As evoked earlier, the matrix $A$ can be split into smaller submatrices.
These submatrices can have a format adapted to them and/or can be treated differently.
For instance, we can split row-wise and distribute these matrices for parallelism,
or split them column-wise as in the delaying case (figure~\ref{fig:num}).
This makes (possibly) many matrices that we each want to optimize individually
so we get better overall performance.
We start with some observations.
The \texttt{COO}\xspace format is slow due to the many
\texttt{fmod} calls, it is best used when the matrix is extremely
sparse.
The \texttt{CSR}\xspace format is denser and can let delayed reduction occur,
but one has to ensure the row lengths are well balanced
when parallelizing.
The \ell formats are very efficient on matrices that have roughly the same number of non zeros per line.
The \texttt{ELL\_R}\xspace format~(\cite{ellr}) is better for uneven rows lengths.
One difference in the CPU and GPU architecture makes the \ell row-major on the CPU (for better cache use) and column-major on the GPU (for better coalescing).
The following figure shows on one example (\texttt{bibd\_81\_3}) the variation of efficiency.
The data is normalized so that \texttt{CSR}\xspace is 1 on the CPU or GPU.
\begin{figure}[H]
\begin{center}
\includegraphics[width=0.8\textwidth]{comp}
\end{center}
\vspace{-1em}
\caption{Speed-ups for various formats on matrix
\texttt{bibd\_81\_3} both on one 3.2GHz Intel Xeon CPU and a Nvidia GTX280 GPU; reference
is \texttt{CSR}\xspace on each architecture}
\end{figure}
\subsubsection{Hybridization}
The previous remarks lead us to combine these formats to take advantage of them.
A hybrid format such as \texttt{ELL(R)+COO} or \texttt{ELL(R)+CSR} leads to good performance on the GPU.
When the \ell part is taken out of a matrix, many rows can be
left empty.
Then, we use a format called \texttt{COO\_S}\xspace that is a
\texttt{CSR}\xspace format with pointers only to the non empty rows.
It has \texttt{data}\xspace, \texttt{colid}\xspace same as in \texttt{CSR}\xspace and \texttt{COO}\xspace.
The number $\texttt{rowid}\xspace[k]$ corresponds to the $k$\textsuperscript{th} non empty row that starts in \texttt{data}\xspace and \texttt{colid}\xspace at $\texttt{start}\xspace[k]$.
This format could be avoided if we used row permutations and ordered the lines according to their weight.
\subsubsection{Heuristic format chooser}
The previous remarks show a great complexity in the formats and the cutting of the matrix.
We have implemented a user-helped heuristic format chooser.
For instance, the user can indicate if she wants to try and make use of $\pm1$.
If so, for each submatrix, the program tries to find an \emph{a priori} efficient format for them or if it fails, does not separate the $1$ or the $-1$ from the rest.
She can also indicate which format she wants to fill in priority.
The hybridization of the matrix is usually done as follows.
If the matrix is large enough and most of the lines are filled, it will try to fit a part of the matrix in an \ell or \texttt{ELL\_R}\xspace format.
This choice is supported by the observation that many matrices have a
$c+r$ row distribution where $c$ is some constant and $r \in \Zb$
varies and the fact that \ell is generally much faster than other formats
for matrices with even row weight.
The rest of the matrix will be put in a \texttt{CSR}\xspace, \texttt{COO}\xspace or \texttt{COO\_S}\xspace format, according to the number of empty lines and the number of residual non zero elements.
Parameters that decide when segregating the $1$s, that choose the best
length for \ell matrix, etc., vary according to the architecture of
the computer and need some specific tuning. This tuning is not yet
provided at compile time but some of it could be automatically performed at
install time.
Experiments show that this heuristic often gives equal or better results than simple formats on the CPU and the GPU.
\subsection{Block and iterative versions}\label{ssec:block}
\subsubsection{Using multi-vectors}
We have described the {\sc SpMV}\xspace operation $\yb\gets A\xb$ where $\xb$ and $\yb$ are vectors.
We also need $\xb$ and $\yb$ to be multi-vectors, for they may be used
for block algorithms.
There are at least two ways
of representing them : row or column-major order.
In the row-major order, we can use the standard {\sc SpMV}\xspace many times (and align the vectors).
In the column-major order, we can write dedicated versions that try and make use of the cache.
Indeed, in this case, we traverse the matrix only once and $\xb$ and
$\yb$ are read/written contiguously.
\begin{figure}[H]
\begin{center}
\includegraphics[width=\textwidth]{mult}
\end{center}
\vspace{-1em}
\caption{Matrix-multivector multiplication speed on one
3.2GHz Intel Xeon CPU (left) and a Nvidia GTX280 GPU
(right) for column-major multi-vectors, with $1,4,8$ and
$16$ vectors.}
\label{fig:mv}
\end{figure}
On figure~\ref{fig:mv}, we note that on the CPU, using column-major
multivectors is a non negligible gain of speed.
On the contrary, the GPU implementation fails to sustain good
efficiency for blocks of more than 8 vectors and some large matrices start to
reach the memory limit.
\subsubsection{Performance issues}
The GPU operation on a single {\sc SpMV}\xspace call from the host point of view is very slow because we need to move the vectors between the host and the device.
It is therefore only usable on operations that need no data moving between the host and the device.
Examples include the computation $y\leftarrow~A^n~x$
or the computation of the sequence
$\left\{A^i x\right\}_{i\in \left\llbracket 0,m\right\rrbracket}$ that are used in many of the
black box methods.
On figure~\ref{fig:pow}, we illustrate these differences, namely
reusing or not the data on the GPU, by comparing the performance of the following two pseudo-codes:
\begin{verbatim}
void smpv_n(y,A,x,n){
y_d = copy_on_gpu(y);
x_d = copy_on_gpu(x);
A_d = copy_on_gpu(A);
for (i=0 ; i<n ;++i) {
y_d = A_d * x_d ; // spmv on GPU
x_d = y_d; // copy
}
}
\end{verbatim}
\begin{verbatim}
void n_spmv(y,A,x,n){
A_d = copy_on_gpu(A);
for (i=0 ; i<n ;++i) {
y_d = copy_on_gpu(y_i);
x_d = copy_on_gpu(x_i);
y_d = A_d * x_d ; // spmv on GPU
}
}
\end{verbatim}
We confirm on figure~\ref{fig:pow} that it is highly desirable
not to move data to the GPU when avoidable.
\begin{figure}[H]
\begin{center}
\includegraphics[width=0.7\textwidth]{pow}
\end{center}
\vspace{-1em}
\caption{Nvidia GTX280 GPU speed up of $y \gets A^n x$ compared to $n$ times $y \gets A x$, with $n=5,10,20$}
\label{fig:pow}
\end{figure}
\subsection{Parallelization of the matrix sequence
generation}\label{ssec:parseq}
The parallelization proposed in \cite{jgd:2007:pasco} was to
ship independent set of vector blocks of $V$ to different cores and
apply them in parallel. Then gather the results to compute the dense
dot products by $U^T$.
An alternative is to use the {\sc SpMV}\xspace{} library and let it take care of the
iteration with the algorithm of the preceding section.
In figure \ref{fig:seq} we compare both approaches:
\begin{figure}[H]
\begin{center}
\includegraphics[width=0.7\textwidth]{seq}
\end{center}
\vspace{-1em}
\caption{Speed up from the new {\sc SpMV}\xspace{} library compared to the native
{\sc LinBox}\xspace implementation in the generation of the matrix sequence
($2n$ iterations) on one core of a 2.33GHz Intel Xeon E5345 CPU}
\label{fig:seq}
\end{figure}
\subsection{Parallelization of the $\sigma$-basis~ computation}\label{ssec:parSB}
One can efficiently compute $\sigma$-basis~ using the algorithm {\sf PM-Basis} of \cite{Giorgi:2003:issac}.
This algorithm mainly reduces to polynomial matrix
multiplication. Therefore a first parallelization approach
is to parallelize the polynomial multiplication.
\subsubsection{Parallel polynomial matrix multiplication}
Let $A,B \in \F^{n \times n}[x]$ be two polynomial matrices of degree
$d$. One can multiply $A$ and $B$ in $O(n^3d+n^2d\log d)$ operations in
\F\, assuming \F\, has a $d$-th primitive root of unity
\cite{Cantor:1991:Kaltofen}.
Assuming one has $k$ processors such that $k\leq n^2$, one can perform this multiplication with a parallel complexity of $O(\frac{n^3d}{k}+\frac{n^2d\log d}{k})$ operations in \F. Let us now see the sequential fast polynomial matrix multiplication algorithm and how it achieves such a parallel complexity:\\
\noindent {\it Fast Polynomial Matrix Multiplication:}\\
{\bf Inputs:} $A,B \in \F^{n \times n}[x]$ of degree $d$, $\omega$ a $d$-th primitive root of unity in \F.\\
{\bf Outputs:} $A\times B$\\
\hspace*{.4cm}1. $\bar{A} := DFT(A,[1,\omega,\omega^2,...,\omega^{2d}])$\\
\hspace*{.4cm}2. $\bar{B} := DFT(B,[1,\omega,\omega^2,...,\omega^{2d}])$\\
\hspace*{.4cm}3. $\bar{C}:= \bar{A}\otimes \bar{B}$\\
\hspace*{.4cm}4. $C:=\frac{1}{2d} DFT(\bar{C},[1,\omega^{-1},\omega^{-2},...,\omega^{-2d}])$\\
\hspace*{.4cm}{\bf return} C.\\
Here, $DFT(P,L)$ means the multi-point evaluation of the polynomial $P$ at each point of $L$, while $\otimes$ means the point-wise product.
\begin{itemize}
\item steps 1, 2 and 4 can be accomplished by using the Fast Fourier Transform on each matrix entry which gives $n^2 \times O(d\log d)$ operations (see \cite[Theorem 8.15]{VonzurGathen:1999:MCA}). This clearly can be distributed on $k$ processors such that each processor achieves in parallel the FFT on $\frac{n^2}{k}+O(1)$ matrix entries. This gives a parallel complexity of $O(\frac{n^2d\log d}{k})$ operations in \F.
\item step 3 requires the computation of $2d$ independent matrix multiplications of dimension $n$, which gives $O(n^3d)$ operations in \F. One can easily see how to distribute this work on $k$ processors such that each processor has a workload of $O(\frac{n^3d}{k})$ operations. \\
\end{itemize}
\begin{figure}[ht]
\includegraphics[width=0.5\textwidth]{omp-multiplication-1}
\includegraphics[width=0.5\textwidth]{omp-multiplication-2}
\caption{Scalability of parallel polynomial matrix multiplication with LinBox and OpenMP on a 16 core machine (based on Quad-Core AMD Opteron). n is the matrix dimension.}\label{fig:multiplication}
\end{figure}
We report in figure \ref{fig:multiplication} the performance of the implementation of this parallel algorithm in the LinBox\footnote{www.linalg.org} library.
Our choice of using this parallel algorithm rather than another one, achieving a possible better parallel complexity, has been driven
by the re-usability of efficient sequential components of the library (e.g. matrix multiplication) and the ease of use within the library itself
(i.e. mostly the same code as sequential one, only some OpenMP pragmas
have been added). \\
One can see on figure \ref{fig:multiplication} that our code does not completely match the theoretical parallel speedup.
The best we can achieve with 16 processors is a speedup of 5.5, which is only one third of the theoretical optimality.
Nevertheless, one can see that with less processors (e.g. less than 4) the speedup factor is closer to 75\% of the optimality, which is quite fair.
We think this phenomenon can be explained by the underlying multi-core architecture (Quad-Core AMD Opteron), which may clearly suffer from cache
effects depending on whether computations are done on the same chip or not.
As expected, we can also point out from figure \ref{fig:multiplication} that our implementation benefits at most from parallelism when matrices are larger.
Since workload on each core is more important, this allows to hide the penalty from memory operations and threads management of OpenMP.
This remark also applies to the degree but the impact is less important.
\subsubsection{Parallel $\sigma$-basis~ implementation}\label{ssec:sigmabase}
According to the reduction of {\sf PM-Basis} to polynomial matrix multiplication, one can achieve a parallel
complexity of ${O {\;\!\tilde{}}\,}(\frac{n^3d}{k}+\frac{n^2d\log d}{k})$ operations in \F\, with $k$ processors for $\sigma$-basis~ calculation, assuming $k\leq n^2$.
Therefore, it suffices to directly plug in our parallel polynomial matrix multiplication into the original code of the LinBox library to
get a parallel $\sigma$-basis~ implementation.
\begin{figure}[ht]\center
\includegraphics[width=0.6\textwidth]{omp-sigmabase-1}
\caption{Scalability of parallel $\sigma$-basis~ computation with LinBox
and OpenMP on a 16 core machine (based on a Quad-Core AMD Opteron). n is the matrix dimension of the series.}\label{fig:sigmabase}
\end{figure}
We report in figure \ref{fig:sigmabase} the performance of the parallel version of {\sf PM-Basis} algorithm within LinBox.
Here again, the speedup factor of parallelism is quite low when compared to the theoretical optimality.
At most we were able to obtain a speedup of 3 with 16 processors.
However, these timings are consistent with the previous ones in figure \ref{fig:multiplication} where the best speedup was 5.5.
One may notice that reduction to polynomial matrix multiplication of
the {\sf PM-Basis} algorithm relies on a divide and conquer approach on the
degree of the approximation (see \cite[theorem
2.4]{Giorgi:2003:issac}). Therefore, the recursion calls are made
with smaller and smaller approximation's degrees, which leads to use
less efficient parallel multiplications. Moreover, when the degree is
too small the use of the {\sf M-Basis} algorithm of
\cite{Giorgi:2003:issac} should be preferred since it becomes more
efficient in practice. We have not yet implemented a parallel
implementation of this algorithm in LinBox and this clearly
affects the performance of our implementation.
\subsection{Parallel determinant co-degree}
Here we just launch in parallel the evaluations of the matrix
polynomial at different points, and the computation of the determinant of the
obtained matrix at the given point, and gather the results sequentially
with the \verb!Poly1CRT! class of {\sc Givaro}\xspace.
\subsection{Parallel block Wiedemann performance}
In table \ref{tab:overall} we show the overall performance of our
algorithm on an
octo-processor Xeon E5345 CPU, 8 $\times$ 2.33GHz.
\verb!*-LB! shows the timings of the current {\sc LinBox}\xspace
implementation, whereas \verb!*-SpMV! presents our new improvement, both
in sequential and in parallel.
The speed-up for {\sc SpMV}\xspace{} between 1 and 8 processors is slightly larger
than $5$ for all the matrices, whereas the speed-up for {\sc LinBox}\xspace ranges
from $4$ to $4.9$.
Furthermore, the speed-up obtained with {\sc SpMV}\xspace{} versus {\sc LinBox}\xspace on the
sequence generation seems scalable as it even
improves when used in a parallel setting.
\begin{table}[htb]\center
\begin{tabular}{|l||r|r|r|r|r|r|}
\hline
Matrix & \multicolumn{2}{c|}{mat1916} & \multicolumn{2}{c|}{bibd\_81\_3} & \multicolumn{2}{c|}{EX5} \\
\hline
Cores & 1 & 8 & 1 & 8 & 1 & 8 \\
\hline
Seq-LB & 15.09 & 3.08 & 47.73 & 12.41 & 84.21 & 20.22\\
Seq-{\sc SpMV}\xspace{} & 5.02 & 0.91 & 41.28 & 7.56 & 49.66 & 7.36\\
\hline
$\sigma$-basis~ & 9.02 & 1.64 & 18.45 & 3.63 & 37.45 & 8.39\\
Interpolation& 0.37 & 0.29 & 1.07 & 0.82 & 2.29 & 1.75\\
\hline
\hline
Total-LB & 24.48 & 5.01 & 67.25 & 16.86 &123.95 & 30.36\\
Total-{\sc SpMV}\xspace{} & 14.41 & \bf 2.84 & 60.80 & \bf 12.01 & 89.40 & \bf 17.50\\
\hline
\end{tabular}
\caption{Rank modulo 65521 with OpenMP Parallel block Wiedemann on a Xeon
E5345, 8 $\times$ 2.33GHz}\label{tab:overall}
\end{table}
|
1,314,259,995,051 | arxiv | \section*{Introduction}
Let $X$ be a complex algebraic variety.
We call \emph{real form} of $X$ a real algebraic variety $W$ together with an isomorphism $X \simeq W_{\mathbb{C}}:=W \times_{\mathrm{Spec}(\mathbb{R})} \mathrm{Spec}(\mathbb{C})$ of complex algebraic varieties. (Note that for some authors, the definition of real form is slightly different from ours and applies to $W$ instead of $X$; this does not change the Key Problem we will consider, it modifies only its formulation.)
Determining the real forms of a given complex algebraic variety is a classical problem in algebraic geometry that usually splits into two parts.
\begin{problem}[real forms in the classical setting]\label{prob1} \
\begin{enumerate}
\item Does the complex algebraic variety $X$ admit a real form?
\item If yes, then how many are there (up to isomorphism)?
\end{enumerate}
\end{problem}
However, in the present article we are mainly interested in algebraic varieties endowed with a reductive group action, and so only real forms with symmetries coming from the group action will be taken into account. Let us explain what this means.
Let $G$ be a complex reductive group, let $F$ be a real algebraic group such that $G \simeq F_\mathbb{C}$ (i.e.~$F$ is a real form of $G$ in the category of algebraic groups), and let now $X$ be a complex algebraic $G$-variety; we call \emph{real $F$-form} of $X$ a real algebraic $F$-variety $W$ together with an isomorphism $X \simeq W_{\mathbb{C}}$ of complex algebraic $G$-varieties.
In this setting, Problem \ref{prob1} admits the following reformulation.
\begin{problem}[real forms in the equivariant setting]\label{prob2} \
\begin{enumerate}
\item Does the complex algebraic $G$-variety $X$ admit a real $F$-form?
\item If yes, then how many are there (up to $F$-equivariant isomorphism)?
\end{enumerate}
\end{problem}
It follows from the work of Weil \cite{Wei56} and Borel--Serre \cite{BS64} that there is a correspondence between the notions of real forms and of \emph{real structures} (i.e.~antiregular scheme involutions). More precisely, real forms of complex algebraic groups correspond to \emph{real group structures} (see Definition \ref{def:real group structure}) and
real forms of complex algebraic varieties endowed with a reductive group action correspond to \emph{effective equivariant real structures} (see Definition \ref{def: equiv real structure}). This correspondence is usually referred to as \emph{Galois descent}.
In practice, it is more convenient to work with real structures instead of real forms; indeed, real structures can be extended or restricted with respect to a dense open subset, and they are parametrized by some Galois cohomology pointed set (see Proposition \ref{prop:Galois H1 to param eq real structures}). Consequently, when studying real forms of complex algebraic varieties endowed with a reductive group action, we are spontaneously led not to consider Problem \ref{prob2}, but rather to the following equivalent one.
\begin{key-problem}[real structures in the equivariant setting] \
\begin{enumerate}
\item\label{item: existence part of Key-Prob3} Given a real group structure $\sigma$ on $G$, does there exist an effective $(G,\sigma)$-equivariant real structure on the complex algebraic $G$-variety $X$?
\item\label{item: quantity part of Key-Prob3} If yes, then how many are there (up to equivalence)?
\end{enumerate}
\end{key-problem}
The goal of this survey article is to succinctly present the current state of the art concerning the Key Problem, with a special focus on progress made over the past decade. Concretely, we consider the Key Problem for several families of algebraic $G$-varieties\footnote{In all this article, varieties are assumed to be normal as this assumption is crucial in the different combinatorial descriptions that will appear.} for which there exist well-established combinatorial descriptions. Since each of these descriptions is however quite technical, we do not go into details, but rather try to focus on the big picture.
We will see that each time, a complete answer to question \ref{item: existence part of Key-Prob3} of the Key Problem can be given in terms of Galois group actions on the combinatorial data (sometimes with an additional cohomological condition), but that in most cases Proposition \ref{prop:Galois H1 to param eq real structures} is the best answer that can be made to question \ref{item: quantity part of Key-Prob3} of the Key Problem.
\subsection*{Structure of the article}
Section \ref{sec: real group structures on reductive groups} contains preliminaries on real group structures on reductive groups and a definition of the famous $\star$-action of the Galois group $\Gal(\mathbb{C}/\mathbb{R})$ on the based root datum associated to a triple $(G,B,T)$ with $B \subseteq G$ a Borel subgroup and $T \subseteq B$ a maximal torus; this action will play a crucial role when answering question \ref{item: existence part of Key-Prob3} of the Key Problem.
Section \ref{sec: equivariant real structures} contains preliminaries on equivariant real structures on $G$-varieties, with particular emphasis on the case of almost homogeneous $G$-varieties.
In Section \ref{sec: warm-up} we then consider the Key Problem for two classical families of algebraic varieties with a reductive group action, namely the linear representations and the nilpotent orbits (and the normalizations of their closures) in semisimple Lie algebras.
Section \ref{sec: real structures on T-varieties} is devoted to varieties with a torus action; we first consider the case of toric varieties, then the case of arbitrary affine $T$-varieties.
In Section \ref{sec: real structures on spherical varieties} we pass from the toric case to the spherical case; this generalization is done in two steps: we first consider spherical homogeneous spaces, then their equivariant embeddings.
In Section \ref{sec:almost homg SL2-threefolds} we consider the Key Problem for the almost homogeneous $\SL_2$-threefolds for which, contrary to the spherical case, the $\star$-action is not sufficient to respond combinatorially to question \ref{item: existence part of Key-Prob3} of the Key Problem.
Finally, Section \ref{sec: open questions} contains some open questions and lines of research related to the Key Problem.
\subsection*{Foreword}
This is a survey article. We have tried to be quite exhaustive, but the choices we made so that this article is not too long or too technical certainly reflect the author's interests.
\subsection*{Acknowledgments}
We would like to thank Adrien Dubouloz, Pierre-Alexandre Gillard and Lucy Moser-Jauslin for their valuable comments and suggestions, which helped us to improve the quality of this article.
\subsection*{Notation}
In this article, a \emph{variety} (over a field $\mathrm{K}$) is a separated scheme of finite type (over $\mathrm{K}$) that is geometrically integral and \textbf{normal}, and an \emph{algebraic group} (over a field $\mathrm{K}$) is a finite type group scheme (over $\mathrm{K}$). By an \emph{algebraic subgroup}, we always mean a closed subgroup scheme.
A connected linear algebraic group $G$ is called \emph{reductive} if its largest smooth connected unipotent normal subgroup is trivial. We refer to \cite{Hum75} for a general reference on linear algebraic groups, and to \cite{Con14} for a more advanced reference on reductive group schemes.
We denote by $G$ a complex reductive group, and we will always assume (except in Sections \ref{sec: real group structures on reductive groups} and \ref{sec: equivariant real structures}) that $G$ is of \emph{simply-connected type}, i.e.~isomorphic to the product of a torus and a simply-connected semisimple group.
We denote by $Z(G)$ the center of $G$, by $B$ a Borel subgroup of $G$, by $T$ a maximal torus of $G$ contained in $B$, and by $U$ the unipotent radical of $B$ (which is also a maximal unipotent subgroup of $G$).
We write $\mathbb{X}=\mathbb{X}(T)=\mathrm{Hom}_{\mathrm{gr}}(T,\mathbb{G}_m)$ for the character group of $T$, $\mathbb{X}^\vee=\mathbb{X}^\vee(T)=\mathrm{Hom}_{\mathrm{gr}}(\mathbb{G}_m,T)$ for the cocharacter group of $T$, and $\mathbb{X}_\mathbb{Q}:=\mathbb{X} \otimes_\mathbb{Z} \mathbb{Q}$ and $\mathbb{X}_\mathbb{Q}^\vee:=\mathbb{X}^\vee \otimes_\mathbb{Z} \mathbb{Q}$ for the corresponding $\mathbb{Q}$-vector spaces.
If $H$ is an algebraic subgroup of $G$, then $N_G(H)$ denotes the normalizer of $H$ in $G$.
When $G$ is semisimple, we denote its Dynkin diagram by $\mathrm{Dyn}(G)$.
For $c \in G$, we write \[\mathrm{inn}_c\colon G \to G,\ g \mapsto cgc^{-1}.\] We say that $\mathrm{inn}_c$ is an \emph{inner} automorphism of $G$. An automorphism of $G$ that is not inner is called \emph{outer}.
The set of inner automorphisms of $G$ is a normal subgroup of $\Aut_{\mathrm{gr}}(G)$ whose quotient group identifies with $\Aut(\mathrm{Dyn}(G))$ when $G$ is semisimple.
The Galois group of the extension $\mathbb{C}/\mathbb{R}$ is denoted by
\[
\Gamma:=\Gal(\mathbb{C}/\mathbb{R})=\{\mathrm{Id},\gamma\} \simeq \mathbb{Z}/2\mathbb{Z}.
\]
If $A$ is a $\Gamma$-group, then the first Galois cohomology pointed set associated to $A$ is denoted by $\mathbb{H}^1(\Gamma,A)$.
If furthermore $A$ is an abelian group, then we can also consider $\mathbb{H}^2(\Gamma,A)$, and both $\mathbb{H}^1(\Gamma, A)$ and $\mathbb{H}^2(\Gamma,A)$ are abelian groups.
We refer to \cite{Ser02} for a general reference on Galois cohomology, and to \cite{Man20} for a general reference on real algebraic geometry.
\section{Real group structures on reductive groups}\label{sec: real group structures on reductive groups}
In this section, we introduce the notion of \emph{real group structures} on complex algebraic groups. In particular, since we are above all interested in the case of reductive groups, we explain how to obtain all real group structures on complex reductive groups by piecing together real group structures on complex tori and on complex simply-connected simple groups. Then we recall how the choice of a real group structure on a reductive group induces a $\Gamma$-action (the so-called \emph{$\star$-action}) on its based root datum; this $\star$-action will play an important role when looking at the Key Problem in the next sections for linear representations, nilpotent orbits in semisimple Lie algebras, $T$-varieties, and spherical varieties.
\begin{definition}Let $G$ be a complex algebraic group. \label{def:real group structure}
\begin{enumerate}
\item A \emph{real form} of $G$ is a pair $(F,\Theta)$ with $F$ a real algebraic group and $\Theta\colon\ G \to F_\mathbb{C}$ an isomorphism of complex algebraic groups. (Most of the time we will drop the isomorphism $\Theta$ and simply write that $F$ is a form of $G$, but it is important to remember that a real form of $G$ is a pair, and not just a real algebraic group.)
\item A \emph{real group structure} $\sigma$ on $G$ is a scheme involution on $G$ such that the diagram
\[
\xymatrix@R=4mm@C=2cm{
G \ar[rr]^{\sigma} \ar[d] && G \ar[d] \\
\mathrm{Spec}(\mathbb{C}) \ar[rr]^{\mathrm{Spec}(z \mapsto \overline{z})} && \mathrm{Spec}(\mathbb{C})
}
\]
commutes, and such that the induced morphism $G \to \gamma^*G $ is an isomorphism of complex algebraic groups, where $\gamma^*G \to \mathrm{Spec}(\mathbb{C})$ is the base change of $G \to \mathrm{Spec}(\mathbb{C})$ along the morphism $\mathrm{Spec}(z \mapsto \overline{z})$. (This last condition means that $\iota_G \circ \sigma=\sigma \circ \iota_G$ and $m_G \circ (\sigma \times \sigma)=\sigma \circ m_G$, where $\iota_G\colon G \to G$ is the inverse morphism and $m_G\colon G \times G \to G$ is the multiplication morphism.)
\item Two real group structures $\sigma$ and $\sigma'$ on $G$ are \emph{equivalent} if there exists a group automorphism $\psi \in \Aut_{\mathrm{gr}}(G)$ such that $\sigma'=\psi \circ \sigma \circ \psi^{-1}$.
\item If $\sigma_1$ and $\sigma_2$ are two real group structures on $G$, we may write $\sigma_2=\varphi \circ \sigma_1$ with $\varphi \in \Aut_{\mathrm{gr}}(G)$, and we say that $\sigma_2$ is an \emph{inner} (resp.~an \emph{outer}) \emph{twist} of $\sigma_1$ if $\varphi$ is an inner (resp.~an outer) automorphism of $G$. Let us note that the relation ($\sigma_2$ is an inner twist of $\sigma_1$) is an equivalence relation on the set of real group structures on $G$.
\item If $G$ is a complex algebraic group with a real group structure $\sigma$, then the set of fixed points $G(\mathbb{C})^\sigma$ is called the \emph{real locus} (or \emph{real part}) of $(G,\sigma)$.
\end{enumerate}
\end{definition}
There is a correspondence between real group structures on $G$ and real forms of $G$ given as follows (see also \cite[Section 1.4]{FSS98}).
\begin{itemize}
\item
If $F$ is a real form of $G$, then the composition $G \simeq F_\mathbb{C} \xrightarrow{\mathrm{Id} \times \mathrm{Spec}(z \mapsto \overline{z})} F_\mathbb{C} \simeq G$ is a real group structure on $G$.
\item Conversely, if $\sigma$ is a real group structure on $G$, then the categorical quotient $F:=G/\langle \sigma \rangle$ gives a real form of $G$; an isomorphism $G \simeq F_\mathbb{C} $ being induced by $(q,f)$, where $q\colon\ G\to F$ is the quotient morphism and $f\colon\ G \to \mathrm{Spec}(\mathbb{C})$ is the structure morphism.
\item Moreover, two real forms of $G$ are isomorphic (as real algebraic groups) if and only if the corresponding real group structures are equivalent.
\end{itemize}
\begin{remark}
The real locus of $(G,\sigma)$ is a real Lie group that identifies with the group of $\mathbb{R}$-points of the real algebraic group $G/\langle \sigma \rangle$ (see \cite[Proposition 3.14]{Ben16} for details), whence the name.
\end{remark}
\smallskip
We now assume that $G$ is a complex reductive group. There exists a central isogeny $\widetilde{G} \to G$, where $\widetilde{G}$ is the direct product of a torus $T$ and of a simply-connected semisimple group $G'$, such that every real group structure $\sigma$ on $G$ lifts uniquely to a real group structure $\widetilde{\sigma}$ on $\widetilde{G}$ (see \cite[Section 1.1]{MJT18} for details), and $\widetilde{\sigma}$ stabilizes the two factors $T$ and $G'$. Hence, determining real group structures on complex reductive groups reduces to determining real group structures on complex tori and on complex simply-connected semisimple groups.
Both classifications are well-known: for complex tori it is recalled in Lemma \ref{lem: real form on tori} below, and for simply-connected semisimple groups, it reduces to the case of simply-connected simple groups (see Lemma \ref{lem:easy_lemma_reduction}), in which case real group structures are classified in terms of Dynkin diagrams enriched by blackening a subset of the nodes and connecting some pairs of vertices by arrows (the \emph{Satake diagrams} or the \emph{Vogan diagrams}); see e.g.~\cite[Section 1.7.2]{GW09} or \cite[Section V\!I.10]{Kna02} for a summary of the classification of real group structures on simple groups.
\begin{lemma} \label{lem: real form on tori} \emph{(see \cite[Lemma 1.5]{MJT18} and \cite[Theorem 2]{Cas08})}\\
Let $T \simeq \mathbb{G}_{m,\mathbb{C}}^n$ be an $n$-dimensional complex torus.
\begin{enumerate}
\item If $n=1$, then $T$ has two inequivalent real group structures, defined by $\sigma_0\colon t \mapsto \overline{t}$, corresponding to $\mathbb{G}_{m,\mathbb{R}}$, and by $\sigma_1\colon t \mapsto \overline{t^{-1}}$, corresponding to the circle $\mathbb{S}^1$.
\item If $n=2$, then $\sigma_2\colon (t_1,t_2) \mapsto (\overline{t_2},\overline{t_1})$ defines a real group structure on $T$ corresponding to the Weil restriction $R_{\mathbb{C}/\mathbb{R}}(\mathbb{G}_{m,\mathbb{C}})$.
\item \label{item: n at least 2} If $n\ge 1$, then every real group structure on $T$ is equivalent to exactly one real group structure of the form $\sigma_0^{\times n_0}\times\sigma_1^{\times n_1}\times\sigma_2^{\times n_2}$, where $n=n_0+n_1+2n_2$. In other words, every real torus is isomorphic to $\mathbb{G}_{m,\mathbb{R}}^{n_0} \times (\mathbb{S}^1)^{n_1} \times R_{\mathbb{C}/\mathbb{R}}(\mathbb{G}_{m,\mathbb{C}})^{n_2}$.
\end{enumerate}
\end{lemma}
\begin{lemma} \emph{(see \cite[Lemma 1.7]{MJT18})}\label{lem:easy_lemma_reduction}
Let $\sigma$ be a real group structure on a complex simply-connected semisimple group $G' \simeq \prod_{i \in I} G_i$, where the $G_i$ are the simple factors of $G'$. Then, for a given $i \in I$, we have the following possibilities:
\begin{enumerate}[leftmargin=4mm]
\item $\sigma(G_i)=G_i$ and $\sigma_{|G_i}$ is a real group structure on $G_i$; or
\item there exists $j \neq i$ such that $\sigma(G_i)=G_j$, then $G_i \simeq G_j$ and $\sigma_{| G_i \times G_j}$ is equivalent to $(g_1,g_2) \mapsto (\sigma_0(g_2),\sigma_0(g_1))$ for an arbitrary real group structure $\sigma_0$ on $G_i \simeq G_j$.
\end{enumerate}
\end{lemma}
\begin{example}\label{ex:real group structures on SL2}
There are two inequivalent real group structures on $\SL_2$ given by $\sigma_s(g)=\overline{g}$, whose real locus is $\SL_2(\mathbb{R})$, and its inner twist $\sigma_c(g)={}^t{\overline{g}^{-1}}$, whose real locus is $\SU_2(\mathbb{C})$.
(Here $\overline{g}$ denotes the complex conjugate.)
There are four inequivalent real group structures on $\SL_2 \times \SL_2$ given by $\sigma_i \times \sigma_j$ with $(i,j) \in \{(s,s),(s,c),(c,c)\}$, and $\sigma\colon (g_1,g_2) \mapsto (\sigma_s(g_2),\sigma_s(g_1))$, whose real locus is $\SL_2(\mathbb{C})$.
Similarly, we let the reader check that there are six inequivalent real group structures on $\SL_2 \times \SL_2 \times \SL_2$ and nine inequivalent real group structures on $\SL_2 \times \SL_2 \times \SL_2 \times \SL_2$.
\end{example}
\smallskip
Let $B \subseteq G$ be a Borel subgroup, and let $T \subseteq B$ be a maximal torus. We say that $(T,B)$ is a \emph{Borel pair} in $G$.
Let $\sigma$ be a real group structure on $G$.
Then $(T',B'):=(\sigma(T),\sigma(B))$ is another Borel pair in $G$, and so there exists $c_{\sigma} \in G$ (unique up to left multiplication by an element of $T$) such that
\[
c_{\sigma} T' c_{\sigma}^{-1}=T\ \ \ \ \text{and}\ \ \ \ c_{\sigma} B' c_{\sigma}^{-1}=B.
\]
Let $\theta:=\mathrm{inn}_{c_{\sigma}} \circ \sigma$, which is an antiregular automorphism of $G$.
Since $\theta(T)=T$ and $\theta(B)=B$, the automorphism $\theta$ induces lattice automorphisms of $\mathbb{X}=\mathbb{X}(T)$ and $\mathbb{X}^\vee=\mathbb{X}^\vee(T)$ defined as follows:
\[
\forall \chi \in \mathbb{X},\ \chi \mapsto\,^\gamma\hskip-1pt \chi :=\sigma_0 \circ \chi \circ \theta^{-1}\ \ \ \ \text{ and } \ \ \ \ \forall \lambda \in \mathbb{X}^\vee,\ \lambda \mapsto \,^\gamma\hskip-1pt \lambda:=\theta \circ \lambda \circ \sigma_0,\]
where $\sigma_0(t)=\overline{t}$ is the complex conjugation. One can check that these two lattice automorphisms are of order $\leq 2$, that they do not depend on the choice of $c_{\sigma}$, and that they stabilize the sets of roots $\mathcal{R} \subseteq \mathbb{X}$, simple roots $\SS \subseteq \mathcal{R}$, coroots $\mathcal{R}^\vee \subseteq \mathbb{X}^\vee$, and simple coroots $\SS^\vee \subseteq \mathcal{R}^\vee$ associated with the triple $(G,B,T)$; see e.g.~\cite[Remark 7.1.2]{Con14} or \cite[Section A.2]{MJT18} and references therein for details.
\begin{definition}\label{def: star-action}
With the previous notation, the $\Gamma$-action on the based root datum $(\mathbb{X},\mathbb{X}^\vee,\mathcal{R},\mathcal{R}^\vee,\SS,\SS^\vee)$ associated with $(G,B,T)$ is called the \emph{$\star$-action} induced by $\sigma$.
\end{definition}
\begin{example}\label{ex: star-action for tori}
If $G=B=T$ and $\sigma$ is a real group structure on $T$, then the corresponding $\star$-action is given by
\[
\forall \chi \in \mathbb{X},\ \chi \mapsto\,^\gamma\hskip-1pt \chi =\sigma_0 \circ \chi \circ\sigma \ \ \ \ \text{ and } \ \ \ \ \forall \lambda \in \mathbb{X}^\vee,\ \lambda \mapsto \,^\gamma\hskip-1pt \lambda=\sigma \circ \lambda \circ \sigma_0.\]
\end{example}
\begin{remark}\label{rk: star action trivial in the split case}
If $\sigma$ is an inner twist of a \emph{split} real group structure $\sigma'$ on $G$ (i.e.~$G$ contains a maximal torus $T_0$ preserved by $\sigma'$ and such that $\sigma'_{|T_0}=\sigma_0^{\times \dim(T_0)}$, with the notation of Lemma \ref{lem: real form on tori}), then the $\star$-action induced by $\sigma$ is trivial.
\end{remark}
\section{Equivariant real structures on \texorpdfstring{$G$}{G}-varieties}\label{sec: equivariant real structures}
In this section, we first introduce the notion of \emph{equivariant real structures} on algebraic varieties endowed with an algebraic group action, then we give a general criterion for the existence of an equivariant real structure in the homogeneous case, and finally we recall how Galois cohomology can be used to parametrize equivalence classes of equivariant real structures.
\smallskip
Let $G$ be a complex linear algebraic group, let $F$ be a real form of $G$, and let $\sigma$ be the corresponding real group structure on $G$.
\begin{definition}\label{def: equiv real structure}
Let $X$ be a complex algebraic $G$-variety.
\begin{itemize}
\item A \emph{real $F$-form} of $X$ is a pair $(W,\Xi)$ with $W$ a real algebraic $F$-variety and $\Xi\colon X \to W_\mathbb{C}$ an isomorphism of complex algebraic $G$-varieties. (As for real forms of complex algebraic groups, we will most of the time drop the isomorphism $\Xi$ and simply write that $W$ is a form of $X$.)
\item A \emph{$(G,\sigma)$-equivariant real structure} on $X$ is an antiregular involution $\mu$ on $X$, that is, a scheme involution on $X$ such that the following diagram commutes
\[
\xymatrix@R=4mm@C=2cm{
X \ar[rr]^{\mu} \ar[d] && X \ar[d] \\
\mathrm{Spec}(\mathbb{C}) \ar[rr]^{\mathrm{Spec}(z \mapsto \overline{z})} && \mathrm{Spec}(\mathbb{C})
}
\]
and satisfying the condition
\begin{equation}\label{eq:def eq real structure}
\forall g \in G, \ \forall x \in X,\ \ \mu(g \cdot x)=\sigma(g) \cdot \mu(x).
\end{equation}
\item A $(G,\sigma)$-equivariant real structure $\mu$ on $X$ is \emph{effective} if $X$ is covered by $\Gamma$-stable affine open subsets ($\Gamma$ acting on $X$ through $\mu$), i.e.~if the categorical quotient $X/\langle \mu \rangle$, which always exists as a real algebraic space, is in fact a real algebraic variety (see \cite[Proposition~V.1.8]{SGA1}).
\item Two $(G,\sigma)$-equivariant real structures $\mu$ and $\mu'$ on $X$ are \emph{equivalent} if there exists a $G$-equivariant automorphism $\varphi \in \Aut^{G}(X)$ such that $\mu'=\varphi \circ \mu\circ \varphi^{-1}$. They are \emph{strongly equivalent} if we can choose $\varphi=\mathrm{inn}_c$ for some $c \in G$.
\item The \emph{real locus} (or \emph{real part}) of a $(G,\sigma)$-equivariant real structure $\mu$ on $X$ is the (possibly empty) set of fixed points $X(\mathbb{C})^\mu$; it identifies with the set of $\mathbb{R}$-points of the corresponding real form $X/\left \langle \mu \right \rangle$ and is furthermore endowed with an action of the real Lie group $G(\mathbb{C})^\sigma$.
\end{itemize}
\end{definition}
As for algebraic groups (see Section \ref{sec: real group structures on reductive groups}), there is a bijection between isomorphism classes of real $F$-forms of $X$ (as real algebraic $F$-varieties) and equivalence classes of effective $(G,\sigma)$-equivariant real structures on $X$ (see \cite[Section 5]{Bor20}).
\begin{remark}
If $X$ is quasiprojective or covered by $\Gamma$-stable quasiprojective open subsets, then $X$ is covered by $\Gamma$-stable affine open subsets.
In particular, since homogeneous spaces under the action of a linear algebraic group are quasiprojective (this follows from a theorem of Chevalley; see \cite[Section 11.2, Theorem]{Hum75}), equivariant real structures on homogeneous spaces under $G$ are always effective.
\end{remark}
The following lemma gives a general criterion for the existence of an equivariant real structure in the case where the algebraic $G$-variety $X$ is a homogeneous space.
(This criterion is however not so useful in real life as condition \ref{eq: involution} of Lemma \ref{lem: two conditions} is generally difficult to check.)
\begin{lemma} \emph{(\cite[Lemma 2.4]{MJT18})}\label{lem: two conditions}
Let $X=G/H$ be a homogeneous space.
Then $X$ has a $(G,\sigma)$-equivariant real structure if and only if there exists $t \in G$ such that
\begin{enumerate}
\item \label{eq: sigma compatible}
$\sigma(H)=tHt^{-1}$ \ $((G,\sigma)$\emph{-compatibility condition}); and
\item \label{eq: involution}
$\sigma(t)t \in H$\ \ (\emph{involution condition});
\end{enumerate}
in which case a $(G,\sigma)$-equivariant real structure on $X$ is given by
\[\forall k\in G,\ \mu(kH):=\sigma(k)tH.\]
\end{lemma}
\begin{example} \label{ex:G/P 1}
Let $G$ be a complex reductive group with a real group structure $\sigma$, and let $X=G/P$ be a flag variety. Then Lemma \ref{lem: two conditions} implies that $X$ has a $(G,\sigma)$-equivariant real structure if and only if $\sigma(P)$ is conjugate to $P$. This last condition can be interpreted combinatorially. Let $B \subseteq G$ be a Borel subgroup and let $T \subseteq B$ be a maximal torus, and consider the $\star$-action induced by $\sigma$ on the based root datum $(\mathbb{X},\mathbb{X}^\vee,\mathcal{R},\mathcal{R}^\vee,\SS,\SS^\vee)$ associated with the triple $(G,B,T)$.
Conjugacy classes of parabolic subgroups of $G$ are in bijection with the powerset of $\SS$, and one can check that $\sigma(P)$ is conjugate to $P$ if and only if the subset of simple roots corresponding to the conjugacy class of $P$ is $\Gamma$-stable.
Moreover, if a $(G,\sigma)$-equivariant real structure exists on $X$, then it is unique up to equivalence (this will be a consequence of Proposition \ref{prop:Galois H1 to param eq real structures} since $\Aut^G(X)$ is trivial when $X$ is a flag variety).
\end{example}
Let us mention that if $\sigma$ and $\sigma'$ are two real group structures on $G$ conjugate by an inner automorphism, then there is a bijection between the equivalence classes of $(G,\sigma)$-equivariant real structures and of $(G,\sigma')$-equivariant real structures on $X=G/H$ (see \cite[Proposition A]{MJT20}). On the other hand, if $\sigma$ and $\sigma'$ are conjugate by an outer automorphism, then these two sets may have different cardinality.
\begin{example}\label{ex:counter-ex not strongly eq}
Let $G=\mathbb{G}_{m,\mathbb{C}}^{2}$, let $H=\{1\} \times \mathbb{G}_{m,\mathbb{C}}$, and let
\[
\begin{array}{rcll}
\sigma\ \colon & G \to G,&\ (u,v) \mapsto (\overline{u},\overline{v^{-1}});&\\
\varphi\ \colon & G \to G,&\ (u,v) \mapsto (uv,v);&\ \text{and}\\
\sigma'=\varphi \circ \sigma \circ \varphi^{-1}\ \colon & G \to G, &\ (u,v) \mapsto (\overline{u} \overline{v^{-2}},\overline{v^{-1}}).&
\vspace{-1mm}
\end{array}
\]
Then $\sigma(H)=H$ but $\sigma'(H)=\{(t^2,t)\ | \ t \in \mathbb{G}_{m,\mathbb{C}}\} \neq H$, and so Lemma \ref{lem: two conditions} implies that $X=G/H$ admits a $(G,\sigma)$-equivariant real structure but no $(G,\sigma')$-equivariant real structure.
\end{example}
If we now suppose that $X$ is an \emph{almost homogeneous} $G$-variety, i.e.~ an algebraic $G$-variety with a dense open orbit $X_0=G/H$, then condition \eqref{eq:def eq real structure} implies that an equivariant real structure $\mu$ on $X$ must stabilize $X_0$, and so $\mu_{|X_0}$ is an equivariant real structure on $X_0$.
But an equivariant real structure on $X_0$ need not extend to $X$ (however, if such an extension exists, then it is unique).
That is why, when studying equivariant real structures on almost homogeneous varieties, we are naturally led to study first equivariant real structures on homogeneous spaces.
Let us note that the restriction map $\mu \mapsto \mu_{|X_0}$ induces a map
\begin{align*}
\Upsilon \colon &\{\text{equivalence classes of $(G,\sigma)$-equivariant real structures on $X$}\} \\
&\to \{\text{equivalence classes of $(G,\sigma)$-equivariant real structures on $X_0$}\}
\end{align*}
that is generally neither injective nor surjective (see Example \ref{ex:first bis example}).
The map $\Upsilon$ is however injective when the natural injective homomorphism $\Aut^{G}(X) \hookrightarrow \Aut^{G}(X_0)$ is an isomorphism (which is for instance the case for toric varieties).
Let us mention that a general strategy to determine the equivariant real structures on almost homogeneous varieties, relying on Luna-Vust theory, is detailed in \cite[Section 2.5]{MJT20}; it is this strategy that will be applied to determine the equivariant real structures on toric varieties (in Section \ref{subsec: real structures on toric varieties}), on spherical embeddings (in Section \ref{subsec: extension of real structures to sph embeddings}), and on almost homogeneous $\SL_2$-threefolds (in Section \ref{sec:almost homg SL2-threefolds}).
\smallskip
Once we know the existence of a $(G,\sigma)$-equivariant real structure $\mu$ on an algebraic $G$-variety $X$, Galois cohomology can be used to parametrize the equivalence classes of all of them.
First, observe that the group of $G$-equivariant automorphisms $\Aut^{G}(X)$ is endowed with a $\Gamma$-group structure as follows:
\[
\Gamma \times \Aut^{G}(X) \to \Aut^{G}(X) ,\ (\gamma,\varphi) \mapsto
\mu \circ \varphi \circ \mu.
\]
The next result is then a straightforward consequence of the definition of cohomologous $1$-cocycles with values in the $\Gamma$-group $\Aut^{G}(X)$.
\begin{proposition} \label{prop:Galois H1 to param eq real structures} \emph{(see \cite[Corollary 8.2]{Wed18} or \cite[Lemma 2.11]{MJT18})}\\
Let $X$ be a complex algebraic $G$-variety with a $(G,\sigma)$-equivariant real structure $\mu_0$. The Galois group $\Gamma$ acts on $\Aut^G(X)$ by $\mu_0$-conjugacy as above. Then the map
\[\begin{array}{ccl}
\hspace{-4mm} \mathbb{H}^1(\Gamma,\Aut^G(X)) &\to &\{ \text{equivalence classes of $(G,\sigma)$-equivariant real structures on $X$}\}\\
\varphi &\mapsto &\ \ \ \varphi \circ \mu_0
\end{array}\]
is a bijection that sends the trivial cohomology class to the equivalence class of $\mu_0$.
\end{proposition}
\begin{corollary}\emph{(see \cite[Corollary 1.8]{MJT20})} \label{cor: finiteness for homog spaces}
Let $X=G/H$ be a homogeneous space.
Then $X$ admits a finite number of equivalence classes of $(G,\sigma)$-equivariant real structures.
\end{corollary}
\section{Real structures on linear representations and nilpotent orbits}\label{sec: warm-up}
In this section, we consider the Key Problem for two classical families of varieties with a reductive group action and related to representation theory, namely the linear representations (in Section \ref{subsec:real structures on linear reps}) and the nilpotent orbits in semisimple Lie algebras, as well as the normalizations of their closures (in Section \ref{subsec:real structures on nilpotent orbits}).
\subsection{Real structures on linear representations}\label{subsec:real structures on linear reps}
In this subsection, based on the appendix of \cite{MJT18} written by Borovoi (and based itself on the work of Tits in \cite{Tits71}), we consider the Key Problem for finite-dimensional linear representations of complex simple groups.
To simplify the situation, we only consider the case of irreducible representations.
\smallskip
Let $G$ be a complex (simply-connected) simple group, let $B \subseteq G$ be a Borel subgroup, let $U$ be the unipotent radical of $B$, and let $T \subseteq B$ be a maximal torus. Recall that the restriction map $\mathbb{X}(B) \to \mathbb{X}(T),\ \chi \mapsto \chi_{|T}$ induces an isomorphism of character groups, and so we can identify $\mathbb{X}=\mathbb{X}(T)$ with $\mathbb{X}(B)$.
A \emph{dominant weight} of $G$ with respect to $(B,T)$ is a character $\lambda \in \mathbb{X}$ such that $\langle \lambda,\gamma \rangle \geq 0$ for every $\gamma \in \SS^\vee$. The subset $\mathbb{D} \subseteq \mathbb{X}$ of dominant weights satisfies $\mathbb{D}=\mathbb{X} \cap C$ in $\mathbb{X}_\mathbb{Q}$, where $C \subseteq \mathbb{X}_\mathbb{Q}$ is a polyhedral cone known as the \emph{fundamental Weyl chamber}.
\begin{example}
\begin{multicols}{2}
Let $G=\SL_3$. Then $\mathbb{X}$ is a rank two lattice generated by $\SS=\{\alpha_1,\alpha_2\}$, and the fundamental Weyl chamber is the grey cone. The dominant weights of $G$ are then all the linear combinations of $\alpha_1$ and $\alpha_2$ contained in the grey cone.
\begin{center}
\includegraphics[width=4.5cm]{Weyl_chambers_for_A2.png}
\end{center}
\end{multicols}
\end{example}
Let $V$ be an irreducible representation of $G$. Then $V^U$ is a line on which $B$ acts through some character $\lambda \in \mathbb{X}$ called \emph{highest weight} of the representation $V$. The map that associates to an irreducible representation its highest weight induces a one-to-one correspondence between the irreducible representations (up to isomorphism) and the dominant weights. For a given $\lambda \in \mathbb{D}$, we denote by $V_\lambda$ the corresponding irreducible representation.
\smallskip
Let $\sigma$ be a real group structure on $G$.
Let $Z=Z(G)$ be the center of $G$; it is stable by $\sigma$, and so we can view $Z$ as an abelian $\Gamma$-group and consider the abelian group $\mathbb{H}^2(\Gamma,Z)$. We denote by $\delta([\sigma]) \in \mathbb{H}^2(\Gamma,Z)$ the \emph{Tits class} of $\sigma$; see \cite[Section 4.2]{Tits71} or \cite[Section 1.3]{MJT18} for the definition of the Tits class.
\begin{remark}
Tits classes for all complex (simply-connected) simple groups have been computed by Borovoi using Theorem \ref{th: real structures on linear reps} below; complete tables can be found in \cite[Appendix]{MJT18}.
\end{remark}
The $\star$-action induced by $\sigma$ stabilizes the subset $\mathbb{D}$ in $\mathbb{X}$ (see \cite[Lemma A.1]{MJT18}).
Let $\lambda \in \mathbb{D}$ be such that $\gamma \cdot \lambda=\lambda$. This implies that the restriction $\lambda_{|Z}\colon Z \to \mathbb{G}_{m,\mathbb{C}}$ satisfies $\lambda_{|Z} \circ \sigma_{|Z}=\sigma_0 \circ \lambda_{|Z}$, with $\sigma_0(t)=\overline{t}$; in other words, $\lambda_{|Z}\colon Z \to \mathbb{G}_{m,\mathbb{C}}$ is a morphism of abelian $\Gamma$-groups, and so it induces a homomorphism between second cohomology groups
\[
\lambda_*\colon \mathbb{H}^2(\Gamma,Z) \to \mathbb{H}^2(\Gamma,\mathbb{G}_{m,\mathbb{C}})=\{\pm 1\}.
\]
If we restrict ourselves to considering only real forms in the category of linear representations, then the next result gives a complete answer to question \ref{item: existence part of Key-Prob3} of the Key Problem for irreducible linear representations.
\begin{theorem} \emph{(\cite[Theorem A.2]{MJT18}, see also \cite[Theorem 7.2]{Tits71})}\\ \label{th: real structures on linear reps}\noindent We keep the previous notation, and we denote by $F=G/\langle \sigma \rangle$ the real form of $G$ corresponding to $\sigma$.
Let $V_\lambda$ be an irreducible representation of $G$.
Then $V_\lambda$ admits a real $F$-form (in the category of linear representations of $F$) if and only if
\begin{enumerate}
\item \label{item i: th linear reps} the dominant weight $\lambda$ is fixed by the $\star$-action induced by $\sigma$; and
\item \label{item ii: th linear reps} the cohomology class $\lambda_*(\delta([\sigma])) \in \mathbb{H}^2(\Gamma,\mathbb{G}_{m,\mathbb{C}})$ is trivial.
\end{enumerate}
\end{theorem}
\begin{remark}
If only condition \ref{item i: th linear reps} of Theorem \ref{th: real structures on linear reps} is satisfied, then $V_\lambda \oplus V_\lambda$ admits a real $F$-form (in the category of linear representations of $F$) but not $V_\lambda$.
\end{remark}
If a linear representation of $G$ admits a real $F$-form (in the category of linear representations of $F$), then it follows from Schur's lemma that this real $F$-form is unique (in the category of linear representations of $F$), up to isomorphism.
Complete tables with the list of all the irreducible representations of $G$ admitting a real $F$-form, in the category of linear representations of $F$, can be found in \cite{Tits67}. However we do not know if there exists a linear representation of $G$ admitting a real $F$-form that is not a linear representation of $F$.
\subsection{Real structures on nilpotent orbits in semisimple Lie algebras}\label{subsec:real structures on nilpotent orbits}
In this subsection, based on the work of Moser-Jauslin--Terpereau \cite{MJT21}, we consider the Key Problem for nilpotent orbits in complex semisimple Lie algebras and for the normalizations of their closures; those are examples of varieties with symplectic singularities (whose symplectic desingularizations are quite well understood), and they furthermore have a series of applications in the representation theory of algebraic groups, Lie algebras and related objects (such as Weyl groups).
We refer to \cite{CM93} for a general reference on nilpotent orbits.
\smallskip
Let $G$ be a complex semisimple group, let $\sigma$ be a real group structure on $G$, and let $\mathfrak{g}$ be the Lie algebra of $G$.
Denote by
\[d\sigma_e\colon \mathfrak{g} \to \mathfrak{g}\]
the differential of $\sigma\colon G \to G$ at the identity element. One can check that $d\sigma_e$ is a $(G,\sigma)$-equivariant real structure on $\mathfrak{g}$, viewed as a $G$-variety for the adjoint action, and that $d\sigma_e$ stabilizes the (finite) set of nilpotent orbits in $\mathfrak{g}$.
In particular, if $\O$ is a nilpotent orbit in $\mathfrak{g}$ such that $d\sigma_e(\O)=\O$, then $(d\sigma_e)_{|\O}$ is a $(G,\sigma)$-equivariant real structure on $\O$.
However, there are also equivariant real structures on nilpotent orbits that are not obtained by differentiating a real group structure on $G$, nor even by restricting an equivariant real structure from the Lie algebra $\mathfrak{g}$; this assertion is illustrated by the following example.
\begin{example}(\cite[Example~1.1]{MJT21})\label{ex:non-gloabl}
\begin{itemize}
\item We keep the previous notation, and assume that $d\sigma_e(\O)=\O$. Let $\theta \in \mathbb{R}$.
Then
\[
\mu_\theta \colon \O \to \O,\ v \mapsto e^{i\theta} d\sigma_e(v)\]
is a $(G,\sigma)$-equivariant real structure on $\O$ which is not obtained by differentiating a real group structure on $G$ when $\theta \notin 2\pi \mathbb{Z}$ (because, in this case, $\mu_\theta$ does not preserve the Lie bracket).
\item\label{item:non-global} Let $G=\SL_3$ with $\sigma(g)=\overline{g}$ for all $g \in G$ (here $\overline{g}$ denotes the complex conjugate of $g$), and let $\O_{reg}$ be the \emph{regular} nilpotent orbit in $\sl_3$ (i.e.~the unique nilpotent orbit whose closure contains all the other nilpotent orbits).
Then it follows from Lemma \ref{lem: two conditions} that the map
\[
\mu\colon \O_{reg} \to \O_{reg},\ g \cdot \begin{bmatrix}
0 & 1 & 0 \\ 0 & 0 & 1\\ 0 &0&0
\end{bmatrix} \mapsto\sigma(g) \cdot \begin{bmatrix}
0 & 1 & i \\ 0 & 0 & 1\\ 0 &0&0
\end{bmatrix}
\]
is a $(G,\sigma)$-equivariant real structure on $\O_{reg}$. Moreover, $\mu$ does not extend to a $(G,\sigma)$-equivariant real structure on $\sl_3$ (see \cite[Section 3.4]{MJT21}).
\end{itemize}
\end{example}
We recall that to every nilpotent orbit $\O$ of $\mathfrak{g}$ is associated a unique \emph{weighted Dynkin diagram} $\Delta(\O)$, i.e.~a Dynkin diagram of type $\mathfrak{g}$ with a label in $\{0,1,2\}$ for each node; see \cite[Section 3.5]{CM93} for details. Let us note however that not every weighted Dynkin diagram corresponds to a nilpotent orbit of $\mathfrak{g}$.
\begin{example}
Let $\mathfrak{g}=\sl_3$. Then the nilpotent cone of $\mathfrak{g}$ is the union of three nilpotent orbits, corresponding to the partitions $[3]$, $[2,1]$, $[1^3]$, whose corresponding weighted Dynkin diagrams are
\begin{center}
\begin{tikzpicture}[scale=.4]
\draw (-1,0) node[anchor=east]{} ;
\draw[xshift=0 cm,thick] (0 cm,0) circle (.2cm) node[anchor=south]{2};
\draw[xshift=1 cm,thick] (1 cm,0) circle (.2cm) node[anchor=south]{2};
\draw[xshift=2 cm,thick] (2 cm,0) circle (.2cm)node[anchor=south]{2};
\draw[thick] (0.2 cm,0) -- +(1.6 cm,0);
\draw[thick] (2.2 cm,0) -- +(1.6 cm,0);
\end{tikzpicture}
\begin{tikzpicture}[scale=.4]
\draw (-1,0) node[anchor=east]{} ;
\draw[xshift=0 cm,thick] (0 cm,0) circle (.2cm) node[anchor=south]{1};
\draw[xshift=1 cm,thick] (1 cm,0) circle (.2cm) node[anchor=south]{0};
\draw[xshift=2 cm,thick] (2 cm,0) circle (.2cm)node[anchor=south]{1};
\draw[thick] (0.2 cm,0) -- +(1.6 cm,0);
\draw[thick] (2.2 cm,0) -- +(1.6 cm,0);
\end{tikzpicture}
\begin{tikzpicture}[scale=.4]
\draw (-1,0) node[anchor=east]{} ;
\draw[xshift=0 cm,thick] (0 cm,0) circle (.2cm) node[anchor=south]{0};
\draw[xshift=1 cm,thick] (1 cm,0) circle (.2cm) node[anchor=south]{0};
\draw[xshift=2 cm,thick] (2 cm,0) circle (.2cm)node[anchor=south]{0};
\draw[thick] (0.2 cm,0) -- +(1.6 cm,0);
\draw[thick] (2.2 cm,0) -- +(1.6 cm,0);
\end{tikzpicture}
\end{center}
\end{example}
Also, denoting by $\gamma \cdot \Delta(\O)$ the weighted Dynkin diagram obtained from $\Delta(\O)$ by applying the $\star$-action induced by $\sigma$ (see Definition \ref{def: star-action}), we have that
\[\gamma \cdot \Delta(\O)=\Delta(d\sigma_e(\O)).\]
The following theorem provides a complete answer to the Key Problem for nilpotent orbits and for the normalizations of their closures in $\mathfrak{g}$.
\begin{theorem}\emph{(\cite[Main Theorem]{MJT21})}\label{th:main th nilpotent}
We keep the previous notation.
\begin{enumerate}
\item\label{item:existence} The nilpotent orbit $\O$ in $\mathfrak{g}$ admits a $(G,\sigma)$-equivariant real structure if and only if $\gamma \cdot \Delta(\O)=\Delta(\O)$, in which case $(d\sigma_e)_{|\O}$ is a $(G,\sigma)$-equivariant real structure on $\O$. Moreover, all $(G,\sigma)$-equivariant real structures on $\O$ are equivalent.
\item\label{item:extension}
Every $(G,\sigma)$-equivariant real structure on $\O$ extends uniquely to the normalization $\widetilde{\O}$ of $\overline{\O}$.
Moreover, all $(G,\sigma)$-equivariant real structures on $\widetilde{\O}$ are equivalent.
\end{enumerate}
\end{theorem}
\begin{remark}\
\begin{itemize}
\item The nilpotent orbits $\O$ such that $\gamma \cdot \Delta(\O)=\Delta(\O)$ are determined in \cite[Section 3.1]{MJT21}.
It turns out that, except for a few cases in type $D_n$, every nilpotent orbit $\O$ in $\mathfrak{g}$ is fixed by the $\star$-action when $\mathfrak{g}$ is simple.
\item When the nilpotent orbit closure $\overline{\O}$ is non-normal, we do not know whether every $(G,\sigma)$-equivariant real structure on $\O$ extends to $\overline{\O}$. A short summary on what is known about the (non-)normality of nilpotent orbit closures in semisimple Lie algebras can be found in \cite[Section 3.3]{MJT21}.
\item Assume that $G$ is simple, and let $F=G/\langle \sigma \rangle$ be the real form corresponding to $\sigma$.
For every nilpotent orbit $\O$ in $\mathfrak{g}$, we have that $\O(\mathbb{C})^{d\sigma_e}=\O(\mathbb{C}) \cap \mathfrak{g}^{d\sigma_e}$ is a real manifold (possibly empty) whose $F(\mathbb{R})$-orbits, usually called \emph{real nilpotent orbits}, are classified (see \cite[Section 9]{CM93}).
\end{itemize}
\end{remark}
\begin{example}(\cite[Example 3.5]{MJT21})\label{ex:D4}
In type $D_4$, the Hasse diagram for nilpotent orbit closures is the following (see \cite[Section 6.2]{CM93} for details).
\begin{multicols}{2}
\begin{center}
\scalebox{0.8}{
\xymatrix@R=4mm@C=2cm{
& \O_{[7,1]} \ar@{-}[d] & \\
& \O_{[5,3]} \ar@{-}[rd] \ar@{-}[ld] \ar@{-}[d] &\\
\O_{[4^2]}^{\mathrm{I}} \ar@{-}[rd] \ar@/^3pc/@{<.>}[rr] \ar@{<.>}[r] & \O_{[5,1^3]} \ar@{<.>}[r]\ar@{-}[d]& \O_{[4^2]}^{\mathrm{I\!I}} \ar@{-}[ld]\\
& \O_{[3^2,1^2]} \ar@{-}[d]& \\
& \O_{[3,2^2,1]} \ar@{-}[ld] \ar@{-}[rd] \ar@{-}[d]& \\
\O_{[2^4]}^{\mathrm{I}} \ar@/^3pc/@{<.>}[rr] \ar@{<.>}[r]\ar@{-}[rd] & \O_{[3,1^5]} \ar@{<.>}[r]\ar@{-}[d]& \O_{[2^4]}^{\mathrm{I\!I}} \ar@{-}[ld] \\
& \O_{[2^2,1^4]}\ar@{-}[d]& \\
& \O_{[1^8]}&
}
}
\end{center}
Let $G=\Spin_8$, and let $\sigma_s$ be the real group structure on $G$ whose real locus is the non-compact real Lie group usually denoted by $\Spin(4,4)$. If $\sigma$ is an inner twist of $\sigma_s$, then the $\star$-action induced by $\sigma$ stabilizes each nilpotent orbit of $\mathfrak{g}$, while if $\sigma$ is an outer twist of $\sigma_s$, then the corresponding $\star$-action swaps two pairs of orbits (depending on the choice of $\sigma$ in its equivalence class).
The dotted arrows in the opposite picture indicate which pairs of orbits can be swapped by the $\star$-action in this last case.
\end{multicols}
\end{example}
\section{Real structures on varieties with a torus action} \label{sec: real structures on T-varieties}
In this section, we consider the Key Problem when the reductive group acting (effectively) is a complex torus $T$. There are basically two cases to consider:
\begin{itemize}
\item The first case is when $T$ acts with a dense open orbit (case of \emph{toric varieties}), in which case there is a well-known combinatorial description of the $T$-varieties in terms of fans, and a complete answer to the Key Problem can be provided.
\item The second case is when a general $T$-orbit is of codimension $c\geq 1$ (this number $c$ is called the \emph{complexity} of the $T$-variety), in which case there is again a combinatorial description due to Altmann--Hausen \cite{AH06} (affine case) and Altmann--Hausen--S\"u\ss \ \cite{AHS08} (general case) that makes it possible to provide a satisfactory answer to question \ref{item: existence part of Key-Prob3} of the Key Problem, at least in the affine case, but involves rather heavy technical machinery. (Let us mention that the combinatorial description of Altmann-Hausen extends combinatorial descriptions obtained by Demazure in \cite{Dem88} and by Flenner--Zaidenberg in \cite{FZ03}.)
\end{itemize}
\subsection{Real structures on toric varieties}\label{subsec: real structures on toric varieties}
We refer to \cite{Ful93} or \cite{CLS11} for a complete account on toric varieties over the field of complex numbers.
Let $T \simeq \mathbb{G}_{m,\mathbb{C}}^n$ be a complex torus of dimension $n \geq 1$, let $\sigma$ be a real group structure on $T$, and let $X$ be a complex toric variety with open orbit $X_0 \simeq T$. Our goal in this subsection is to explain how to determine all the equivalence classes of $(T,\sigma)$-equivariant real structures on $X$.
Let us note that, since the Galois descent $\mathbb{C}/\mathbb{R}$ is always effective for complex toric varieties (see e.g.~\cite[Remark 1.11]{Hur11}), there is a one-to-one correspondence between equivalence classes of equivariant real structures on $X$ and isomorphism classes of real forms of $X$ in the category of real toric varieties.
\smallskip
By Lemma \ref{lem: real form on tori}, there exists $\psi \in \Aut_{\mathrm{gr}}(T)$ and $n_0,n_1,n_2 \in \mathbb{N}_{\geq 0}$ such that
\[\sigma=\psi \circ (\sigma_0^{\times n_0}\times\sigma_1^{\times n_1}\times\sigma_2^{\times n_2}) \circ \psi^{-1},\ \ \text{with}\ n_0+n_1+2n_2=n.\]
Furthermore, denoting
\[\tau_1: \mathbb{G}_{m,\mathbb{C}} \to \mathbb{G}_{m,\mathbb{C}},\ t \mapsto -\overline{{t}^{-1}},\]
which is a $(\mathbb{G}_{m,\mathbb{C}},\sigma_1)$-equivariant real structure on $\mathbb{G}_{m,\mathbb{C}}$, we verify that the antiregular involution $X_0 \to X_0$ defined by
\[
\psi \circ (\sigma_0^{\times n_0}\times \rho_1 \times \cdots \times \rho_{n_1} \times\sigma_2^{\times n_2}) \circ \psi^{-1}, \ \text{where}\ \rho_i\in \{ \sigma_1, \tau_1\} \text{ for each } i=1,\ldots, n_1,
\]
is a $(T,\sigma)$-equivariant real structure on $X_0 \simeq T$ (seen as a $T$-variety for the usual multiplication on $T$). Furthermore, any $(T,\sigma)$-equivariant real structure on $X_0$ is equivalent to one of this form, and two equivariant real structures $\psi \circ (\sigma_0^{\times n_0}\times \rho_1 \times \cdots \times \rho_{n_1} \times\sigma_2^{\times n_2}) \circ \psi^{-1}$ and $\psi \circ (\sigma_0^{\times n_0}\times \rho'_1 \times \cdots \times \rho'_{n_1} \times\sigma_2^{\times n_2}) \circ \psi^{-1}$ on $X_0$ are equivalent if and only if $\rho_i=\rho'_i$ for each $i=1,\ldots, n_1$; in particular, $X_0$ admits exactly $2^{n_1}$ equivalence classes of $(T,\sigma)$-equivariant real structures. (This can be proved for instance using Proposition \ref{prop:Galois H1 to param eq real structures} since $\mathbb{H}^1(\Gamma,\Aut^{T}(X_0)) \simeq (\mathbb{Z}/2\mathbb{Z})^{n_1}$ by \cite[Proposition 1.18]{MJT18}.)
\begin{remark}
Let $\mu$ be a $(T,\sigma)$-equivariant real structure on $X_0$. Then, either $\mu$ is equivalent to $\sigma$, in which case $X_0/\langle \mu \rangle \simeq T/\langle \sigma \rangle$ is the trivial $(T/\langle \sigma \rangle)$-torsor, or else $X_0/\langle \mu \rangle$ is a $(T/\langle \sigma \rangle)$-torsor with no real points (in particular it is irrational).
\end{remark}
\smallskip
It remains to determine which $(T,\sigma)$-equivariant real structures on $X_0$ extend to $X$, and which ones stay equivalent after extension. Let us first note that $\Aut^T(X)\simeq \Aut^T(X_0) \simeq T$, which implies two things:
\begin{itemize}
\item If $\mu_1$ and $\mu_2$ are two equivalent $(T,\sigma)$-equivariant real structures on $X_0$, then $\mu_1$ extends to $X$ if and only if $\mu_2$ extends to $X$.
\item Two $(T,\sigma)$-equivariant real structures on $X$ are equivalent if and only if their restrictions on $X_0$ are equivalent.
\end{itemize}
Therefore, we are left with determining which $(T,\sigma)$-equivariant real structures $\psi \circ (\sigma_0^{\times n_0}\times \rho_1 \times \cdots \times \rho_{n_1} \times\sigma_2^{\times n_2}) \circ \psi^{-1}$ on $X_0$ extend to $X$. It turns out that the answer, obtained by Huruguen in \cite{Hur11}, only depends on $\sigma$.
\begin{theorem}\label{th:real structure extend to toric embeddings} \emph{(\cite[Proposition 1.19 and Theorem 1.25]{Hur11})}\\
With the previous notation, a $(T,\sigma)$-equivariant real structure $\mu$ on $X_0$ extends to $X$ if and only if the $\star$-action on $\mathbb{X}_\mathbb{Q}^\vee$ induced by $\sigma$ (see Example \ref{ex: star-action for tori}) stabilizes the fan $\Sigma$ associated with the $T$-equivariant embedding $X_0 \hookrightarrow X$.
\end{theorem}
It follows from Theorem \ref{th:real structure extend to toric embeddings} and the discussion above that the complex toric variety $X$ admits either $2^{n_1}$ equivalence classes of $(T,\sigma)$-equivariant real structures (when the $\star$-action on $\mathbb{X}_\mathbb{Q}^\vee$ stabilizes the fan $\Sigma$) or none. In particular, we have a complete answer to the Key Problem for toric varieties in terms of combinatorial objects easy to implement in a software system. We now give some examples.
\begin{example}
Let $T \simeq \mathbb{G}_{m,\mathbb{C}}$, and let $\Sigma$ be the fan in $\mathbb{X}_\mathbb{Q}^\vee \simeq \mathbb{Q}$ generated by $e_1:=1$; it corresponds to the natural embedding $X_0=\mathbb{G}_{m,\mathbb{C}} \hookrightarrow X=\mathbb{A}_\mathbb{C}^1$.
\begin{itemize}
\item If $\sigma=\sigma_0$, then the corresponding $\star$-action on $\mathbb{X}_\mathbb{Q}^\vee$ is trivial, and so any $(T,\sigma_0)$-equivariant real structure on $X_0$ extends to $\mathbb{A}_\mathbb{C}^1$ (and they are all equivalent); it corresponds to the real $\mathbb{G}_{m,\mathbb{R}}$-form $\mathbb{A}_\mathbb{R}^1$, up to isomorphism.
\item If $\sigma=\sigma_1$, then the corresponding $\star$-action on $\mathbb{X}_\mathbb{Q}^\vee$ is $r \mapsto -r$, and so none of the two inequivalent $(T,\sigma_1)$-equivariant real structures $\sigma_1$ and $\tau_1$ extends to $\mathbb{A}_\mathbb{C}^1$, i.e.~$\mathbb{A}_\mathbb{C}^1$ has no real $\mathbb{S}^1$-form.
\end{itemize}
However, if we now take for $\Sigma$ the complete fan in $\mathbb{X}_\mathbb{Q}^\vee \simeq \mathbb{Q}$ generated by $e_1$ and $-e_1$, then any $(T,\sigma)$-equivariant real structure on $X_0$ extends to $X=\P_\mathbb{C}^1$ since both $\star$-actions stabilize $\Sigma$; it gives rise to the real $\mathbb{G}_{m,\mathbb{R}}$-form $\P_{\mathbb{R}}^{1}$ when $\sigma=\sigma_0$, and to the two real $\mathbb{S}^1$-forms $\P_\mathbb{R}^1$ and $\varnothing$ (the empty conic) when $\sigma=\sigma_1$.
\end{example}
Let us mention that if $\sigma$ and $\sigma'$ are two equivalent real group structures on $T$, it may happen that $X$ admits $2^{n_1}$ inequivalent $(T,\sigma)$-equivariant real structures but no $(T,\sigma')$-equivariant real structure. Hence, it is essential to set a real group structure $\sigma$, and not just its equivalence class, when studying the equivariant real structures on toric varieties.
\begin{example}
Let $T \simeq \mathbb{G}_{m,\mathbb{C}}^2$, and let $\sigma$ and $\sigma'$ be the two equivalent real group structures defined in Example \ref{ex:counter-ex not strongly eq}. The $\star$-action on $\mathbb{X}^\vee \simeq \mathbb{Z}^2$ induced by $\sigma$ is given by $(m,n) \mapsto (m,-n)$, while the one induced by $\sigma'$ is given by $(m,n) \mapsto (m-2n,-n)$. Consider now the $T$-equivariant embedding $X_0 \hookrightarrow X \simeq \P_\mathbb{C}^1 \times \P_\mathbb{C}^1$ corresponding to the following complete fan in $\mathbb{X}_\mathbb{Q}^\vee$:
\begin{center}
\begin{tikzpicture}
\draw[->] (5,0) -- (6,0);
\draw[->] (5,0) -- (4,0);
\draw[->] (5,0) -- (5,1);
\draw[->] (5,0) -- (5,-1);
\node at (6.3,0) {$e_1$};
\node at (5,1.2) {$e_2$};
\node at (3.6,0) {$-e_1$};
\node at (5,-1.2) {$-e_2$};
\node at (5.6,0.6) {$C_1$};
\node at (4.4,0.6) {$C_2$};
\node at (5.6,-0.6) {$C_4$};
\node at (4.4,-0.6) {$C_3$};
\end{tikzpicture}
\end{center}
\noindent We denote this fan by $\Sigma$.
The $\star$-action induced by $\sigma$ swaps the cones $C_1 \leftrightarrow C_4$ and $C_2 \leftrightarrow C_3$, so $\Sigma$ is $\Gamma$-stable and $X$ admits $2$ equivalence classes of $(T,\sigma)$-equivariant real structures (corresponding to the isomorphism classes of the two real $(\mathbb{G}_{m,\mathbb{R}} \times \mathbb{S}^1)$-forms $\P_\mathbb{R}^1 \times \P_\mathbb{R}^1$ and $\varnothing$). But the $\star$-action induced by $\sigma'$ maps $e_2$ to $-2e_1-e_2$, so $\Sigma$ is not $\Gamma$-stable and $X$ admits no $(T,\sigma')$-equivariant real structure.
\end{example}
\begin{works}\
\begin{itemize}
\smallskip
\item Equivariant real structures (in a slightly broader sense than in the present article)
on projective smooth toric surfaces and threefolds have been classified by Delaunay in \cite{Del03,Del04}; she has also studied the topology of the corresponding real loci.
\smallskip
\item Descent data and $\mathrm{K}$-forms for toric varieties, with $\mathrm{K}$ an arbitrary field, have been studied by Huruguen in \cite{Hur11} for arbitrary toric varieties (focusing on determining when a given descent datum on the open orbit extends to the whole variety), by Elizondo--Lima-Filho--Sottile--Teitler in \cite{ELST14} for projective spaces and toric surfaces (with a detailed classification of the possible descent data in these cases), and by Duncan in \cite{Dun16} who has considered and compared $\mathrm{K}$-forms of toric varieties in three different categories, namely the abstract varieties, the toric varieties, and the \emph{neutral} toric varieties (i.e.~toric varieties whose open orbit is a trivial torsor).
\end{itemize}
\end{works}
\subsection{Real structures on arbitrary affine \texorpdfstring{$T$}{T}-varieties}\label{subsec: real structures on T-varieties}
In this subsection, based on the work of Gillard \cite{Gil20}, we still consider torus actions on complex varieties, but we do not assume that there is an open orbit anymore, and we restrict ourselves to the affine setting (in which case the Galois descent $\mathbb{C}/\mathbb{R}$ is always effective). Our main goal in this subsection is to outline the combinatorial answer obtained by Gillard to question \ref{item: existence part of Key-Prob3} of the Key Problem for affine $T$-varieties.
\smallskip
We start by recalling, very briefly, the combinatorial description of affine $T$-varieties due to Altmann--Hausen (see \cite{AH06} for details):
Let $T \simeq \mathbb{G}_{m,\mathbb{C}}^n$ be a complex torus, with character lattice $\mathbb{X} \simeq \mathbb{Z}^n$, and let $X$ be a complex affine $T$-variety. The $T$-action on $X$ corresponds to an $\mathbb{X}$-grading of the coordinate ring $\mathbb{C}[X]$ as follows:
\[\mathbb{C}[X] = \bigoplus_{\chi \in \mathbb{X}} \mathbb{C}[X]_\chi \text{ with } \mathbb{C}[X]_\chi:=\{ f \in \mathbb{C}[X] \ |\ \forall t \in T, \forall x \in X,\ (t^{-1} \cdot f)(x)=\chi(t)f(x)\}.\]
Let $\omega$ be a full dimensional polyhedral cone in $\mathbb{X}_\mathbb{Q}$, let $Y$ be a \textsl{semi-projective} variety (i.e.~$\mathbb{H}^0(Y,\O_Y)$ is a finitely generated $\mathbb{C}$-algebra and the affinization morphism $Y \to\mathrm{Spec}(H^0(Y,\O_Y))$ is projective), and let $\mathcal{D}:= \sum \Delta_i \otimes D_i$ be a \textsl{polyhedral divisor} on $Y$; this means that the $D_i$ are prime divisors on $Y$ and the coefficients $\Delta_i$ are convex polyhedra in $\mathbb{X}_\mathbb{Q}^\vee$ depending on $\omega$.
Then, for every $\chi \in \omega \cap \mathbb{X}$, a Weil $\mathbb{Q}$-divisor on $Y$ is given by
$\mathcal{D}(\chi):= \sum \min\{ \langle \chi | \Delta_i \rangle \} \otimes D_i$, where $\langle -|- \rangle\colon \mathbb{X}_\mathbb{Q} \times \mathbb{X}_\mathbb{Q}^\vee \to \mathbb{Q}$ is the natural pairing.
We assume furthermore that $\mathcal{D}$ is \emph{proper}, i.e.~that any $\mathcal{D}(\chi)$ is a semiample rational Cartier divisor on $Y$, which is big whenever $\chi$ belongs to the relative interior of the cone $\omega$.
From such a triple ($\omega, Y, \mathcal{D}$), that we will call an \emph{AH datum} in the rest of this subsection, Altmann and Hausen construct an $\mathbb{X}$-graded $\mathbb{C}$-algebra defined by
\[
A[\omega, Y, \mathcal{D}]:= \bigoplus_{\chi \in \omega \cap \mathbb{X}} \mathbb{H}^0(Y, \mathcal{O}_Y(\mathcal{D}(\chi))) \subseteq \mathbb{C}(Y)[\mathbb{X}].
\]
\begin{theorem} \emph{(see \cite[Theorems 3.1 and 3.4]{AH06})}\label{th:AH complex affine}
\begin{enumerate}
\item Let $(\omega, Y, \mathcal{D})$ be an AH datum. Then $Z[\omega,Y, \mathcal{D}] := \mathrm{Spec}(A[\omega,Y, \mathcal{D}])$ is a complex affine $T$-variety that is $T$-equivariantly birational to $T \times Y$.
\item\label{item:ii of th 3.4} Let $X$ be a complex affine $T$-variety. Then there exists an AH datum $(\omega, Y, \mathcal{D})$ such that there is an isomorphism of $T$-varieties $X \simeq Z[\omega, Y, \mathcal{D}]$.
\end{enumerate}
\end{theorem}
\begin{remark}
The semi-projective variety $Y$ in Theorem \ref{th:AH complex affine} \ref{item:ii of th 3.4}, which is not unique for a given $X$, can be obtained as the normalization of the main component of the inverse limit over all GIT-quotients of $X$ or by \emph{toric downgrading} (see \cite[Section 11]{AH06} for details).
\end{remark}
Based on the Altmann--Hausen combinatorial description of affine $T$-varieties (Theorem \ref{th:AH complex affine}), Gillard has obtained the following criterion that provides a complete answer to question \ref{item: existence part of Key-Prob3} of the Key Problem for affine $T$-varieties.
\begin{theorem}\emph{(\cite[Theorems~4.3 and~4.6]{Gil20})}\label{th:AH real affine}
Let $\sigma$ be a real group structure on $T$.
\begin{enumerate}
\item\label{item: i of th 3.6} Let $(\omega, Y, \mathcal{D})$ be an AH datum, and let $X:=Z[\omega,Y, \mathcal{D}]$ be the corresponding complex affine $T$-variety.
If there exists a real structure $\rho_Y$ on $Y$ and a monoid homomorphism $h\colon \omega \cap \mathbb{X} \to \mathbb{C}(Y)^*$ such that
\begin{equation}\label{eq:monoid homomorphism}
\forall \chi \in \omega \cap \mathbb{X},\ \rho_Y^*(\mathcal{D}(\chi))=\mathcal{D}(\,^\gamma\hskip-1pt \chi)+\div_Y(h(\,^\gamma\hskip-1pt \chi)) \ \text{and}\ h(\chi)\rho_Y^*(h(\,^\gamma\hskip-1pt \chi))=1,
\end{equation}
where $\Gamma$ acts on $\mathbb{X}$ through the $\star$-action induced by $\sigma$ (see Example \ref{ex: star-action for tori}), then $X$ admits a $(T,\sigma)$-equivariant real structure $\mu_{[\omega,(Y,\rho_{Y}),\mathcal{D},h]}$.
\item\label{item: ii of th 3.6} Let $X$ be an affine $T$-variety that admits a $(T,\sigma)$-equivariant real structure.
Then there exists an AH datum $(\omega, Y, \mathcal{D})$, together with a real structure $\rho_Y$ on $Y$ and a monoid homomorphism $h\colon \omega \cap \mathbb{X} \to \mathbb{C}(Y)^*$ satisfying \eqref{eq:monoid homomorphism}, such that there is a $\Gamma$-equivariant isomorphism of $T$-varieties $X \simeq Z[\omega,Y,\mathcal{D}]$.
\end{enumerate}
\end{theorem}
\begin{remark}\
\begin{itemize}
\item The criterion obtained by Gillard is in fact constructive in the sense that it produces an explicit $(T,\sigma)$-equivariant real structure $\mu_{[\omega,(Y,\rho_{Y}),\mathcal{D},h]}$ on $Z[\omega,Y,\mathcal{D}]$.
\item For a given complex affine $T$-variety $X$ admitting a $(T,\sigma)$-equivariant real structure, Gillard gives an explicit recipe to construct the pair $(Y,\rho_Y)$ by \emph{$\Gamma$-equivariant toric downgrading} (see \cite[Section 4.1]{Gil20}).
\end{itemize}
\end{remark}
Theorem \ref{th:AH real affine} \ref{item: ii of th 3.6} simplifies a bit when the real group structure $\sigma$ on $T$ is equivalent to $\sigma_0^{\times n_0}\times\sigma_2^{\times n_2}$ (no $\sigma_1$ factor, with the notation of Lemma \ref{lem: real form on tori}); indeed, we can then find $\mathcal{D}$ on $Y$ such that $\rho_Y^*(\mathcal{D}(\chi))=\mathcal{D}(\,^\gamma\hskip-1pt \chi)$, i.e.~take $h=1$. However, this simplification is not always possible when there is a $\sigma_1$ factor (see Example \ref{ex 4.10}).
\begin{example}{(\cite[Examples 2.14, 3.6 and 5.3]{Gil20})}
Let $T \simeq \mathbb{G}_{m,\mathbb{C}}^2$ and let $\sigma:=\sigma_2$ (with the notation of Lemma \ref{lem: real form on tori}). Let $X:=\mathbb{A}_\mathbb{C}^4$ on which $T$ acts by
\[
(s,t)\cdot (x_1,x_2,x_3,x_4):=(sx_1,tx_2,st^2 x_3,s^2tx_4), \
\]
and let $\mu$ be the $(T,\sigma)$-equivariant real structure on $X$ defined by
\[
\mu\colon\ X \to X,\ (x_1,x_2,x_3,x_4)\mapsto (\overline{x_2},\overline{x_1},\overline{x_4},\overline{x_3});
\]
it corresponds to a real $R_{\mathbb{C}/\mathbb{R}}(\mathbb{G}_{m,\mathbb{C}})$-variety isomorphic to $\mathbb{A}_\mathbb{R}^4$.
Let $\{e_1,e_2\}$ be the canonical basis of $\mathbb{X} \simeq \mathbb{Z}^2$ and let $\omega:=\mathrm{Cone}(e_1,e_2)$. Let $Y$ be the projective toric surface corresponding to the fan left below, and let $\mathcal{D}:=\sum_{i=1}^{4} \Delta_i \otimes D_i$ be the polyhedral divisor on $Y$ such that each divisor $D_i$ corresponds to the ray generated by $v_i$ and the coefficients $\Delta_i \subseteq \mathbb{X}_\mathbb{Q}^\vee$ are the convex polyhedra pictured right below.
\begin{multicols}{4}
\definecolor{ffqqqq}{rgb}{1.,0.,0.}
\scalebox{0.65}{
\begin{tikzpicture}[line cap=round,line join=round,>=triangle 45,x=1.0cm,y=1.0cm]
\begin{axis}[
x=1.0cm,y=1.0cm,
axis lines=middle,
ymajorgrids=true,
xmajorgrids=true,
xmin=-2.5,
xmax=1.5,
ymin=-2.25,
ymax=1.5,
xtick={-3.0,-2.0,...,3.0},
ytick={-3.0,-2.0,...,3.0},]
\clip(-3.,-3.) rectangle (3.,3.);
\draw [->,line width=1.pt] (0.,0.) -- (1.,0.);
\draw [->,line width=1.pt] (0.,0.) -- (0.,1.);
\draw [->,line width=1.pt] (0.,0.) -- (-2.,-1.);
\draw [->,line width=1.pt] (0.,0.) -- (-1.,-2.);
\begin{scriptsize}
\draw[color=black] (1.2,0.15) node {$v_1$};
\draw[color=black] (0.2,1.2) node {$v_2$};
\draw[color=black] (-2.25,-1) node {$v_3$};
\draw[color=black] (-1.5,-2) node {$v_4$};
\end{scriptsize}
\end{axis}
\end{tikzpicture}
}
\scalebox{0.7}{
\begin{tikzpicture}[line cap=round,line join=round,>=triangle 45,x=1.0cm,y=1.0cm]
\begin{axis}[
x=1.0cm,y=1.0cm,
axis lines=middle,
ymajorgrids=true,
xmajorgrids=true,
xmin=-0.2,
xmax=2.75,
ymin=-0.2,
ymax=2.75,
xtick={-3.0,-2.0,...,3.0},
ytick={-3.0,-2.0,...,3.0},]
\clip(-3.,-3.) rectangle (3.,3.);
\shade[top color = white, bottom color = red] (0.,0.) -- (0.,3.) -- (3.,3.) -- (3.,0.) -- cycle;
\draw [line width=2.pt,color=ffqqqq] (0.,0.)-- (0.,3.);
\draw [line width=2.pt,color=ffqqqq] (0.,3.)-- (3.,3.);
\draw [line width=2.pt,color=ffqqqq] (3.,3.)-- (3.,0.);
\draw [line width=2.pt,color=ffqqqq] (3.,0.)-- (0.,0.);
\node[label=right:{$ {\Delta_1=\Delta_2 } $}] (n1) at (0.3,1.) {};
\end{axis}
\end{tikzpicture}
}
\scalebox{0.7}{
\begin{tikzpicture}[line cap=round,line join=round,>=triangle 45,x=1.0cm,y=1.0cm]
\begin{axis}[
x=1.0cm,y=1.0cm,
axis lines=middle,
ymajorgrids=true,
xmajorgrids=true,
xmin=-0.2,
xmax=2.75,
ymin=-0.2,
ymax=2.75,
xtick={-3.0,-2.0,...,3.0},
ytick={-3.0,-2.0,...,3.0},]
\clip(-3.,-3.) rectangle (3.,3.);
\shade[top color = white, bottom color = red](0.,1.) -- (0.,3.) -- (3.,3.) -- (3.,0.) -- (2.,0.) -- cycle;
\draw [line width=2.pt,color=ffqqqq] (0.,1.)-- (0.,3.);
\draw [line width=2.pt,color=ffqqqq] (0.,3.)-- (3.,3.);
\draw [line width=2.pt,color=ffqqqq] (3.,3.)-- (3.,0.);
\draw [line width=2.pt,color=ffqqqq] (3.,0.)-- (2.,0.);
\draw [line width=2.pt,color=ffqqqq] (2.,0.)-- (0.,1.);
\node[label=right:{$ {\Delta_3 } $}] (n1) at (0.75,1.) {};
\end{axis}
\end{tikzpicture}
}
\definecolor{ffqqqq}{rgb}{1.,0.,0.}
\scalebox{0.7}{
\begin{tikzpicture}[line cap=round,line join=round,>=triangle 45,x=1.0cm,y=1.0cm]
\begin{axis}[
x=1.0cm,y=1.0cm,
axis lines=middle,
ymajorgrids=true,
xmajorgrids=true,
xmin=-0.2,
xmax=2.75,
ymin=-0.2,
ymax=2.75,
xtick={-3.0,-2.0,...,3.0},
ytick={-3.0,-2.0,...,3.0},]
\clip(-3.,-3.) rectangle (3.,3.);
\shade[top color = white, bottom color = red] (0.,2.) -- (0.,3.) -- (3.,3.) -- (3.,0.) -- (1.,0.) -- cycle;
\draw [line width=2.pt,color=ffqqqq] (0.,2.)-- (0.,3.);
\draw [line width=2.pt,color=ffqqqq] (0.,3.)-- (3.,3.);
\draw [line width=2.pt,color=ffqqqq] (3.,3.)-- (3.,0.);
\draw [line width=2.pt,color=ffqqqq] (3.,0.)-- (1.,0.);
\draw [line width=2.pt,color=ffqqqq] (1.,0.)-- (0.,2.);
\node[label=right:{$ {\Delta_4 } $}] (n1) at (0.75,1.) {};
\end{axis}
\end{tikzpicture}
}
\end{multicols}
\noindent Then ($\omega,Y,\mathcal{D}$) is an AH datum such that $X \simeq Z[\omega,Y,\mathcal{D}]$. Moreover, if we denote by $\rho_Y$ the extension to $Y$ of the real group structure $\sigma_2$ on the dense open orbit $Y_0 \simeq \mathbb{G}_{m,\mathbb{C}}^2$, and take $h=1$, then one can check that the pair $(\rho_Y,h)$ satisfies \eqref{eq:monoid homomorphism}.
\end{example}
\begin{example}{(see \cite[Section 4.1]{DL22})} \label{ex 4.10}
Let $T \simeq \mathbb{G}_{m,\mathbb{C}}$, let $\sigma:=\sigma_1$ (the real group structure corresponding to the circle $\mathbb{S}^1$), and let $\omega=\mathbb{X}_\mathbb{Q} \simeq \mathbb{Q}$.
Let $Y$ be the complex surface with the real structure $\rho_Y$ whose corresponding real form is the real sphere $\mathbb{S}^2:=\mathrm{Spec}(\mathbb{R}[x,y,z]/(x^2+y^2+z^2-1))$.
Let $\mathcal{D}:=\{1\} \otimes D$, where $D:=\{ 1-z=x+iy=0\}$ is a Cartier divisor on $Y=\mathbb{S}_\mathbb{C}^2$, and let $h\colon \mathbb{Z} \to \mathbb{C}(Y)^*,\ n \mapsto (1-z)^{-n}$.
Then $(\omega,Y,\mathcal{D})$ is an AH datum, the pair $(\rho_Y,h)$ satisfies \eqref{eq:monoid homomorphism}, and the corresponding $(T,\sigma)$-equivariant real structure $\mu_{[\omega,(Y,\rho_{Y}),\mathcal{D},h]}$ on $X:=Z[\omega,Y,\mathcal{D}]$ corresponds to the real $\mathbb{S}^1$-variety $\mathbb{S}^3$ whose quotient $f\colon \mathbb{S}^3 \to \mathbb{S}^3/\mathbb{S}^1 \simeq \mathbb{S}^2$ is an \emph{algebraic model} of the famous Hopf fibration (which means that the continuous map $\mathbb{S}^3(\mathbb{R}) \to \mathbb{S}^2(\mathbb{R})$ induced by $f$ is the Hopf fibration). Let us note that, in this example, it is not possible to reduce to the case $h=1$.
\end{example}
As mentioned already, Theorem \ref{th:AH real affine} provides a complete answer to question \ref{item: existence part of Key-Prob3} of the Key Problem for affine $T$-varieties. It is then natural to ask about question \ref{item: quantity part of Key-Prob3} of the Key Problem for these varieties. Unfortunately, for an arbitrary affine $T$-variety $X$, the group $\Aut^T(X)$ can be quite wild, and so in general we cannot say much about the cardinal of the set $\mathbb{H}^1(\Gamma,\Aut^T(X))$ that parametrizes the equivalence classes of equivariant real structures on $X$.
\begin{works}\
\begin{itemize}
\smallskip
\item Descent data and $\mathrm{K}$-forms for affine complexity-one $T$-varieties, with $\mathrm{K}$ an arbitrary field, have been studied by Langlois in \cite{Lan15}; in particular, if $\mathrm{K}=\mathbb{R}$ and the real group structure $\sigma$ on $T$ is equivalent to $\sigma_0^{\times n_0}\times\sigma_2^{\times n_2}$ (no $\sigma_1$ factor), then \cite[Theorem 5.10]{Lan15} coincides with Theorem \ref{th:AH real affine}.
\smallskip
\item Real $\mathbb{S}^1$-forms of $\mathbb{G}_{m,\mathbb{C}}$-varieties have been described by Dubouloz--Liendo in \cite{DL22}; in particular, \cite[Theorem 2.7]{DL22} coincides with Theorem \ref{th:AH real affine} when $T=\mathbb{G}_{m,\mathbb{C}}$ and $\sigma=\sigma_1$.
Also, the case of smooth $\mathbb{G}_{m,\mathbb{C}}$-surfaces has been considered more closely by Dubouloz--Petitjean in \cite{DP20}; as an application, they establish that every compact differentiable $\mathbb{S}^1(\mathbb{R})$-surface admits a unique smooth rational real affine model up to $\mathbb{S}^1$-equivariant birational diffeomorphism (\cite[Theorem 1]{DP20}).
\smallskip
\item An example of a non-linearizable $\mathbb{S}^1$-action on $\mathbb{A}_\mathbb{R}^4$, whose complexification is a linearizable $\mathbb{G}_{m,\mathbb{C}}$-action on $\mathbb{A}_\mathbb{C}^4$, was obtained by Freudenburg--Moser-Jauslin in \cite{FMJ04}. (Note that Koras--Russell proved in \cite{KR13} that any $\mathbb{S}^1$-action on $\mathbb{A}_\mathbb{R}^3$ is linearizable.) Later, a systematic approach to find uncountably many non-linearizable $\mathbb{S}^1$-actions on $\mathbb{A}_\mathbb{R}^4$ that are all pairwise inequivalent was obtained by Moser-Jauslin in \cite{MJ19} (see also \cite[Section 4.2]{DL22}).
\end{itemize}
\end{works}
\section{Real structures on spherical varieties}\label{sec: real structures on spherical varieties}
In this section, we leave the world of torus actions and consider the Key Problem for another famous class of algebraic varieties endowed with an algebraic group action, namely the \emph{spherical varieties}.
Let $G$ be a complex reductive group (of simply-connected type). A $G$-variety $X$ is called \emph{spherical} if a Borel subgroup of $G$ acts on $X$ with a dense open orbit. This condition implies of course that $G$ itself acts on $X$ with a dense open orbit, and consequently the classification of spherical varieties is generally done in two steps.
\begin{itemize}
\item First step: classify the spherical homogeneous spaces under the $G$-action.
\item Second step: for a given spherical homogeneous space $G/H$, classify the $G$-equivariant embeddings of $G/H$ (also known as \emph{spherical embeddings}).
\end{itemize}
There is a well established combinatorial description for both parts of this classification, but we will not go into details to avoid making this survey article too technical. We refer to \cite{Tim11} and the references therein for a complete account on spherical homogeneous spaces and their equivariant embeddings.
\subsection{Real structures on spherical homogeneous spaces}\label{subsec:real structures on spherical homog spaces}
In this subsection we consider the Key Problem for spherical homogeneous spaces.
Let $G$ be a complex reductive group, let $B \subseteq G$ be a Borel subgroup, and let $T \subseteq B$ be a maximal torus.
As mentioned above, a homogeneous space $X_0=G/H$ is called \emph{spherical} if $B$ acts on $X_0$ with a dense open orbit; we also say that $H$ is a \emph{spherical} subgroup of $G$. Conjugacy classes of spherical subgroups are classified by some discrete invariants introduced by Luna (see \cite[Section 2]{Lun01} or \cite[Section 30.11]{Tim11}) under the names of \emph{homogeneous spherical data}. Roughly speaking, it is a quadruplet $(\Lambda,\Pi^p,\Sigma,\mathcal{D}^a)$ with $\Lambda \subseteq \mathbb{X}$ a sublattice, $\Pi^p$ and $\Sigma$ two finite subsets of $\mathbb{X}$, and $\mathcal{D}^a$ a finite subset of the dual of the sublattice of $\mathbb{X}$ generated by $\Sigma$, which satisfy some compatibility conditions (see \cite[Definition 30.21]{Tim11}).
Let us also mention that, for any spherical subgroup $H \subseteq G$, the quotient group $N_G(H)/H$ is a diagonalizable group (\cite[Section 5.2]{BP87}); in particular, it is an abelian group.
\smallskip
Let us fix a real group structure $\sigma$ on $G$. The $\star$-action induced by $\sigma$ on the based root datum associated with the triple $(G,B,T)$ (see Definition \ref{def: star-action}) induces a $\Gamma$-action on the set of homogeneous spherical data (see \cite[Sections 2.15-2.16]{BG18} for details, but beware that the notations are different from \cite[Section 30.11]{Tim11}).
The next result gives a complete answer to question \ref{item: existence part of Key-Prob3} of the Key Problem for spherical homogeneous spaces.
\begin{theorem}\emph{(\cite[Main Theorem 1.13]{BG18} with $k_0=\mathbb{R}$)}\label{th: main th BG}\\
We keep the previous notation.
Let $X_0=G/H$ be a spherical homogeneous space.
Then $X_0$ admits a $(G,\sigma)$-equivariant real structure if and only if
\begin{enumerate}
\item\label{item: th 4.1 (i)} the homogeneous spherical datum of the conjugacy class of $H$ is preserved by the $\star$-action induced by $\sigma$; and
\item\label{item: th 4.1 (ii)} a certain cohomology class $\kappa_*(\delta([\sigma])) \in \mathbb{H}^2(\Gamma,N_G(H)/H)$ is trivial (see \cite[Section 3]{BG18} for the precise definition of this cohomology class).
\end{enumerate}
\end{theorem}
\begin{remark}\
\begin{itemize}
\item Condition \ref{item: th 4.1 (i)} of Theorem \ref{th: main th BG} is equivalent to the fact that $\sigma(H)$ is conjugate to $H$ in $G$, which is condition \ref{eq: sigma compatible} of Lemma \ref{lem: two conditions}.
\item If $Z(G) \subseteq H$ or if $\sigma$ is \emph{quasi-split}, i.e.~$\sigma$ stabilizes a Borel subgroup of $G$, then the cohomology class $\kappa_*(\delta([\sigma]))$ is trivial, and so $X_0$ admits a $(G,\sigma)$-equivariant real structure if and only if condition \ref{item: th 4.1 (i)} of Theorem \ref{th: main th BG} holds.
\item We observe that Theorem \ref{th: main th BG} looks very similar to Theorem \ref{th: real structures on linear reps}, but we do not know what a common generalization of these two results could be.
\end{itemize}
\end{remark}
\begin{example}{(\cite[Example 2.2]{MJT19})}
Let $G=\SL_n^{\times 3}$ with $n \geq 2$, and let $\sigma\colon (g_1,g_2,g_3)\mapsto (\overline{g_2}, \overline{g_1},{\overline{\leftexp{t}g_3^{-1}}})$ that is a real group structure on $G$.
Let
\[H:=\{(h_1,h_1,h_2);\ h_1 \in \SL_n \text{ and } h_2 \in \mathrm{O}_n\}\ \text{and}\
H':=\psi(H),\]
where $\psi\colon G \to G,\ (g_1,g_2,g_3) \mapsto (g_3,g_2,g_1)$ is an outer automorphism of $G$.
Then $H$ and $H'$ are both spherical subgroups of $G$, but the $\star$-action induced by $\sigma$ stabilizes only the homogeneous spherical datum of the conjugacy class of $H$.
Moreover, the cohomology class $\kappa_*(\delta([\sigma])) \in \mathbb{H}^2(\Gamma,N_G(H)/H)$ is trivial, and so $X_0=G/H$ admits a $(G,\sigma)$-equivariant real structure by Theorem \ref{th: main th BG}.
\end{example}
\begin{example}{(\cite[Example 11.5]{BG18})}
Let $G=\SO_{10}$, let $\sigma$ be a real group structure on $G$, and let $H \subseteq G$ be an algebraic subgroup isomorphic to $\SO_9$. Then $X_0=G/H$ is a spherical homogeneous space, and the homogeneous spherical datum of the conjugacy class of $H$ is stable for the $\star$-action induced by $\sigma$.
Let $\sigma'$ be the real group structure on $G$ whose real locus is the non-compact real Lie group usually denoted by $\SO^*(10)$. If $\sigma$ belongs to the equivalence class of $\sigma'$, then the cohomology class $\kappa_*(\delta([\sigma])) \in \mathbb{H}^2(\Gamma,N_G(H)/H)$ is non-trivial, and so $X_0$ does not admit a $(G,\sigma)$-equivariant real structure.
For any other $\sigma$, the cohomology class $\kappa_*(\delta([\sigma]))$ is trivial, and so $X_0$ admits a $(G,\sigma)$-equivariant real structure.
\end{example}
\smallskip
Let us now quickly consider question \ref{item: quantity part of Key-Prob3} of the Key Problem for spherical homogeneous spaces.
It follows from Corollary \ref{cor: finiteness for homog spaces} that, if the spherical homogeneous space $X_0=G/H$ admits a $(G,\sigma)$-equivariant real structure, then $X_0$ admits a finite number of them, up to equivalence. Moreover, as mentioned earlier, the group $\Aut^G(X_0) \simeq N_G(H)/H$ is diagonalizable, and so it fits into an exact sequence of abelian $\Gamma$-groups
\[
0 \to \Aut^G(X_0)^\circ \to \Aut^G(X_0) \to \pi_0(\Aut^G(X_0)) \to 0,
\]
where $\Aut^G(X_0)^\circ$ is a torus and $\pi_0(\Aut^G(X_0))$ is a finite abelian group, which makes it possible in practice to compute the cardinal of $\mathbb{H}^1(\Gamma,\Aut^G(X_0))$ through a long exact sequence in Galois cohomology. (See \cite[Section 3.4]{MJT18} and \cite[Section 3]{MJT19} for some explicit examples of computation of $\mathbb{H}^1(\Gamma,\Aut^G(X_0))$.)
\begin{works}\
\smallskip
\begin{itemize}
\item In \cite{BG18}, Borovoi and Gagliardi study more generally descent data and $\mathrm{K}$-forms for spherical varieties, with $\mathrm{K}$ an arbitrary base field of characteristic zero (but not specifically the field of real numbers).
\smallskip
\item Theorem \ref{th: main th BG} was first proved for \emph{horospherical} homogeneous spaces (i.e. homogeneous spaces $G/H$ with $H$ containing a maximal unipotent subgroup of $G$) by Moser-Jauslin--Terpereau in \cite{MJT18}, and then later generalized by Borovoi--Gagliardi to arbitrary spherical homogeneous spaces over a base field of characteristic zero.\smallskip
\item Equivariant real structures on spherical homogeneous spaces $X_0=G/H$, under the extra assumption that $\Aut^G(X_0) \simeq N_G(H)/H$ is finite, have been studied by Akhiezer in \cite{Akh15}, by Akhiezer--Cupit-Foutou in \cite{ACF14}, by Cupit-Foutou in \cite{CF15}, by Borovoi in \cite{Bor20}, and by Snegirov in \cite{Sne20}.
\smallskip
\item In the particular case where $X_0=G/H$ is a \emph{symmetric space} (i.e.~$G^\theta \subseteq H \subseteq N_G(G^\theta)$ with $\theta \in \Aut_{\mathrm{gr}}(G)$ a group involution), a practical criterion for the existence of an equivariant real structure on $X_0$, using the involution $\theta$ instead of the homogeneous spherical data of the conjugacy class of $H$, has been obtained by Moser-Jauslin--Terpereau in \cite{MJT19}.
\smallskip
\item Let $X_0=G/H$ be a spherical homogeneous space with a $(G,\sigma)$-equivariant real structure $\mu$ such that $X_0(\mathbb{C})^\mu$ is non-empty. Then the real Lie group $G(\mathbb{C})^\sigma$ acts on $X_0(\mathbb{C})^\mu$ with finitely many orbits.
When $X_0$ is a symmetric space, a combinatorial description of these orbits using Galois cohomology has been obtained by Cupit-Foutou--Timashev in \cite{CFT18} (see also \cite[Chp.~6]{BJ06}), and when $X_0$ is arbitrary but $\sigma$ is split, a parametrization of these orbits through geometric methods has been obtained by Cupit-Foutou--Timashev in \cite{CFT19}.
\end{itemize}
\end{works}
\subsection{Extension of real structures to spherical embeddings}\label{subsec: extension of real structures to sph embeddings}
In the previous subsection we investigated the Key Problem for spherical homogeneous spaces. In this subsection we go to the next step and consider the Key Problem for spherical embeddings (although, historically, things have rather worked the other way around). More precisely, given a spherical homogeneous space $X_0=G/H$ and a $G$-equivariant embedding $X_0 \hookrightarrow X$, we give the combinatorial criterion obtained by Huruguen in \cite{Hur11} to determine when an equivariant real structure on $X_0$ extends to $X$, generalizing what was done in Section \ref{subsec: real structures on toric varieties} for toric varieties. For brevity, we do not give details regarding the theory of spherical embeddings; the interested reader is referred to \cite{Kno91,Tim11,Per14} for a detailed presentation.
\smallskip
We keep the same notation as in the previous subsection, and we fix a spherical homogeneous space $X_0=G/H$.
The \emph{weight lattice} of $X_0$ is
\[ \mathbb{X}(X_0)=\{ \chi \in \mathbb{X}(B) \ | \ \mathbb{C}(X_0)_\chi^{(B)} \neq \{0\} \} \ \subseteq \mathbb{X}(B):=\mathrm{Hom}_{\mathrm{gr}}(B,\mathbb{G}_m)\simeq \mathbb{X}(T), \]
where
\[\mathbb{C}(X_0)_\chi^{(B)}=\{ f \in \mathbb{C}(X_0)\ |\ \forall b \in B,\ b \cdot f= \chi(b)f\}\ \text{ with }\ \chi \in \mathbb{X}(B).\]
It is a free abelian group of finite rank.
The (finite) set of \emph{colors} of $X_0$ is
\[\mathcal{D}(X_0)=\{ \text{$B$-stable prime divisors of $X_0$} \}.\]
A \emph{colored cone} for $X_0$ is a pair formed by a strictly convex polyhedral cone in $\mathbb{X}(X_0)_\mathbb{Q}^\vee$ and a subset of $\mathcal{D}(X_0)$ satisfying some conditions.
A \emph{colored fan} for $X_0$ is a finite collection of colored cones for $X_0$ satisfying some extra conditions. (See \cite[Section 3]{Kno91} for details.)
\begin{theorem}\emph{(\cite[Theorem 3.3]{Kno91})}
There is a bijection between isomorphism classes of $G$-equivariant embeddings of $X_0$ and colored fans for $X_0$.
\end{theorem}
Let now $\sigma$ be a real group structure on $G$, and let $\mu$ be a $(G,\sigma)$-equivariant real structure on $X_0$. Then $\mu$ induces a $\Gamma$-action on the weight lattice $\mathbb{X}(X_0)$ and on the set of colors $\mathcal{D}(X_0)$; see \cite[Section 2.2]{Hur11} for the detailed description of this $\Gamma$-action.
This $\Gamma$-action on $\mathbb{X}(X_0)$ and $\mathcal{D}(X_0)$ in turn induces a $\Gamma$-action on the set of colored fans for $X_0$. A somewhat surprising fact is that this $\Gamma$-action on the set of colored fans for $X_0$ does not depend on the choice of $\mu$, only on $\sigma$, and can be recovered from the $\star$-action induced by $\sigma$ on the based root datum associated to the triple $(G,B,T)$; see \cite[Section 7]{BG18} for details.
\begin{theorem}\label{th:real structure extend to spherical embeddings} \emph{(\cite[Theorem 2.23]{Hur11}, see also \cite[Theorem 9.1]{Wed18})}\\
We keep the previous notation. Let $X_0 \hookrightarrow X$ be a $G$-equivariant embedding of $X_0$.
A $(G,\sigma)$-equivariant real structure $\mu$ on $X_0=G/H$ extends to $X$ if and only if the corresponding colored fan is $\Gamma$-stable.
\end{theorem}
\begin{remark}\
\begin{itemize}
\item Theorem \ref{th:real structure extend to toric embeddings} for toric embeddings is a particular case of Theorem \ref{th:real structure extend to spherical embeddings}.
\item Unlike the toric case, the Galois descent $\mathbb{C}/\mathbb{R}$ is not always effective for spherical varieties. This means that a $(G,\sigma)$-equivariant real structure on a spherical variety $X$ does not always correspond to a real $(G/\langle \sigma \rangle)$-form of $X$. We refer to \cite[Section 2.4]{Hur11} for an explicit example where this situation occurs. The fact that $X$ admits a covering by $\Gamma$-stable quasi-projective open subsets can however be expressed combinatorially (see \cite[Corollary 3.2.12]{Per14}). See also \cite[Proposition 2.27]{Hur11} for a list of sufficient conditions to guarantee effectiveness.
\end{itemize}
\end{remark}
\begin{example}(\cite[Example 3.34]{MJT18})
Let $U$ be a maximal unipotent subgroup of $\SL_2$. Then $X_0=\SL_2/U \simeq \mathbb{A}_\mathbb{C}^2 \setminus \{0\}$ is a spherical homogeneous space whose non-trivial equivariant embeddings, and the corresponding colored fans, are the following:
$\mathbb{A}_\mathbb{C}^2$, \ $\P_\mathbb{C}^2 \setminus \{0\}$, \ ${\mathrm{Bl}}_0(\mathbb{A}_\mathbb{C}^2)$, \ $\P_\mathbb{C}^2$, and ${\mathrm{Bl}}_0(\P_\mathbb{C}^2)$.
\vspace{3mm}
\begin{center}
\includegraphics[width=10cm]{SL2eventail.png}\vspace{2mm}
\includegraphics[width=10cm]{SL2eventail_2.png}
\end{center}
\vspace{2mm}
(The small white dot corresponds to the unique color of $\SL_2/U$.)
We saw in Example \ref{ex:real group structures on SL2} that there are two inequivalent real group structures on $\SL_2$, which we denoted by $\sigma_s$ and $\sigma_c$.
It follows from Theorem \ref{th: main th BG} and Proposition \ref{prop:Galois H1 to param eq real structures} that there exists a unique equivalence class of $(\SL_2,\sigma_s)$-equivariant real structures on $X_0$, but that there is no $(\SL_2,\sigma_c)$-equivariant structure on $X_0$ as the cohomology class $\Delta_U([\sigma_c])$ is non-trivial.
Moreover, the $\star$-action induced by $\sigma_s$ is trivial (see Remark \ref{rk: star action trivial in the split case}), and so any $(\SL_2,\sigma_s)$-equivariant real structure on $X_0$ extends to any $\SL_2$-equivariant embedding $X_0 \hookrightarrow X$. All $(\SL_2,\sigma_s)$-equivariant real structures on $X$ are furthermore equivalent since $\Aut^G(X) \simeq \Aut^G(X_0) (\simeq \mathbb{G}_{m,\mathbb{C}})$.
\end{example}
\smallskip
Let us now move on to question \ref{item: quantity part of Key-Prob3} of the Key Problem for a given spherical variety $X$ with open orbit $X_0$. As for toric varieties, once the real group structure $\sigma$ on $G$ is fixed, we have the following alternative: Either all $(G,\sigma)$-equivariant real structures on $X_0$ extend to $X$, or none of them do.
However, contrary to the toric case, it may happen that two equivalent $(G,\sigma)$-equivariant real structures on $X_0$ extend to inequivalent $(G,\sigma)$-equivariant real structures on $X$; indeed, the group inclusion $\Aut^G(X) \hookrightarrow \Aut^G(X_0)$ may not be an isomorphism.
Consequently, to answer question \ref{item: quantity part of Key-Prob3} of the Key Problem, the best option seems again to write a long exact sequence in Galois cohomology as in the homogeneous case.
\begin{works}\
\begin{itemize}
\smallskip
\item In \cite{Hur11}, Huruguen works over any perfect base field, not necessarily the field of real numbers. Let us note that Huruguen assumes that the equivariant real structure $\mu$ on $X_0$ satisfies $X_0(\mathbb{C})^\mu \neq \varnothing$, but this condition plays no role in his proof of Theorem \ref{th:real structure extend to spherical embeddings}.
The results of Huruguen have been later generalized by Wedhorn in \cite{Wed18} to the case where the base field is arbitrary. Also, Wedhorn prefers to work in the category of algebraic spaces instead of schemes, which allows him to avoid the problem of effectiveness for the Galois descent.
\smallskip
\item Akhiezer and Cupit-Foutou study in \cite{Akh15,ACF14,CF15} the extension problem for an equivariant real structure on a spherical homogeneous space $X_0$ admitting a wonderful compactification $X_0 \hookrightarrow X$.
\smallskip
\item In addition to proving Theorem \ref{th: main th BG}, Borovoi--Gagliardi prove in \cite[Section 7]{BG18} a reformulation of part of the results obtained by Huruguen in \cite{Hur11}. They also provide many examples in \cite[Section 11]{BG18} of spherical varieties for which they apply Theorem \ref{th: main th BG} to solve question \ref{item: existence part of Key-Prob3} of the Key Problem.
\smallskip
\item As a byproduct of Theorems \ref{th: main th BG} and \ref{th:real structure extend to spherical embeddings}, a classification of the equivariant real structures on smooth projective horospherical varieties of Picard rank $1$ (classified by Pasquier in \cite{Pas09}) has been obtained by Moser-Jauslin--Terpereau in \cite[Section 3.6]{MJT18}.
\end{itemize}
\end{works}
\section{Real structures on almost homogeneous \texorpdfstring{$\SL_2$}{SL2}-threefolds} \label{sec:almost homg SL2-threefolds}
Let $G$ be a complex reductive group, and let $B$ be a Borel subgroup of $G$. We recall that the \emph{complexity} of a $G$-variety is the codimension of a general $B$-orbit. For instance, the complexity-zero varieties are precisely the spherical varieties considered in Section \ref{sec: real structures on spherical varieties}. In this section, based on the work of Moser-Jauslin--Terpereau \cite[Section 3]{MJT20}, we study the Key Problem for a family of complexity-one varieties admitting a combinatorial description quite similar to that of spherical embeddings, namely the \emph{almost homogeneous $\SL_2$-threefolds}. Examples include the rank $1$ Fano threefolds $\P_\mathbb{C}^3$, $Q_3$, $X_5$ or $X_{22}^{\textrm{MU}}$ (with the notation of \cite[Section 12.2]{IP99}), and the $\P_\mathbb{C}^1$-bundle $\P(T_{\P_\mathbb{C}^2})$.
\smallskip
We have seen in Example \ref{ex:real group structures on SL2} that any real group structure on $\SL_2$ is equivalent either to $\sigma_s\colon\ g \mapsto \overline{g}$ or to $\sigma_c\colon\ g \mapsto {}^{t}\overline{g^{-1}}$, with corresponding real loci $\SL_2(\mathbb{R})$ and $\SU_2(\mathbb{C})$ respectively.
Moreover, $\SL_2$ has no outer automorphism, and so we can assume without loss of generality that $\sigma \in \{ \sigma_s,\sigma_c\}$ when studying $(\SL_2,\sigma)$-equivariant real structures on $\SL_2$-varieties (see the second part of \cite[Proposition A]{MJT20}). Let us also note that, for any real group structure $\sigma$ on $\SL_2$, the $\star$-action induced by $\sigma$ is trivial (see Remark \ref{rk: star action trivial in the split case}), and so the $\star$-action will play no role when considering the Key Problem for almost homogeneous $\SL_2$-threefolds (contrary to the spherical case studied in Section \ref{sec: real structures on spherical varieties}, where the $\star$-action was playing a leading role to answer question \ref{item: existence part of Key-Prob3} of the Key Problem).
Let $X$ be an almost homogeneous $\SL_2$-threefold. Then it contains a dense open orbit $X_0$ isomorphic to $\SL_2/H$ with $H \subseteq \SL_2$ a finite subgroup; those are well-known (see \cite{Klein93}): there are the cyclic groups of order $n$ (conjugate to $A_n$), the binary dihedral groups of order $4n-8$ (conjugate to $D_n$ with $n \geq 4$), and the binary polyhedral groups (conjugate to $E_n$ with $n \in \{6,7,8\}$).
\begin{theorem}\emph{(\cite[Theorem~D]{MJT20})}\label{th:D}
Let $H$ be a finite subgroup of $\SL_2$, and let $\sigma$ be a real group structure on $\SL_2$.
Then $X_0=\SL_2/H$ admits an $(\SL_2,\sigma)$-equivariant real structure. Moreover, the equivalence classes of $(\SL_2,\sigma)$-equivariant real structures on $X_0$ and their real loci are listed in \cite[Appendix C]{MJT20}; there are $1$, $2$ or $3$ equivalence classes depending on $H$ and $\sigma$.
\end{theorem}
Following the strategy outlined in Section \ref{sec: equivariant real structures} to describe the equivariant real structures on almost homogeneous varieties, we now consider the problem of determining when an $(\SL_2,\sigma)$-equivariant real structure on $X_0=\SL_2/H$ extends to a given $\SL_2$-equivariant embedding $X_0 \hookrightarrow X$.
As for spherical embeddings, there exists a combinatorial classification of the $\SL_2$-equivariant embeddings of $X_0$. (In fact, both classifications are particular cases of the Luna-Vust theory to classify $\SL_2$-equivariant embeddings of arbitrary homogeneous spaces.) Consider the sets
\[
\mathcal{V}^{\SL_2}=\mathcal{V}^{\SL_2}(X_0)=\{\text{$\SL_2$-invariant geometric valuations\footnotemark \ of $\mathbb{C}(X_0)$}\}
\]
and, for a given Borel subgroup $B$ of $\SL_2$,
\footnotetext{ Recall that a \emph{valuation} of $\mathbb{C}(X_0)$ is a group homomorphism $\nu\colon (\mathbb{C}(X_0)^*,\times) \to (\mathbb{Q},+)$ satisfying $\nu(a+b) \geq \min(\nu(a),\nu(b))$ when $a+b \neq 0$, whose kernel contains $\mathbb{C}^*$, and whose image is a discrete subgroup of $(\mathbb{Q},+)$. A valuation $\nu$ is called \emph{geometric} if $\nu=\nu_D$ for some divisor $D$ on $X_0$, where $\nu_D(f)$ is the order of vanishing of $f$ along $D$.}
\[
\mathcal{D}^B=\mathcal{D}^B(X_0)=\{ \text{$B$-stable prime divisors of $X_0$} \} \simeq B \backslash \SL_2/H\simeq \P_\mathbb{C}^1/H.
\]
The idea of the classification of the $\SL_2$-equivariant embeddings of $X_0$ is that any such embedding $X_0 \hookrightarrow X$ corresponds to a collection of \emph{colored data}, i.e.~a collection of pairs $(\mathcal{W}_i,\mathcal{R}_i)_{i \in I}$, with $\mathcal{W}_i \subseteq \mathcal{V}^{\SL_2}$ and $\mathcal{R}_i \subseteq \mathcal{D}^B$, satisfying some conditions.
In fact, each colored datum corresponds to an $\SL_2$-orbit of $X$, and the conditions that the collection of pairs $(\mathcal{W}_i,\mathcal{R}_i)_{i \in I}$ must satisfy encode the fact that the union of the corresponding $\SL_2$-orbits is indeed an $\SL_2$-variety. Also, $\SL_2$-orbits are divided into six different types (depending on their colored data) denoted by
\begin{itemize}
\item $\AA$, $\mathcal{AB}$, $\mathcal{B}_{+}$, or $\mathcal{B}_{-}$ for orbits isomorphic to $\P_\mathbb{C}^1$;
\item $\mathcal{B}_0$ for fixed points; and
\item $\mathcal{C}$ for $2$-dimensional orbits.
\end{itemize}
This combinatorial classification was first given by Luna--Vust in \cite[Section 9]{LV83} for equivariant embeddings of $\SL_2$, and then later generalized to equivariant embeddings of $\SL_2/H$, with $H \subseteq \SL_2$ a finite subgroup, by Moser-Jauslin in \cite{MJ87,MJ90} and Bousquet in \cite{Bou00}; we refer to \cite[Appendix B]{MJT20} for a self-contained summary.
\begin{remark}Let us mention that there is an alternative description of equivariant embeddings of $\SL_2/H$, in terms of \emph{colored hypercones} and \emph{colored hyperfans}, due to Timashev; see \cite[Section 16]{Tim11}.
\end{remark}
In practice, the collection of colored data corresponding to a given equivariant embedding of $\SL_2/H$ is represented by a \emph{skeleton diagram}. For instance, the skeleton diagrams corresponding to the varieties $Q_3$, $V_5$ and $V_{22}^{\textrm{MU}}$ are respectively
\begin{figure}[h!]
\begin{tikzpicture}[scale=1.2]
\path (0,0) coordinate (origin);
\draw (0,0) circle (2pt) ;
\path (0:1.7cm) coordinate (P0);
\path (0:1.1cm) coordinate (P0);
\path (75:1.3cm) coordinate (P1);
\path (19:.75cm) coordinate (P2);
\path (310:1.3cm) coordinate (P3);
\path (55:.75cm) coordinate (P4);
\path (-30:.75cm) coordinate (P5);
\path (90:.8cm) coordinate (Q0);
\draw[line width=0.3mm] (origin) -- (P0) (origin) -- (P3) (origin) -- (P4) (origin) -- (P5) (origin) -- (P1) (origin) -- (P2) ;
\fill[white] (0,0) circle (1.5pt) ;
\node at (.5,.5){$\vdots$};
\node at (.6,.8){\tiny{$-\frac{5}{6}$}};
\node at (0,1.3){\tiny{$-\frac{1}{2}$}};
\node at (.4,-1.1){\tiny{$-\frac{1}{2}$}};
\node at (1.5,0){\tiny{$-\frac{2}{3}$}};
\node at (-.2,-.2){\tiny{$-1$}};
\node at (.3,-2){\tiny{$H=E_6$}};
\draw[line width=0.3mm] (P1) -- +(165:3pt) (P1)-- +(345:3pt);
\end{tikzpicture}
\begin{tikzpicture}[scale=1.2]
\path (0,0) coordinate (origin);
\draw (0,0) circle (2pt) ;
\path (0:1.7cm) coordinate (P0);
\path (0:1.1cm) coordinate (P0);
\path (75:1.3cm) coordinate (P1);
\path (19:.75cm) coordinate (P2);
\path (310:1.3cm) coordinate (P3);
\path (55:.75cm) coordinate (P4);
\path (-30:.75cm) coordinate (P5);
\path (90:.8cm) coordinate (Q0);
\draw[line width=0.3mm] (origin) -- (P0) (origin) -- (P3) (origin) -- (P4) (origin) -- (P5) (origin) -- (P1) (origin) -- (P2) ;
\fill[white] (0,0) circle (1.5pt) ;
\node at (.5,.5){$\vdots$};
\node at (.6,.8){\tiny{$-\frac{5}{6}$}};
\node at (0,1.3){\tiny{$-\frac{1}{2}$}};
\node at (.4,-1.1){\tiny{$-\frac{1}{2}$}};
\node at (1.5,0){\tiny{$-\frac{2}{3}$}};
\node at (-.2,-.2){\tiny{$-1$}};
\node at (.3,-2){\tiny{$H=E_6$}};
\draw[line width=0.3mm] (P3) -- +(40:3pt) (P3)-- +(220:3pt);
\end{tikzpicture}
\begin{tikzpicture}[scale=1.2]
\path (0,0) coordinate (origin);
\draw (0,0) circle (2pt) ;
\path (0:1.7cm) coordinate (P0);
\path (0:1.1cm) coordinate (P0);
\path (75:1.1cm) coordinate (P1);
\path (19:.75cm) coordinate (P2);
\path (310:1.5cm) coordinate (P3);
\path (55:.75cm) coordinate (P4);
\path (-30:.75cm) coordinate (P5);
\path (90:.8cm) coordinate (Q0);
\draw[line width=0.3mm] (origin) -- (P0) (origin) -- (P3) (origin) -- (P4) (origin) -- (P5) (origin) -- (P1) (origin) -- (P2) ;
\fill[white] (0,0) circle (1.5pt) ;
\node at (.5,.5){$\vdots$};
\node at (.6,.8){\tiny{$-\frac{11}{12}$}};
\node at (0,1.3){\tiny{$-\frac{5}{6}$}};
\node at (.4,-1.1){\tiny{$-\frac{2}{3}$}};
\node at (1.5,0){\tiny{$-\frac{3}{4}$}};
\node at (-.2,-.2){\tiny{$-1$}};
\node at (.3,-2){\tiny{$H=E_7$}};
\draw[line width=0.3mm] (P3) -- +(40:3pt) (P3)-- +(220:3pt);
\end{tikzpicture}
\end{figure}
The choice of a pair $(\sigma,\mu)$, with $\sigma$ a real group structure on $\SL_2$ and $\mu$ an $(\SL_2,\sigma)$-equivariant real structure on $X_0$, induces a $\Gamma$-action on the sets $\mathcal{V}^{\SL_2}$ and $\mathcal{D}^B$ (see \cite[Section 3.3]{MJT20} for details). This $\Gamma$-action can be visualized on the skeleton diagrams; indeed, it corresponds to a permutation of certain spokes of the same length.
The following result provides a complete answer to question \ref{item: existence part of Key-Prob3} of the Key Problem for almost homogeneous $\SL_2$-threefolds.
\begin{theorem}\emph{(\cite[Theorem~E]{MJT20})}\label{th:E}
We keep the previous notation. Let $X_0 \hookrightarrow X$ be an $\SL_2$-equivariant embedding of $X_0=\SL_2/H$.
An $(\SL_2,\sigma)$-equivariant real structure $\mu$ on $X_0$ extends to an $(\SL_2,\sigma)$-equivariant real structure $\widetilde\mu$ on $X$ if and only if the $\Gamma$-actions on $\mathcal{V}^{\SL_2}$ and $\mathcal{D}^B$ induced by the pair $(\sigma,\mu)$ stabilize the collection of colored data corresponding to the $\SL_2$-orbits of $X$. Moreover, the real structure $\widetilde\mu$ on $X$ is effective if and only if every $\SL_2$-orbit of $X$ of type $\mathcal{B}_0$ or $\mathcal{B}_-$ is fixed by the $\Gamma$-action.
\end{theorem}
\begin{remark}
It must be stressed that, contrary to the spherical case (see Section \ref{sec: real structures on spherical varieties}), the $\Gamma$-action on the colored equipment of $X_0=\SL_2/H$ depends not only on $\sigma$, but also on $\mu$. In fact, it is even possible, for a given $\sigma$, to have two equivalent $(\SL_2,\sigma)$-equivariant real structures on $X_0$ such that only one of them extends to a given $\SL_2$-equivariant embedding $X_0 \hookrightarrow X$.
\end{remark}
\begin{example}(\cite[Example~3.10]{MJT20})\label{ex:first bis example}
Let $X=\P_\mathbb{C}^1\times\P_\mathbb{C}^1 \times \P_\mathbb{C}^1$ on which $\SL_2$ acts diagonally. Then the stabilizer of the point $x=([1:1],[1:0],[0:1])$ is $H=\{ \pm I_2 \}=A_2$, and so $(X,x)$ is an $\SL_2$-equivariant embedding of $\SL_2/H=\PGL_2$.
The orbit decomposition of $X$ is $\ell \sqcup S_1 \sqcup S_2 \sqcup S_3 \sqcup X_0$, where $X_0 \simeq \PGL_2$ is the dense open orbit, $S_i \simeq \P_\mathbb{C}^1 \times \P_\mathbb{C}^1 \setminus \Delta$, and $\ell \simeq \P_\mathbb{C}^1$.
Let us note that
\[
\mathfrak{S}_3 \simeq \Aut^{\SL_2}(X) \hookrightarrow \Aut^{\SL_2}(X_0) \simeq \PGL_2(\mathbb{C}), \ (12) \mapsto \begin{bmatrix}
i & 0 \\i & -i
\end{bmatrix} \ \text{and} \ (23) \mapsto \begin{bmatrix}
0 & i \\ i & 0
\end{bmatrix},
\]
where the symmetric group $\mathfrak{S}_3$ acts on $X$ by permuting the three factors.
We fix a real group structure $\sigma$ on $\SL_2$. By \cite[Theorem D]{MJT20}, there are exactly two equivalence classes of $(\SL_2,\sigma)$-equivariant real structures on $X_0$.
Using Theorem \ref{th:E}, one can show that there always exists an $(\SL_2,\sigma)$-equivariant real structure on $X_0$ that extends to $X$, and using Proposition \ref{prop:Galois H1 to param eq real structures}, a direct computation of $\mathbb{H}^1(\Gamma,\Aut^{\SL_2}(X))$ yields that $X$ admits exactly two equivalence classes of $(\SL_2,\sigma)$-equivariant real structures.
Moreover, all $(\SL_2,\sigma)$-equivariant real structures on $X$ restrict to equivalent $(\SL_2,\sigma)$-equivariant real structures on $X_0$.
\end{example}
Other examples where Theorem \ref{th:E} has been applied to determine the equivariant real structures on certain almost homogeneous $\SL_2$-threefolds can be found in \cite[Section 3.3]{MJT20}. Also, a classification of the equivariant real structures on minimal smooth completions of $X_0=\SL_2/H$, when $H$ is non-cyclic, has been obtained in \cite[Section 3.4]{MJT20}. (Here we call \emph{minimal smooth completion} of $X_0$ any $\SL_2$-equivariant embedding $X_0 \hookrightarrow X$ such that $X$ is a smooth complete variety and any birational $\SL_2$-equivariant morphism $X \to X'$, with $X'$ smooth, is an isomorphism.)
\begin{works}\
\begin{itemize}
\smallskip
\item Examples of smooth compact connected three-dimensional Moishezon manifolds which are not schemes and on which $\SL_2$ acts with a dense open orbit have been obtained by Luna--Moser-Jauslin--Vust in \cite{LMV89}. (One way to produce such examples in the real setting is to take the quotient $X/\langle \mu \rangle$ with $X$ a complete almost homogeneous $\SL_2$-threefold and $\mu$ an $(\SL_2,\sigma)$-equivariant real structure on $X$ that is not effective.)
\smallskip
\item Let $G$ be any complex reductive group, and let $\sigma$ be a real group structure on $G$. A combinatorial criterion to determine if a $(G,\sigma)$-equivariant real structure $\mu$ on an arbitrary homogeneous space $X_0=G/H$ extends to a given $G$-equivariant embedding $X_0 \hookrightarrow X$, relying on the classical Luna--Vust theory (see \cite{LV83}), has been obtained by Moser-Jauslin--Terpereau in \cite{MJT20}, generalizing Theorems \ref{th:real structure extend to spherical embeddings} and \ref{th:E}, but the combinatorics is then more involved.
\end{itemize}
\end{works}
\section{Some open questions}\label{sec: open questions}
We finish this survey paper with some open questions related to the Key Problem. In all this last section, we denote by $G$ a complex reductive group.
\smallskip
In Section \ref{sec:almost homg SL2-threefolds}, we have studied the Key Problem for almost homogeneous $\SL_2$-threefolds. On the other hand, there exists a combinatorial description for com\-plexity-one $G$-varieties due to Timashev, in terms of \emph{colored hypercones} and \emph{colored hyperfans} (see \cite[Section 16]{Tim11}), that generalizes the one given by Luna--Vust in \cite[Section 9]{LV83} for almost homogeneous $\SL_2$-threefolds.
\begin{question} \label{Q1}
Is it possible to extend the results obtained for almost homogeneous $\SL_2$-threefolds (see Section \ref{sec:almost homg SL2-threefolds}) to arbitrary complexity-one $G$-varieties?
\end{question}
(Let us mention that for affine varieties endowed with a complexity-one torus action, a positive answer to question \ref{Q1} follows from the work of Langlois (\cite{Lan15}) and Gillard (\cite{Gil20}); see Section \ref{subsec: real structures on T-varieties}).
\smallskip
In \cite[Section 3.4]{MJT20}, we have determined the equivariant real structures on the minimal smooth completions of $\SL_2/H$ when $H$ is a non-cyclic finite subgroup of $\SL_2$ (in which case the underlying $\SL_2$-variety is projective, and so any real structure is effective). But ``most" of the almost homogeneous $\SL_2$-threefolds that appear in the literature are actually related to minimal smooth completions of $\SL_2/H$ with $H$ cyclic.
\begin{question} \label{Q2}
What are the effective equivariant real structures on the minimal smooth completions of $X_0=\SL_2/H$ when $H$ is cyclic?
\end{question}
It is certainly possible to try to answer question \ref{Q2} using the same techniques as in \cite[Section 3.4]{MJT20} when $H$ is a non-cyclic finite subgroup of $\SL_2$.
However, when $H$ is cyclic, the group $\Aut^{\SL_2}(X_0)$ is infinite, which makes it more complicated to compute $\mathbb{H}^1(\Gamma,\Aut^{\SL_2}(X_0))$, and there are between seven and eleven minimal smooth completions of $X_0$ to consider (depending on the cardinality of $H$). Moreover, the underlying $\SL_2$-varieties are not all projective, which makes the question of the effectiveness of the Galois descent non-trivial in this case.
\smallskip
Among all the complex Fano threefolds (see \cite[Section 12.2]{IP99} for the list of the smooth ones), many of them are varieties of complexity $\leq 1$.
Therefore, results obtained in recent years concerning the equivariant real structures on spherical and complexity-one varieties should allow a complete classification of the real structures on these varieties.
\begin{question}
What are the (equivariant) real structures on the complex Fano threefolds of complexity $\leq 1$? And what are the corresponding real loci?
\end{question}
Let us now consider $X_0=G/H$ an arbitrary homogeneous space.
If $H$ is connected and $\dim(X_0) \leq 10$ or if $X_0$ is spherical, then it is known that $X_0$ is a rational variety (see \cite[Theorem 5.9]{CZ17} and \cite[Corollary 2.1.3]{Per14}). However, a real form of a rational variety is not necessarily rational.
\begin{question}
Given a rational homogeneous space $X_0=G/H$, what are the rational real forms of $X_0$? And is it possible to characterize rationality via the real locus?
\end{question}
Let us now focus on question \ref{item: quantity part of Key-Prob3} of the Key Problem. The automorphism group $\Aut^G(X)$ of a spherical $G$-variety $X$ is a diagonalizable group; in particular, it is a linear algebraic group, and so the set $\mathbb{H}^1(\Gamma,\Aut^G(X))$ is finite. On the other hand, there are examples of complexity-two varieties with infinitely many equivalence classes of equivariant real structures.
\begin{question}
Does a complexity-one variety always admit a finite number of equivalence classes of equivariant real structures?
\end{question}
Finally, for a random projective variety $X$, the neutral component of its automorphism group $\Aut^\circ(X)$ is usually neither a linear algebraic group nor an abelian variety. It is therefore natural to consider the Key Problem without the assumption of reductivity, and even of linearity.
\begin{question}
Let $K$ be a connected algebraic group (not necessarily linear), let $X$ be a complex $K$-variety, and let $\sigma$ be a real group structure on $K$.
What can be said concerning the $(K,\sigma)$-equivariant real structures on $X$?
\end{question}
\bibliographystyle{alpha}
|
1,314,259,995,052 | arxiv | \section{Introduction}
Many time series observed in practice display nonstationary behavior,
especially if data is collected over long time spans. Nonstationarity
can affect the trend, the variance--covariance structure or, more
comprehensively, aspects of the underlying distribution. Since
estimates and forecasts can be severely biased if nonstationarity is
not properly taken into account, identifying and locating structural
breaks has become an important issue in the analysis of time series.
Over the years, there has been a large amount of research on issues
related to testing and estimating structural breaks in sequences of
independent random variables, time series and regression models. Most
of these focus on considering breaks in the (conditional) mean, while a
smaller number of publications are available for breaks in the
(conditional) variance. The relevant lines of research are summarized
in the monograph \cite{Csorgo:Horvath:1997} and the more recent
survey paper \cite{Aue:Horvath:2013}.
In various situations, however, it may be helpful and more informative
to study structural breaks in the (conditional) quantiles. As a case in
point, Hughes \textit{et al.} \cite
{Hughes:SubbaRao:SubbaRao:2007} have argued convincingly
that the increase in mean surface temperatures recorded at temperature
stations across the Antarctic can to a large degree be attributed to an
increase in the minimum and lower quantile temperatures. When focusing
on the mean, this additional information about the underlying changes
in variation is smoothed out and unavailable for a more in-depth
analysis. As another example, the Value at Risk, a measure of loss
associated with a rare event under normal market conditions, is by
definition a quantile and more important for risk managers than
information on measures of central tendency such as the mean.
Global estimation procedures for quantiles are often performed in the
quantile regression framework described in \cite{Koenker:2005}. There
is by now a rich body of literature on the various aspects of quantile
regression models. Detecting structural breaks in nonstationary time
series over different quantiles, however, is a comparatively new
research area. Contributions in a different direction from ours include
\cite{Bai:1998}, who considered the estimation of structural breaks
in the median of an underlying regression model by means of least
absolute deviations. In the quantile regression framework, Aue \textit{et~al.} \cite{Aue:Cheung:Lee:Zhong:2014} have
recently developed a related
methodology to perform segmented variable selection that includes break
point detection as a special case. The focus of the present paper,
however, is more on the aspects of nonlinear time series analysis.
In order to capture nonlinearities such as asymmetries, local
persistence, and changes in location, scale and shape, in conjunction
with temporal dependence that is frequently observed in applications,
and thus to obtain a more complete picture of the distributional
evolution of the underlying random processes, we propose in this paper
a new method for estimating structural breaks at any single quantile or
across multiple quantiles. Our methodology differs from the works above
in that it is not based on hypothesis testing. Instead we try to match
the observed data with a best fitting piecewise quantile
autoregression. These models, introduced by Koenker and Xiao \cite{Koenker:Xiao:2006},
are members of the class of random coefficient autoregressions that
allow the autoregressive coefficients to be quantile dependent and,
therefore, generalize linear quantile autoregressions as studied by
Koul and Saleh \cite{Koul:Saleh:1995}, and Hallin and Jure{\v{c}}kov{\'a} \cite{Hallin:Jureckova:1989}, among
others. We discuss quantile autoregression models and their piecewise
specifications in Section~\ref{sec:model}. In particular, we state
necessary and sufficient conditions for the existence of stationary
solutions and discuss the estimation of the parameters via optimizing a
subgradient condition. These results will then be generalized to the
piecewise stationary case.
Recognizing the connection between estimation of quantile
autoregression parameters and maximum likelihood estimation for
asymmetric Laplace random variables \cite{Yu:Lu:Stander:2003}, we
shall apply the minimum description length principle \cite
{Rissanen:1989} to define the best fitting piecewise quantile
autoregression. Details of this are given in Section~\ref{sec:mdl}.
Minimization of the resulting convex objective function will then yield
the best fitting model for the given data. The numerical complexity of
this optimization problem is handled via the application of a genetic
algorithm \cite{Davis:1991}.
From a technical perspective, our methodology is related to \cite
{Davis:Lee:Rodriguez-Yam:2006}, who proposed an automatic procedure
termed Auto-PARM. This procedure is designed to detect structural
breaks by fitting piecewise stationary, linear autoregressive time
series models which are estimated through the minimization of a minimum
description length criterion using a normal likelihood. Auto-PARM is
defined to mimic the second-order properties of the data but is not
always able to adjust to a nonlinear framework and does not provide
additional insight into distributional changes other than those
affecting the conditional mean and variance of the data given past observations.
The remainder of the paper is organized as follows. In Section~\ref
{sec:model}, quantile autoregressive models are introduced. Estimation
and model selection aspects for piecewise quantile autoregressive
models are detailed in Section~\ref{sec:mdl}. Sections~\ref
{sec:theory} and \ref{sec:ga} deal with asymptotic results and
implementation details, respectively. Empirical properties of the
proposed methodology are evaluated through simulations in Section~\ref
{sec:sim} and
real data examples in Section~\ref{sec:app}. Section~\ref{sec:sum}
concludes and
all technical proofs are given in the \hyperref[sec:proof]{Appendix}.
\section{Quantile autoregressions}
\label{sec:model}
Linear autoregressive models have played a dominant role in classical
time series analysis for at least half a century. The popularity stems
partially from their closeness to the linear regression framework with
its well-developed theory. They are, however, unable to capture
nonlinear dynamics and local persistence. With the objective of
dynamically modeling the evolution of location, scale and shape of the
underlying processes, Koenker and Xiao \cite
{Koenker:Xiao:2006} have introduced a
particular subclass of random coefficient autoregressive models called quantile
autoregressions. In this model, autoregressive coefficients are allowed
to vary with the quantiles $\tau\in[0,1]$. In contrast to many of the
standard contributions to the random coefficient autoregression area
for which independence is a key assumption, the coefficients possess a
strong functional
relationship; in the sequel $\mathbb{Z}$ denotes the set of integers. A
real time series $(y_t\dvt t\in\mathbb{Z})$ is said to follow a
quantile autoregression of order $p$, shortly $\QAR(p)$, if
\begin{equation}
\label{eqn:qar1} y_{t}=\theta_{0}(u_t)+
\theta_{1}(u_t)y_{t-1}+\cdots+\theta
_{p}(u_t)y_{t-p}, \qquad t\in\mathbb{Z},
\end{equation}
where $(u_t\dvt t\in\mathbb{Z})$ are independent random variables
distributed uniformly on the interval $[0,1]$, and $\theta_{j}\dvtx
[0,1]\to\mathbb{R}$, $j=0,1,\ldots,p$, are the coefficient
functions. In order to exhibit the connection to standard random
coefficient autoregressions, (\ref{eqn:qar1}) can also be written more
conventionally in the form
\begin{equation}
\label{eqn:qar4} y_{t}=\phi_{0}+\phi_{1,t}y_{t-1}+
\cdots+\phi_{p,t}y_{t-p}+\varepsilon_{t},\qquad t\in
\mathbb{Z},
\end{equation}
where $\phi_{0}=E\{\theta_{0}(u_t)\}$, $\varepsilon_{t}=\theta
_{0}(u_t)-\phi_{0}$, and $\phi_{j,t}=\theta_{j}(u_t)$ for
$j=1,\ldots,p$ and $t\in\mathbb{Z}$. We have in particular that the
innovations $(\varepsilon_{t}\dvt t\in\mathbb{Z})$ constitute an
independent, identically distributed sequence with distribution
function $F(\cdot)=\theta_{0}^{-1}(\cdot+\phi_{0})$. Therefore,
necessary and sufficient conditions for the existence of a strictly
stationary solution to the equations (\ref{eqn:qar1}) can be derived
from the work of Aue \textit{et al}. \cite
{Aue:Horvath:Steinebach:2006}, which also
contains statements concerning the finiteness of moments of quantile
autoregressions.
The estimation of the quantile autoregression functions $\theta(\tau
)$ in stationary quantile autoregressive models (\ref{eqn:qar1}) is
typically achieved \cite{Koenker:2005} by solving the convex
optimization problem
\begin{equation}
\label{eqn:qar3} \min_{\theta(\tau)\in\mathbb{R}^{p+1}}\sum_{t=1}^{n}
\rho_{\tau
}\bigl\{y_{t}-X'_{t}\theta(
\tau)\bigr\},
\end{equation}
where $\rho_{\tau}(u)=u\{\tau-I(u<0)\}$ is the check function.
Solutions $\hat\theta(\tau)$ of (\ref{eqn:qar3}) are called
autoregression quantiles. Asymptotic properties of the estimation
procedure have been derived in \cite{Koenker:Xiao:2006}. It should be
noted that the assumptions under which the following proposition holds
require $X_t^\prime\theta(\tau)$ to be monotonic.
This will not always be reasonable. However, for the methodology
developed in this paper, this is not an issue insofar as we derive
asymptotic statements only about the quality of the segmentation
procedure but not on the quality of the estimator $\hat\theta$.
\begin{proposition}\label{prop:3}
Let $F_{t-1}={P}(y_t<\cdot\mid\mathcal{F}_{t-1})$ be the conditional
distribution function of $y_t$ given $\mathcal{F}_{t-1}$, and denote
by $f_{t-1}$ its derivative. Under stationarity and if $f_{t-1}$ is
uniformly integrable on $\mathcal{X}=\{x\dvt0<F(x)<1\}$, then
\[
\Sigma^{-1/2}n^{1/2}\bigl[\hat\theta(\cdot)-\theta(\cdot)\bigr]
\stackrel{\mathcal{D}} {\longrightarrow}B_{p+1}(\cdot)\qquad (n\to
\infty),
\]
where $\Sigma=\Omega_1^{-1}\Omega_0\Omega_1^{-1}$ with $\Omega
_0=E(X_tX_t^\prime)$ and $\Omega_1=\lim_n\frac{1}n\sum
_{t=1}^nf_{t-1}\{F_{t-1}^{-1}(\tau)\}X_tX_t^\prime$. Moreover,
$(B_{p+1}(\tau)\dvt\tau\in[0,1])$ is a standard
$(p+1)$-dimensional Brownian bridge.
\end{proposition}
If the number of break points $m$ is given, then estimating their
locations and the $m+1$ piecewise quantile autoregressive models at a
specific quantile $\tau\in(0,1)$ can be done via solving
\begin{equation}
\label{eqn:qar7} \min_{\theta(\tau),\mathcal{K}}\sum_{j=1}^{m+1}
\sum_{t=k_{j-1}+1}^{k_{j}}\rho_{\tau}\bigl
\{y_{t}-X'_{j,t}\theta_{j}(\tau)
\bigr\}.
\end{equation}
Given that the number of observations in each segment increases as a
fraction of the overall sample size, the limit behavior of (\ref
{eqn:qar7}) follows directly from Proposition~\ref{prop:3}. \newtext
{For unknown $m$, we use a model selection approach to select the
numbers of segments.} To this end, we discuss the relation between
(\ref{eqn:qar3}) and (\ref{eqn:qar7}), and optimizing the likelihood
obtained from asymmetric Laplace distributions next.
The connection between the asymmetric Laplace distribution and quantile
regression has long been recognized and has often been used in the
Bayesian context. Yu \textit{et al.} \cite
{Yu:Lu:Stander:2003} have made this explicit.
If we assume that at the $\tau$th quantile the innovations
$(\varepsilon_t\dvt t\in\mathbb{Z})$ in model (\ref{eqn:qar4})
follow an asymmetric Laplace distribution with parameter $\tau$, then
maximizing the likelihood function
\[
L\bigl\{\theta(\tau)\bigr\}\propto\exp\Biggl[-\sum
_{t=1}^n\rho_\tau\bigl\{
y_t-X_t^\prime\theta(\tau)\bigr\} \Biggr]
\]
is equivalent to solving the problem in (\ref{eqn:qar3}). The
equivalent to (\ref{eqn:qar7}) could be stated in a similar fashion.
The use of the asymmetric Laplace likelihood allows us to formulate a
minimum description length criterion in order to do model selection
with (\ref{eqn:qar7}).
\section{Piecewise quantile autoregressions}
\label{sec:mdl}
\subsection{The model}
\label{subsec:piecewise qar}
Koenker and Xiao \cite{Koenker:Xiao:2006} have pointed
out that a fitted quantile
autoregressive model should serve as a useful local approximation to a
potentially more complicated global dynamic. While a single quantile
autoregression fit can already adequately and quite explicitly describe
local persistence and seemingly explosive behavior (see Sections~\ref
{sec:sim} and \ref{sec:app} for examples), it does not provide us with
means to fit nonstationary data. We propose to match a nonstationary
time series by blocks of different stationary quantile autoregressions.
The piecewise stationary quantile autoregressive models are defined as
follows. Assume that the data $y_1,\ldots,y_n$ can be segmented into
$m+1$ stationary pieces, and that, for $\ell=1,\ldots,m+1$, the $\ell
$th piece can be modeled by a $\QAR(p_\ell)$ process. For $\ell
=1,\ldots,m+1$, we denote by $k_\ell$ the $\ell$th break date, that
is, the time lag at which the transition from the $\ell$th to the
$(\ell+1)$th segment occurs. Using the convention $k_0=1$ and
$k_{m+1}=n$ and letting $u_1,\ldots,u_n$ be independent standard
uniform random variables, the $\ell$th segment is, for $t=k_{\ell
-1}+1,\ldots,k_\ell$, given by
\begin{equation}
\label{eqn:qar5} y_{t}=\theta_{\ell,0}(u_t)+
\theta_{\ell,1}(u_t)y_{t-1}+\cdots+
\theta_{\ell,p_\ell}(u_t)y_{t-p_\ell} =X_{\ell,t}^\prime
\theta_\ell(u_t),
\end{equation}
where $X_{\ell,t}=(1,y_{t-1},\ldots,y_{t-p_{\ell}})^\prime$ and
$\theta_{\ell}(u_t)=\{\theta_{\ell,0}(u_t),\ldots,\theta_{\ell
,p_{\ell}}(u_t)\}^\prime$. At $\tau\in(0,1)$, model (\ref
{eqn:qar5}) is determined by the parameters $m$, $\mathcal
{K}=(k_1,\ldots,k_m)^\prime$ and $\theta(\tau)=\{\theta_1(\tau
)^\prime,\ldots,\break \theta_{m+1}(\tau)^\prime\}^\prime$, where the
segment autoregression functions are denoted by $\theta_\ell(\tau)=\{
\theta_{\ell,0}(\tau),\theta_{\ell,1}(\tau),\ldots,\allowbreak \theta_{\ell
,p_\ell}(\tau)\}^\prime$. Observe that in the case that $m=0$, (\ref
{eqn:qar5}) reduces to the single $\QAR(p)$ model (\ref{eqn:qar1}). One
can fit the model (\ref{eqn:qar5}) even if it is not the true data
generating process and that we can then view the piecewise quantile
autoregressive structure as an approximation.
The approach taken in this paper is related to the piecewise AR model
fitting technique Auto-PARM developed in \cite
{Davis:Lee:Rodriguez-Yam:2006}. These authors utilized linear time
series models, changing the coefficient functions $\theta_{\ell
,j}(\cdot)$ in \eqref{eqn:qar5} to constants, say, $\phi_{\ell,j}$,
and were concerned mainly about matching the second-order structure of
the data with stationary AR segments. The present paper focuses on
nonlinear aspects of the time series as observed from quantiles,
thereby enabling a more comprehensive study of changes in the
distribution of the underlying data. The switch from linear to
nonlinear time series means in particular that somewhat different
arguments are needed in order to prove large-sample results (see
Section~\ref{sec:theory}). In terms of practical estimation, the
genetic algorithm behind Auto-PARM can be modified for the piecewise
quantile autoregression fitting. Details are given in Section~\ref{sec:ga}.
\subsection{Model selection at a single quantile}
In this section, we derive a minimum description length criterion for
choosing the best fitting model from the piecewise quantile
autoregressive models defined in (\ref{eqn:qar5}). As to be seen
below, the ``best'' model is defined as the one that enables the best
compression of the observed series $Y=(y_1,\ldots,y_n)^\prime$. For
introductory material on this, see, for example, \cite
{Rissanen:1989,Hansen:Yu:2000,Lee:2001}.
There are different versions of the minimum description length
principle, and the version adopted here is the so-called two-part code.
It begins with splitting $Y$ into two parts. The first part, denoted by
$\hat{\mathcal{{F}}}$, represents the fitted piecewise quantile
autoregression, and the second\vspace*{2pt} part, denoted by $\hat{\mathcal
{E}}=Y-\hat{Y}$, represents the residuals, where $\hat{Y}$ is the
fitted value for $Y$. Notice that once $\hat{\mathcal{{F}}}$ and
$\hat{\mathcal{E}}$ are known, $Y$ can be completely retrieved. The
idea of the minimum\vspace*{2pt} description length principle is to find the best
pair of $\hat{\mathcal{{F}}}$ and $\hat{\mathcal{E}}$ so that via
encoding (or compressing) $\hat{\mathcal{{F}}}$ and $\hat{\mathcal
{E}}$, $Y$ can be transmitted (or stored) with the least amount of
codelength (or memory). To quantify this idea, let $\textsc
{cl}_{\mathcal{F}}(Z|\tau)$ denote the codelength of an object $Z$
using model $\mathcal{F}$ at a specific quantile~$\tau$. Then we have
the decomposition
\begin{equation}
\label{eqn:twopart} \textsc{cl}_{\mathcal{F}}(Y|\tau)=\textsc
{cl}_{\mathcal{F}}(
\hat{\mathcal{{F}}}|\tau)+\textsc{cl}_{\mathcal{F}}(\hat{\mathcal
{E}}|\hat{
\mathcal{{F}}},\tau)
\end{equation}
for the data $Y$. In the above $\textsc{cl}_{\mathcal{F}}(Y|\tau)$
is the codelength for $Y$, $\textsc{cl}_{\mathcal{F}}(\hat{\mathcal
{{F}}}|\tau)$ is the codelength for $\hat{\mathcal{{F}}}$, while
$\textsc{cl}_{\mathcal{F}}(\hat{\mathcal{E}}|\hat{\mathcal
{{F}}},\tau)$ is the codelength for $\hat{\mathcal{E}}$. The minimum
description length\vspace*{1pt} principle defines the best fitting $\hat{\mathcal
{F}}$ as the one that minimizes $\textsc{cl}_{\mathcal{F}}(Y|\tau)$.
Using the estimated quantile autoregression structure, we obtain the
following expression:
\begin{eqnarray}
\label{eqn:qar8} \textsc{cl}_{\mathcal{F}}(\hat{\mathcal
{{F}}}|\tau)
&=&\textsc{cl}_{\mathcal{F}}(m|\tau)+\textsc{cl}_{\mathcal
{F}}(k_{1},
\ldots,k_{m}|\tau)+\textsc{cl}_{\mathcal
{F}}(p_{1},
\ldots,p_{m+1}|\tau) \nonumber\\
&&{}+\textsc{cl}_{\mathcal{F}}\bigl\{\hat{
\theta}_{1}(\tau),\ldots,\hat\theta_{m+1}(\tau)\bigr\}
\nonumber
\\[-8pt]\\[-8pt]
&=&\textsc{cl}_{\mathcal{F}}(m|\tau)+\textsc{cl}_{\mathcal
{F}}(n_{1},
\ldots,n_{m+1}|\tau)+\textsc{cl}_{\mathcal
{F}}(p_{1},
\ldots,p_{m+1}|\tau)\nonumber\\
&&{} +\textsc{cl}_{\mathcal{F}}\bigl\{\hat{
\theta}_{1}(\tau),\ldots,\hat\theta_{m+1}(\tau)\bigr\}.
\nonumber
\end{eqnarray}
To proceed further, we need the following coding result: the codelength
for an integer $T$ is $\log_2 T$ bits, leading to $\textsc
{cl}_{\mathcal{F}}(m|\tau)=\log_2 m$ and $\textsc{cl}_{\mathcal
{F}}(p_{1},\ldots,p_{m+1}|\tau)=\sum_{j=1}^{m+1}\log_{2}p_{j}$. On
the other hand, if the upper bound $T_U$ of an integer $T$ is known,
the corresponding codelength is $\log_2 T_U$ bits. This gives $\textsc
{cl}_{\mathcal{F}}(n_{1},\ldots,n_{m+1}|\tau)=(m+1)\log_2 n$, as
each $n_\ell$ is upper-bounded by $n$. Lastly, Rissanen \cite{Rissanen:1989}
has shown that \newtext{a} maximum likelihood estimate computed from
$n$ data points can be effectively encoded with $\frac{1}{2}\log_2 n$
bits. Applying this to the $\hat\theta_{\ell}(\tau)$'s, we have
$\textsc{cl}_{\mathcal{F}}\{\hat{\theta}_{1}(\tau),\ldots,\hat
\theta_{m+1}(\tau)\}=\newtext{\sum_{j=1}^{m+1}\frac
{p_{j}+1}{2}\log_{2}n_{j}}$. Combining these codelength expressions,
(\ref{eqn:qar8}) becomes
\begin{equation}
\label{eqn:qar8a} \textsc{cl}_{\mathcal{F}}(\hat{\mathcal
{{F}}}|\tau) =
\log_{2}m+(m+1)\log_{2}n+\sum_{j=1}^{m+1}
\log_{2}p_{j}+\newtext{\sum_{j=1}^{m+1}
\frac{p_{j}+1}{2}\log_{2}n_{j}}.
\end{equation}
Now for the last term in (\ref{eqn:twopart}). It is shown in \cite
{Rissanen:1989} that the codelength of the residuals $\hat{\mathcal
{E}}$ is the negative of the log likelihood of the fitted model $\hat
{\mathcal{{F}}}$. Utilizing the asymmetric Laplace likelihood this
leads to
\begin{equation}
\label{eqn:qar9} \textsc{cl}_{\mathcal{F}}(\hat{\mathcal{E}}|\hat
{\mathcal
{{F}}},\tau)= -\log L\bigl\{\theta(\tau)\bigr\} =\sum
_{j=1}^{m+1}\sum_{t=k_{j-1}+1}^{k_{j}}
\rho_\tau(\newtext{\hat{\varepsilon}_t})-n\log\bigl\{
\tau(1-\tau)\bigr\}.
\end{equation}
Combining equations (\ref{eqn:twopart}), (\ref{eqn:qar8}) and (\ref
{eqn:qar9}) and dropping the constant term $-n\log\{\tau(1-\tau)\}$,
we define the best fitting piecewise quantile autoregressive model at a
single quantile $\tau\in(0, 1)$ as the one that minimizes the minimum
description length criterion
\begin{eqnarray}
\label{eqn:qar10}
&&\textsc{mdl}(m,k_{1},\ldots,k_{m},p_{1},
\ldots,p_{m+1}|\tau)\nonumber\\
&&\quad =\log_{2}m+(m+1)\log_{2}n\\
&&\qquad{}+\sum
_{j=1}^{m+1}\log_{2}p_{j}+
\newtext{\sum_{j=1}^{m+1}\frac{p_{j}+1}{2}
\log_{2}n_{j}}
+\sum
_{j=1}^{m+1}\sum_{t=k_{j-1}+1}^{k_{j}}
\rho_\tau(\newtext{\hat{\varepsilon}_{t}}).
\nonumber
\end{eqnarray}
\subsection{Model selection at multiple quantiles}
To extend the scope of detecting break points at a single quantile, it
is worthwhile to study the joint estimation of, say, $L$ quantiles in
order to gain more insight into the global behavior of the process. To
estimate break points for multiple quantiles, it can, for example, be
assumed that the true break locations are the same across the different
quantiles under consideration. This could lead to a borrowing of
strength in the segmentation procedure because information on the
behavior of various quantiles is added into the analysis. Instead of
summing up the minimum description length function defined in (\ref
{eqn:qar10}) for all $L$ quantiles, one could also use their weighted
sums. That is,
\begin{eqnarray}
\label{eqn:qar11} &&\textsc{mdl}(m,k_{1},\ldots,k_{m},p_{1},
\ldots,p_{m+1}|\tau_{1},\ldots,\tau_{L})
\nonumber\\[-8pt]\\[-8pt]
&&\quad =\sum_{\ell=1}^{L}\omega_{\ell}
\textsc{mdl}(m,k_{1},\ldots,k_{m},p_{1},
\ldots,p_{m+1}|\tau_{\ell}).
\nonumber
\end{eqnarray}
The weights can either be chosen in advance or data-adaptively. In the
latter case it may be worthwhile to read the discussion in Chapter~5.5
of \cite{Koenker:2005}, where similar ideas are discussed in a
location-shift regression model. For this case the optimal weights
$\omega_\mathrm{opt}=(\omega_{1,\mathrm{opt}},\ldots,\omega
_{L,\mathrm{opt}})^\prime$ are given by
$
\omega_{\mathrm{opt}}=W^{-1}v$,
where $W$ is the $L\times L$ matrix with entries $W_{\ell,\ell^\prime
}=\min\{\tau_\ell,\tau_{\ell^\prime}\}-\tau_\ell\tau_{\ell
^\prime}$ and $v=(v_1,\ldots,v_L)^\prime$ with $v_\ell
=f(F^{-1}(\tau_\ell))$. For the more complicated model under
consideration here, one could use these results as a starting point for
a more detailed analysis.
On the other hand, one could also think about a more general version of
the segmentation procedure that would not enforce simultaneous breaks
across the quantiles under consideration. Such an approach may be
useful if it could be coupled with prior information on the effect
breaks would have on the underlying distribution; for example, if
breaks would propagate in a monotone way from the lower to the upper
quantiles. The resulting minimum description length criterion would
then be even more complex. While a few issues concerning multiple
quantiles are highlighted in the empirical parts of the paper, any
detailed analysis of such modeling is, however, beyond the scope of the
present paper.
\section{Large sample results}
\label{sec:theory}
To study large sample properties assume that the underlying true model
indeed follows the piecewise quantile autoregressive structure in (\ref
{eqn:qar5}). We denote the true number of break points and their
locations respectively by $m^0$ and $k_{j}^{0}$, $j=1,\ldots,m^0$,
where $k_{j}^{0}=\lfloor\lambda^{0}_{j}n\rfloor$ and $0<\lambda
^{0}_{1}<\lambda^{0}_{2}<\cdots<\lambda^{0}_{m^0}<1$. Following
standard convention in order to ensure sufficient separation of the
break points, we choose an $\epsilon>0$ such that $\epsilon\ll\min
_{j=1,\ldots,m^0+1}(\lambda_{j}^{0}-\lambda_{j-1}^{0})$ and set
\[
\Lambda_{m}= \bigl\{(\lambda_{1},\ldots,
\lambda_{m})\dvt0<\lambda_{1}<\cdots<
\lambda_{m}<1, \lambda_{j}-\lambda_{j-1}\geq
\epsilon, j=1,2,\ldots,m+1 \bigr\},
\]
where $\lambda_{0}=0$ and $\lambda_{m+1}=1$. Fix $\tau\in(0,1)$,
and set $\lambda=(\lambda_{1},\ldots,\lambda_{m})$ and
$p=(p_{1},\ldots,p_{m+1})$. The parameters $m$, $\lambda$ and $p$ are
estimated by minimizing the minimum description length criterion
\begin{equation}
\label{mdl-lim} (\hat{m},\hat\lambda,\hat{p})=\arg\min
_{(m,\lambda,p)\in
\mathcal{M}}
\frac{1}{n}\textsc{mdl}(m,\lambda,p|\tau),
\end{equation}
where the minimum is taken in the set $\mathcal{M}=\{(m,\lambda
,p)\dvt m\leq M_0, \lambda\in\Lambda_m, 0\leq p_j\leq P_0\}$ with
$M_{0}$ and $P_{0}$ denoting upper bounds for $m$ and $p_{j}$,
respectively. The large sample behavior of the minimum description
length criterion is given in the next theorem. Its proof can be found
in the \hyperref[sec:proof]{Appendix}.
\begin{theorem}\label{th:1}
Assume that the conditions of Proposition~\ref{prop:3} are satisfied
and let the number of break points $m^0$ be known. Then estimating the
piecewise quantile autoregressive model specified in (\ref{eqn:qar5})
at any single quantile $\tau\in(0,1)$ leads to
\[
\hat{\lambda}_{j}\rightarrow\lambda^{0}_{j}\qquad
\mbox{with probability one } (n\rightarrow\infty)
\]
for all $j=1,2,\ldots,m^0$, where $\hat{\lambda}=(\hat{\lambda
}_{1},\ldots,\hat{\lambda}_{m^0})$ is the minimizer of the criterion
function (\ref{eqn:qar10}).
\end{theorem}
The following \newtext{corollary} extends the result of Theorem~\ref
{th:1} to the multiple quantile case. Its verification is also provided
in the \newtext{\hyperref[sec:proof]{Appendix}}.
\begin{corollary}\label{cor:1}
Assume that the conditions of Proposition~\ref{prop:3} are satisfied.
Let the number of break points $m^0$ be known and assume that the break
locations as well as the autoregressive orders are the same across the
quantiles under consideration. Then estimating the piecewise quantile
autoregressive model specified in (\ref{eqn:qar5}) at the collection
of quantiles $(\tau_{1},\ldots,\tau_{L})\in(0,1)^L$ leads to
\[
\hat{\lambda}_{j}\rightarrow\lambda^{0}_{j}\qquad
\mbox{with probability one } (n\rightarrow\infty)
\]
for all $j=1,2,\ldots,m^0$, where $\hat{\lambda}=(\hat{\lambda
}_{1},\ldots,\hat{\lambda}_{m^0})$ is the minimizer of the criterion
function (\ref{eqn:qar11}).
\end{corollary}
We remark that in practice the assumption of known $m^0$ is often
unrealistic. However, it is substantially more difficult to establish
consistency in the general case of unknown $m^0$. Even in the simpler
univariate change-point frameworks, where independent variables are
grouped into segments of identical distributions, only special cases
such as normal distributions and exponential families have been
thoroughly investigated; for example, \cite{Lee.cb.97,Yao88} as well
as \cite{Aue:Lee:2011} for image segmentation. The
reason for this is that sharp tail estimates for maxima of certain
squared Gaussian processes are needed which do not hold for
distributions with thicker tails.
\section{Practical minimization using genetic algorithms}
\label{sec:ga}
Practical minimization of the minimum description length criteria (\ref
{eqn:qar10}) and (\ref{eqn:qar11}) is not a trivial task. We propose
using genetic algorithms to solve this minimization problem.
Genetic algorithms are a class of stochastic optimization techniques.
They are based on the idea of Darwin's theory of natural selection.
Typically a genetic algorithm begins with a random population of
possible solutions to the optimization problems. These solutions are
known as \emph{chromosomes} and often represented in vector form.
These chromosomes are allowed to evolve over time through the so-called
\emph{crossover} and \emph{mutation} operations. The hope is that the
evolution process would ultimately lead to a chromosome which
represents a good answer to the optimization problem. Successful
applications of genetic algorithms for solving various optimization
problems can be found, for example, in \cite{Davis:1991}.
For a similar piecewise AR modeling minimization problem, Davis \textit{et al.} \cite{Davis:Lee:Rodriguez-Yam:2006}
developed a genetic algorithm for
approximating the minimizer. We modified their genetic algorithm to
solve the present minimization problem. For conciseness, we only
describe the major differences between the genetic algorithm for the
present piecewise quantile autoregressive model fitting problem and the
one from \cite{Davis:Lee:Rodriguez-Yam:2006}. We refer the reader to
\cite{Davis:Lee:Rodriguez-Yam:2006} for complete details.
\begin{ChromosomeRepresentation*} For the current problem of
detecting break points for a non-stationary time series at a specific
quantile $\tau$, a chromosome should contain information about all the
break points $k_{j}$ as well as the quantile autoregression orders
$p_{j}$ for any $\mathcal{F}\in\mathcal{M}$, where $\mathcal{M}$
denotes the whole class of piecewise quantile autoregressive models. We
express a chromosome as a vector of $n$ integers: a chromosome $\boldsymbol{\delta}=(\delta_{1},\ldots,\delta_{n})$ is of length $n$ with gene
values $\delta_{t}$ defined as
\[
\delta_{t}=\lleft\{
\begin{array} {l@{\qquad}l}
-1,& \mbox{if no
break point at time }t,
\\
p_{j},& \mbox{if $t=k_{j-1}$ and for the $j$th piece we
choose the $\QAR(p_j)$ model at quantile $\tau$.}
\end{array}
\rright.
\]
In practice, we impose an upper bound $P_{0}$ on the order $p_{j}$ of
each quantile autoregressive process. For our numerical work, we set
$P_{0}=20$. While the algorithm is running, we also impose the
following constraint on each $\delta$: in order to have enough
observations for parameter estimation, each piecewise quantile
autoregressive process is required to have a minimum length $m_p$,
which is chosen as a function of the order $p_j$ of the piecewise
process; their values are listed in Table~\ref{table1}.
\end{ChromosomeRepresentation*}
\ignore{
\textsc{First Population Generation:}
The initialization of the algorithm requires the creation of a number
of chromosomes to fill the first generation. The gene values of these
chromosomes are randomly generated, as follows. Starting from $t=1$,
generate the order $p_{1}$ for the first piece from $1,2,\ldots,P_{0}$
with equal probabilities, and set $\delta_{1}=p_{1}$. Then the next
$m_{p_{1}}-1$ genes are set to $-1$ according to the minimum length
constraint mentioned above. Now for the next gene in line (i.e.,
$\delta_{m_{p_{1}}+1}$), it will be either initialized as a break
point with probability $\pi_{B}$, or it will be assigned $-1$ with
probability $1-\pi_{B}$. We use
$\pi_{B}=\min(m_{p})/n=10/n$. If this gene is declared to be a break
point, then we randomly select a number from $1,2,\ldots,P_{0}$ as the
order $p_2$
of the second piece, and assign $\delta_{m_{p_{1}}+1}=p_{2}$. This
implies that the next $m_{p_{2}}-1$ genes will have values $-1$ due to
the minimum length constraint. On the other hand, if
$\delta_{m_{p_{1}}+1}$ is initialized as a non-break point with value
$-1$, then the process will move to the next gene in line and decide
whether this gene is declared as a break point or not. This
initialization process continues in the same way until a value is
assigned to the last gene $\delta_{n}$.
}
\ignore{
\textsc{Crossover and Mutation:} Once a first generation of random
chromosomes are generated, \emph{crossover} and \emph{mutation}
operations are applied to generate offspring which will form the second
generation. The offspring tend to be better than their parents in the
sense that they are better solutions to the optimization problem. In
our implementation we set the probability for conducting a crossover
operation as $\pi_{C}$ and conducting a mutation operation as $1-\pi
_{C}$, where $\pi_{C}=1-\min(m_{p})/n=(n-10)/n$.
In a crossover operation, one child chromosome is produced from
``mating'' two parent chromosomes. The parent chromosomes are selected
from the current pool of chromosomes with probabilities inversely
proportional to their ranks sorted by their values of the objective
function; i.e., their minimum description length values in the current
problem. The goal of this operation is to allow the child chromosome to
inherit good traits from its parents. A typical ``mating'' strategy is
that every child gene location has an equal probability of inheriting
from its father gene or its mother gene. In our problem, the gene
values $\delta_{t}$ of the child chromosome will be selected as
follows. Beginning with $t=1$, $\delta_{t}$ will take the
corresponding gene value either from its father chromosome or its
mother chromosome equally likely. If its value is not $-1$, but some
integer $p_{j}$ instead, then this location is declared to be a break
point with corresponding quantile autoregression order $p_{j}$, and the
next $m_{p_{j}}-1$ genes will be assigned $-1$ to satisfy the minimum
length constraint. If this value is $-1$, then the procedure will move
to the next gene in line, continue until all child genes are allocated.
This crossover operation is the distinct feature that makes genetic
algorithms different from other optimization methods.
In a mutation operation, one child chromosome is generated from one
parent chromosome. The child is mostly identical to its parent, except
that random changes are made to a small number of genes. This operation
provides the child chromosome additional freedom to explore the search
space, and thus avoids the problem of overly fast convergence to a
sub-optimal solution. Starting with $t=1$ and subject to the minimum
length constraint, our implementation of the mutation operation takes
one of the following three possible choices: (i) with probability $\pi
_{P}$ it will take the corresponding $\delta_{t}$ value from its
parent, (ii) with probability $\pi_{N}$ it will take the value $-1$,
and (iii) with probability $1-\pi_{P}-\pi_{N}$ it will randomly
generate a quantile autoregressive process with order ${p_{j}}$. In
this paper we set $\pi_{P}=\pi_{N}=0.3$.
}
\begin{IslandModelandConvergence*} The Island Model was also
applied to speed up the convergence rate. We used 40 islands with
subpopulation size 40, performed a migration for every 5 generations,
and migrated 2 chromosomes during each migration.
And at the end of each migration the overall best chromosome that has
the smallest minimum description length value is selected. If this best
chromosome does not change for 20 consecutive migrations, or the total
number of generations exceeds 100, the genetic algorithm stops and the
best chromosome is taken as the solution to the optimization problem.
\end{IslandModelandConvergence*}
\ignore{
\textsc{Elitist step and Island Model:} In order to converge to an
optimal solution at a faster rate, two additional steps, the \emph
{elitist} step and the \emph{Island model}, are performed. In the
elitist step, the worst chromosome of the next generation is replaced
by the best chromosome of the current generation. This conserves the
best chromosome in each generation and guarantees the monotonicity in
the search process. In conducting the island model, parallel
implementations are applied and $NI$ (number of islands) genetic
algorithms are simultaneously performed in $NI$ different
sub-populations. After every $J$ generations, the worst $H$ chromosomes
from the $j$-th island are replaced by the best $H$ chromosomes from
the $(j-1)$-th island, $j=2,\ldots,NI$ (for $j=1$ the best $H$
chromosomes are migrated from the $NI$-th island). In our simulations
we set $NI=40$, $J=5$, $H=2$ and a sub-population size of 40.
\textsc{Convergence:} At the end of each migration the overall best
chromosome that has the smallest minimum description length value is
selected. If this best chromosome does not change for 20 consecutive
migrations, or the total number of generations exceeds 100, the genetic
algorithm stops and the best chromosome is taken as the solution to the
optimization problem.
}
\begin{table}[t]
\tablewidth=\textwidth
\tabcolsep=0pt
\caption{Values of $m_{p}$ used in the genetic algorithm}\label{table1}
\begin{tabular*}{\textwidth}{@{\extracolsep{\fill}}lllllllll@{}}
\hline
&\multicolumn{8}{l}{$p$}\\[-5pt]
&\multicolumn{8}{l}{\hrulefill}\\
& 0--1 & 2 & 3 & 4 & 5 &6 &7--10 &11--20 \\
\hline
$m_{p}$ & 10 & 12 & 14 & 16 & 18 & 20 &25 &50 \\ \hline
\end{tabular*}
\end{table}
\section{Simulation studies}
\label{sec:sim}
\subsection{Preliminaries}
In this section, four sets of simulation experiments are conducted to
evaluate the empirical performance of the proposed method for fitting
piecewise stationary quantile autoregressions. We shall compare the
results from our method with the Auto-PARM method of \cite
{Davis:Lee:Rodriguez-Yam:2006}, who developed an automatic procedure
for fitting piecewise autoregressive processes.
In each set of experiments, the results are based on 500 repetitions.
For the proposed method, we estimated the structural changes at
individual quantiles $\tau=0.25$, $0.5$ and $0.75$, as well as jointly
at $(0.25, 0.5, 0.75)$
using equal weights for the three quantiles. For convenience, we will
report the \emph{relative}\vspace*{2pt} locations of break points defined as $\hat
{\lambda}_{j}=\hat{k}_{j}/n$ for $j=1,\ldots,\hat{m}$.
\subsection{Piecewise $\AR(2)$ processes}
This simulation experiment is designed to compare the performance of
the proposed method and Auto-PARM in a linear autoregressive process setting
favoring the latter. The data generating process is
\begin{equation}
\label{eqn:sim1} y_{t}=\lleft\{
\begin{array} {l@{\qquad}l} 0.5y_{t-1}+0.3y_{t-2}+\varepsilon_{t}& (1
\leq t\leq n/2),
\\
-0.5y_{t-1}-0.7y_{t-2}+\varepsilon_{t}&
(n/2<t\leq3n/4),
\\
1.3y_{t-1}-0.5y_{t-2}+\varepsilon_{t}&
(3n/4<t\leq n),
\end{array}
\rright.
\end{equation}
where $(\varepsilon_{t})$ are independent standard normal, and
$n=1024$ and $2048$.
\begin{table}
\tablewidth=\textwidth
\tabcolsep=0pt
\caption{Summary of the estimated number of break points $\hat m$ for
the proposed procedure for the process (\protect\ref{eqn:sim1}) with $n=1024$.
Mean (standard deviation (Std)) of the relative break point location is
reported where applicable. If mult is specified for the quantile, it
refers to the multiple case $\tau=(0.25,0.50,0.75)$. The rows labeled
Auto-PARM give the results for that method}
\label{table:sim1}
\begin{tabular*}{\textwidth}{@{\extracolsep{\fill}}lllllll@{}}
\hline
&\multicolumn{6}{l}{$\hat m$}\\[-5pt]
&\multicolumn{6}{l}{\hrulefill}\\
& \multicolumn{1}{l}{0}&\multicolumn{2}{l}{1} & \multicolumn{2}{l}{2}
& \multicolumn{1}{l}{3} \\[-5pt]
& \multicolumn{1}{l}{\hrulefill}&\multicolumn{2}{l}{\hrulefill} &
\multicolumn{2}{l}{\hrulefill}
& \multicolumn{1}{l}{\hrulefill} \\
\multicolumn{1}{l}{$\tau$} & \multicolumn{1}{l}{\%} &
\multicolumn{1}{l}{\%} & \multicolumn{1}{l}{Mean (Std)} &
\multicolumn{1}{l}{\%} & \multicolumn{1}{l}{Mean (Std)} &
\multicolumn{1}{l}{\%} \\
\hline
$0.25$ & $1.2$ & $23.2$ & $0.759\ (0.016)$ & $75.6$ & $0.501\ (0.024)$ &
0 \\
& & & & & $0.747\ (0.012)$ & \\[3pt]
$0.50$ & $0$ & $\hphantom{2}3.6$ & $0.757\ (0.012)$ &
$96.4$ & $0.504\ (0.021)$ & 0 \\
& & & & & $0.747\ (0.011)$ & \\[3pt]
$0.75$ & $0.6$ & $19.8$ & $0.756\ (0.014)$ & $79.6$ & $0.501\ (0.025)$ &
0 \\
& & & & & $0.747\ (0.013)$ & \\[3pt]
mult & $0$ & $14.2$ & $0.750\ (0.013)$ & $85.8$ & $0.503\ (0.023)$ & 0 \\
& & & & & $0.748\ (0.012)$ & \\[3pt]
Auto-PARM & $0$ &$\hphantom{1}0$ & & $99.6$ & $0.501\ (0.004)$ &
$0.4$ \\
& & & & & $0.751\ (0.002)$ & \\ \hline
\end{tabular*}
\vspace*{-3pt}
\end{table}
For each simulated process we applied both procedures to locate the
break points. We recorded the number of break points detected by each
method, together with their relative locations. These numbers are
summarized in Tables~\ref{table:sim1} and \ref{table:sim1-2048}. From
Table~\ref{table:sim1}, we observe that, for the case $n=1024$, the
performance of Auto-PARM is slightly better than that of the proposed
method at the median and is better at the other two quantiles under
consideration. However, as $n$ increased to 2048, the performance of
the quantile autoregression procedure improved and is comparable with
Auto-PARM both in terms of finding the correct number of breaks and
their locations, as can be seen from Table~\ref{table:sim1-2048}.
We have repeated the same experiment but with innovations distributed
as the $t$-distribution with 5 degrees of freedom. In this case, our
method outperformed Auto-PARM for all quantiles tested. Due to space
limitation, tabulated results are omitted.
\begin{table}[t]
\tablewidth=8cm
\tabcolsep=0pt
\caption{Similar to Table \protect\ref{table:sim1} except for
$n=2048$}\label{table:sim1-2048}
\begin{tabular*}{8cm}{@{\extracolsep{\fill}}llll@{}}
\hline
&\multicolumn{3}{l}{$\hat m$} \\[-5pt]
&\multicolumn{3}{l}{\hrulefill} \\
& \multicolumn{2}{l}{2} & \multicolumn{1}{l}{3} \\[-5pt]
& \multicolumn{2}{l}{\hrulefill} & \multicolumn{1}{l}{\hrulefill} \\
\multicolumn{1}{l}{$\tau$} & \multicolumn{1}{l}{\%} &\multicolumn
{1}{l}{ Mean (Std)} & \multicolumn{1}{l}{\%} \\
\hline
$0.25$ & $\hphantom{1}99.2$ & $0.503\ (0.015)$ &
$0.8$ \\
& & $0.747\ (0.008)$ & \\[3pt]
$0.50$ & $\hphantom{1}99.4$ & $0.503\ (0.012)$ &
$0.6$ \\
& & $0.744\ (0.006)$ & \\[3pt]
$0.75$ & $\hphantom{1}99.6$ & $0.503\ (0.015)$ &
$0.4$ \\
& & $0.748\ (0.007)$ & \\[3pt]
mult & $\hphantom{1}99.4$ & $0.504\ (0.013)$ & $0.6$ \\
& & $0.748\ (0.007)$ & \\[3pt]
Auto-PARM & $100$ & $0.501\ (0.002)$ & 0 \\
& & $0.750\ (0.001)$ & \\
\hline
\end{tabular*}
\end{table}
\begin{figure}[b]
\includegraphics{671f01.eps}
\caption{A typical realization for the process in (\protect\ref{eqn:sim2}).}
\label{fig:sim2}
\end{figure}
\subsection{$\QAR(1)$ processes exhibiting explosive behavior}
\label{sec:sim2}
The data generating mechanism in this simulation follows the $\QAR(1)$ process
\begin{equation}
\label{eqn:sim2} y_{t}=(0.85+0.25u_t)y_{t-1}+
\Phi^{-1}(u_t),
\end{equation}
where $(u_t)$ is a sequence of independent standard uniform random
variables and $\Phi$ the standard normal distribution function. Shown
in Figure~\ref{fig:sim2} is a typical realization. There is no
structural break in this series but from the plot one can see that it
exhibits explosive behavior in the upper tail. Processes such as this
one seem to be capable of modeling certain macroeconomic time series;
for example, interest rate data. We will
revisit this issue in Section~\ref{sec:app} below. While our method
does not detect break points at any of the quantiles tested, only about
one-third of the results from Auto-PARM lead to the correct conclusion;
the numbers of break points detected by their method are summarized in
Table~\ref{table:sim2}. It is apparent that Auto-PARM is much less
tolerant to nonlinearity.
\subsection{Piecewise $\AR(1)$ processes with changes in certain
quantile ranges}
In this simulation experiment, the nonstationary time series is
generated from the model
\begin{equation}
\label{eqn:sim3} y_{t}=\lleft\{
\begin{array} {l@{\qquad}l}
\bigl\{0.5I(\tau\leq0.2)+0.8I(\tau>0.2)\bigr\} y_{t-1}+
\varepsilon_{t}& (1\leq t\leq n/2),
\\
0.5y_{t-1}+\varepsilon_{t}& (n/2<t\leq n),
\end{array}
\rright.
\end{equation}
where $(\varepsilon_t)$ are independent asymmetric Laplace with
parameter $0.4$ for $t\leq n/2$ and independent asymmetric
Laplace with parameter $0.6$ for $t>n/2$.
\begin{table}
\tablewidth=\textwidth
\tabcolsep=0pt
\caption{Relative frequencies of the number of break points estimated
from Auto-PARM for the process (\protect\ref{eqn:sim2}) with $n=1024$.
Independent of the specific quantile it was applied to, the proposed
methodology always correctly chose $\hat m=0$}\label{table:sim2}
\begin{tabular*}{\textwidth}{@{\extracolsep{4in minus 4in}}lllllll@{}}
\hline
&\multicolumn{6}{l}{Number of break points} \\[-5pt]
&\multicolumn{6}{l}{\hrulefill} \\
& 0 & 1 & 2 & 3 & 4 &5 \\
\hline
Relative frequency &33.8 &35.2 &23.8 &
5.6 & 1.4 & 0.2 \\ \hline
\end{tabular*}
\end{table}
\begin{table}[b]
\tablewidth=11cm
\tabcolsep=0pt
\caption{Similar to Table \protect\ref{table:sim1} except for the
process (\protect\ref{eqn:sim3}) with $n=1024$}\label{table:sim3}
\begin{tabular*}{11cm}{@{\extracolsep{\fill}}llllll@{}}
\hline
&\multicolumn{5}{l}{$\hat m$} \\[-5pt]
&\multicolumn{5}{l}{\hrulefill} \\
& \multicolumn{1}{l}{0} & \multicolumn{2}{l}{1} & \multicolumn
{1}{l}{2} & \multicolumn{1}{l}{3} \\[-5pt]
& \multicolumn{1}{l}{\hrulefill} & \multicolumn{2}{l}{\hrulefill} &
\multicolumn{1}{l}{\hrulefill} & \multicolumn{1}{l}{\hrulefill} \\
\multicolumn{1}{l}{$\tau$} & \multicolumn{1}{l}{\%} & \multicolumn
{1}{l}{\%} & \multicolumn{1}{l}{Mean (Std)} &
\multicolumn{1}{l}{\%} & \multicolumn{1}{l}{\%} \\
\hline
$0.25$ & $83.4$ & $16.6$ & $0.527\ (0.096)$ & 0 & 0 \\
$0.50$ & $\hphantom{8}1.5$ & $98.5$ & $0.503\ (0.038)$ & 0 & 0 \\
$0.75$ & $24.4$ & $75.6$ & $0.479\ (0.055)$ & 0 & 0 \\
mult & $35.2$ & $64.8$ & $0.498\ (0.046)$ &
0 & 0 \\
Auto-PARM & $51.0$ & $44.4$ & $0.487\ (0.181)$ & $4.0$ & $0.6$ \\ \hline
\end{tabular*}
\end{table}
For this process, results from our method and Auto-PARM are reported in
Table~\ref{table:sim3} in a similar manner as in Table~\ref
{table:sim1}. Not reported in this table is the fact that, when the
coefficients of $y_{t-1}$ in the two pieces are the same (which happens
for quantiles $\tau\leq0.2$), then the proposed procedure does
not detect any break points even though the residuals of the two pieces
are slightly different. For the quantile at $\tau=0.25$ which
is close to the threshold at which the autoregressive coefficient
changes, our method detected a (nonexistent) break point in 16.6\% of the
simulation runs. On the other hand, when $\tau\geq0.5$, the
quantile autoregression method performs reasonably well, especially at
the median where the performance is excellent. Also at $\tau=0.75$ it
outperforms Auto-PARM. When estimating jointly at $\tau
=(0.25, 0.5, 0.75)$, the percentage of detecting
the correct number of break points is not as high as at $\tau=0.5$ due
to the inclusion of the quantiles at $\tau=0.25$ and
$\tau=0.75$, indicating that care has to be exercised if
quantiles are jointly specified. We can also see that the performance
of our method is better than that of Auto-PARM in both percentage and
accuracy (in terms of smaller standard deviations) for this simulation
example. In Table~\ref{table:sim3order}, we summarize the proposed
procedure's estimates of the quantile autoregression orders for the
above process at $\tau=0.5$, and we can see that most of the
segments are correctly modeled as $\QAR(1)$ processes.
\begin{table}[t]
\tablewidth=200pt
\tabcolsep=0pt
\caption{Relative frequencies of the quantile autoregression orders
selected by the proposed method at $\tau=0.5$ for the
realizations from the process (\protect\ref{eqn:sim3})}\label
{table:sim3order}
\begin{tabular*}{200pt}{@{\extracolsep{\fill}}llllll@{}}
\hline
&\multicolumn{5}{l}{Order}\\[-5pt]
&\multicolumn{5}{l}{\hrulefill}\\
& \multicolumn{1}{l}{1} & \multicolumn{1}{l}{2} & \multicolumn{1}{l}{3}
& \multicolumn{1}{l}{4} & \multicolumn{1}{l}{5} \\ \hline
$p_{1}$ & $80.3$ & $15.7$ & $2.6$ & $1.4$ &
0 \\
$p_{2}$ & $72.4$ & $19.2$ & $6.6$ & $1.4$ &
$0.4$ \\
\hline
\end{tabular*}
\end{table}
\subsection{Higher-order QAR processes}
In this experiment, the data generating process is
\begin{equation}
\label{eqn:sim4} y_{t}=\lleft\{
\begin{array} {l@{\qquad}l}
(0.2+0.1u_t)y_{t-1}+(0.5+0.1u_t)y_{t-2}+\epsilon_{t} & (1\leq t\leq n/2),
\\
0.7u_ty_{t-1}+\epsilon_{t}& (n/2<t\leq
n),
\end{array}
\rright.
\end{equation}
where $(u_t)$ is a sequence of independent standard uniform random
variables, $(\epsilon_t)$ are independent standard normal for $t \leq
n/2$, and independent asymmetric Laplace with parameter 1 for $t >
n/2$. A typical realization is displayed in Figure~\ref{fig:sim4}, and
break detection results from our method for this process are reported
in Table~\ref{table:sim4}. One can see that our method has
successfully detected one break with very high probability in most
considered cases, and that the detected relative locations are also
very close to the true location.
In order to assess the performance of the MDL criterion for order
selection in $\QAR(p)$ models for $p>1$, we tabulated the relative
frequencies of the order selected by the proposed method for the first
piece of process (\ref{eqn:sim4}) in Table~\ref{table:sim4_2}. The
proposed method never underestimates the order, but only achieves about
50\% accuracy. At first sight, these correct estimation rates seem to
be relatively low. However, in the break point detection context, the
problem of order estimation seems to be hard even for linear AR
processes (of higher order), as is seen in Table~3 of \cite
{Davis:Lee:Rodriguez-Yam:2006}, where Auto-PARM only gave around 65\%
correct estimation rates for $\AR(2)$ processes. Thus, we believe that a
50\% correct rate is not unreasonable for $\QAR(p)$ models.
\begin{figure}
\includegraphics{671f02.eps}
\caption{A typical realization for the process in (\protect\ref{eqn:sim4}).}\vspace*{10pt}
\label{fig:sim4}
\end{figure}
\begin{table}
\tablewidth=250pt
\tabcolsep=0pt
\caption{Similar to Table \protect\ref{table:sim1} except for the
process (\protect\ref{eqn:sim4}) with $n=4000$}
\label{table:sim4}
\begin{tabular*}{250pt}{@{\extracolsep{\fill}}lllll@{}}
\hline
& \multicolumn{4}{l}{$\hat m$} \\[-5pt]
& \multicolumn{4}{l}{\hrulefill} \\
&\multicolumn{1}{l}{0} &\multicolumn{2}{l}{1} & \multicolumn
{1}{l}{2} \\[-5pt]
&\multicolumn{1}{l}{\hrulefill} &\multicolumn{2}{l}{\hrulefill} &
\multicolumn{1}{l}{\hrulefill} \\
\multicolumn{1}{l}{$\tau$} &\multicolumn{1}{l}{\%}& \multicolumn
{1}{l}{\%} & \multicolumn{1}{l}{Mean (Std)} & \multicolumn{1}{l}{\%}
\\
\hline
$0.25$ &4.0& \hphantom{1}95.5 & 0.517\ (0.049)&0.5\\
$0.50$ &0& \hphantom{1}98.5 & 0.505\ (0.039)&1.5 \\
$0.75$ &3.0& \hphantom{1}97.0 & 0.508\ (0.052) &0 \\
mult &0& 100.0 & 0.509\ (0.045) & 0.5\\
\hline
\end{tabular*}
\vspace*{30pt}
\tablewidth=\textwidth
\tabcolsep=0pt
\caption{Relative frequencies of the quantile autoregression orders
selected by the proposed method at different $\tau$ values ($\tau=$
0.25, 0.50, 0.75, and mult) for the first piece in the process
(\protect\ref
{eqn:sim4}). The true order is 2}
\label{table:sim4_2}
\begin{tabular*}{\textwidth}{@{\extracolsep{\fill}}llllllll@{}}
\hline
$\tau$ & 1&2 & 3 & 4 & 5 & 6 & $\geq7$ \\ \hline
0.25 & 0& 48.69& 31.41&15.71& 2.09& 1.57& 0.52 \\
0.50 & 0& 51.78& 26.40& 12.18& 5.58 & 2.03& 2.03 \\
0.75 & 0&55.15&22.68& 11.86& 7.73& 1.55& 1.05 \\
mult & 0 &50.50& 26.00& 14.50& 5.00& 2.00& 2.00 \\ \hline
\end{tabular*}
\end{table}
\ignore{
\begin{table}[!ht]
\begin{center}
\begin{tabular}{|c|c|@{ }ccccccccc|}\hline
$\tau$ & Order& 1&2 & 3 & 4 & 5 &6&7&8&9 \\ \hline
0.25 & $p_{1}$ & 0& 48.69& 31.41&15.71& 2.09& 1.57& 0& 0.52 &0 \\
& $p_{2}$ & 46.07&31.94& 12.57& 5.24& 2.62& 1.05 & 0& 0.52& 0 \\ \hline
0.50 & $p_1$ & 0& 51.78& 26.40& 12.18& 5.58 & 2.03& 2.03& 0&0 \\
& $p_2$ & 50.25& 24.37& 13.20& 5.58& 4.06& 1.02& 1.02&1.02&0 \\ \hline
0.75 & $p_1$ & 0&55.15&22.68& 11.86& 7.73& 1.55& 0.52&0&0.52 \\
& $p_2$ & 48.97& 29.90& 10.31&7.22& 2.06& 1.03& 0.52&0&0 \\ \hline
mult & $p_1$ & 0 &50.50& 26.00& 14.50& 5.00& 2.00& 2.00& 0& 0 \\
& $p_2$ &40.00& 31.50& 16.00& 7.50& 2.50& 2.00& 0.5&0&0 \\ \hline
\end{tabular}
\caption{Relative frequencies of the quantile autoregression orders
selected by the proposed method at different $\tau$ values ($\tau$ =
0.25, 0.50, 0.75, and mult).}\label{table:sim4_2}
\end{center}
\end{table}
}
\subsection{Stochastic volatility models}
\label{sec:svm}
The simulation section concludes with an application of the proposed
methodology to stochastic volatility models (SVM) often used to fit
financial time series; see \cite{Shephard:Andersen:2009} for a recent
overview. It should be noted that the proposed quantile methodology and
Auto-PARM are not designed to deal with this type of model as it
consists of uncorrelated random variables exhibiting dependence in
higher-order moments. However, SVM are used to compare the two on a
data generating process different from nonlinear QAR and linear AR time
series. Following Section~4.2 of \cite{Davis:Lee:Rodriguez-Yam:2008},
the process
\begin{equation}
\label{eq:svm1} y_t = \sigma_t\xi_t=e^{\alpha_t/2}
\xi_t,
\end{equation}
is considered, where $\alpha_t=\gamma+\phi\alpha_{t-1}+\eta_t$.
The following two-piece segmentations were compared:
\begin{eqnarray*}
&&\mbox{Scenario A} \quad\mbox{Piece 1:}\quad \gamma
=-0.8106703,\qquad \phi=0.90,\qquad (
\eta_t)\sim\mbox{ i.i.d. } N(0,0.45560010),
\\
&&\hphantom{\mbox{Scenario A} \quad}\mbox{Piece 2:}\quad \gamma
=-0.3738736,\qquad \phi=0.95, \qquad(\eta_t)\sim\mbox{
i.i.d. } N(0,0.06758185),
\end{eqnarray*}
while $(\xi_t)\sim\mbox{ i.i.d. } N(0,1)$ for both pieces, and
\begin{eqnarray*}
&&\mbox{Scenario B}\quad \mbox{Piece 1:}\quad \gamma=-0.8106703,
\qquad\phi=0, \qquad(
\xi_t)\sim\mbox{ i.i.d. } N(0,1),
\\
&&\hphantom{\mbox{Scenario B}\quad}\mbox{Piece 2:}\quad \gamma
=-0.3738736,\qquad \phi=0,\qquad (\xi_t)\sim\mbox{
i.i.d. } N(0,4),
\end{eqnarray*}
while $(\eta_t)\sim\mbox{ i.i.d. } N(0,0.5)$ for both pieces.
Scenario A corresponds to a change in dynamics of the volatility
function $\sigma_t$, Scenario B basically to a scale change.
Scenario A was considered in \cite{Davis:Lee:Rodriguez-Yam:2008}.
These authors developed a method tailored to deal with financial time
series of SVM and GARCH type. The method, termed Auto-Seg, was able to
detect one break in 81.8\% of 500 simulation runs and detected no break
otherwise. On this data, Auto-PARM tends to use too fine a segmentation
as 62.4\% of the simulations runs resulted in two or more estimated
break points. One (no) breakpoint was detected in 21.2\% (16.4\%) of
the cases. The proposed method failed to detect any changes at any of
the tested quantiles ($\tau=0.05, 0.10, 0.25, 0.50, 0.75, 0.90,
0.95$). It should be noted, however, that there is no change at the
median and changes in the other quantiles are very hard to find as is
evidenced by Figure~\ref{fig:quantile-quantile}, which displays the
averaged (over 50 simulation runs) empirical quantile--quantile plot
from the first and the second segment of the two-piece Scenario A process.
\begin{figure}
\includegraphics{671f03.eps}
\caption{Empirical quantile--quantile plot for the SVM process
specified under Scenario A (left panel) and Scenario B (right panel).
The $x$-axis ($y$-axis) shows the empirical quantiles of Piece 1 (Piece
2). The 45 degree line is given for ease of comparison.}
\label{fig:quantile-quantile}
\end{figure}
The results for Scenario B are summarized in Table~\ref{table:svm}. It
can be seen that, for the proposed method, the scale change is
detected at the more extreme quantiles ($\tau=0.05, 0.10, 0.90, 0.95$)
with very good accuracy and with reasonable accuracy at intermediate
quantiles ($\tau=0.25$ and $\tau=0.75$), while no change is found
(correctly) at the median $\tau=0.50$, reflecting that the proposed
procedure describes the local behavior of the SVM process adequately.
Auto-PARM does the same on a global basis.
\begin{table}[b]
\tablewidth=\textwidth
\tabcolsep=0pt
\caption{Summary of the estimated number of break points $\hat m$ for
the proposed procedure and Auto-PARM for the process \protect\eqref{eq:svm1}
with specifications given under Scenario B}
\label{table:svm}
\begin{tabular*}{\textwidth}{@{\extracolsep{\fill}}lllllllll@{}}
\hline
& \multicolumn{7}{l}{$\tau$}& \\[-5pt]
& \multicolumn{7}{l}{\hrulefill}& \\
\multicolumn{1}{l}{$\hat m$}& \multicolumn{1}{l}{0.05} &
\multicolumn{1}{l}{0.10} & \multicolumn{1}{l}{0.25}
& \multicolumn{1}{l}{0.50} & \multicolumn{1}{l}{0.75} & \multicolumn
{1}{l}{0.90} & \multicolumn{1}{l}{0.95} &
\multicolumn{1}{l}{Auto-PARM} \\ \hline
0 & \hphantom{9}0.4\% & \hphantom{9}0.2\% & 32.6\% & 100.0\% & 29.6\% &
\hphantom{10}0.0\% & \hphantom{9}0.6\% & \hphantom{9}0.2\% \\
1 & 99.6\% & 99.8\% & 67.4\% & \hphantom{10}0.0\% & 70.4\% & 100.0\% & 99.4\% &
99.6\% \\
2 & \hphantom{9}0.0\% & \hphantom{9}0.0\% & \hphantom{9}0.0\% &
\hphantom{10}0.0\% &
\hphantom{9}0.0\% & \hphantom{10}0.0\% & \hphantom{9}0.0\% & \hphantom
{9}0.2\% \\ \hline
\end{tabular*}
\end{table}
\section{Real data applications}
\label{sec:app}
\subsection{Treasury bill data}
Treasury bills are short-term risk-free investments that are frequently
utilized by investors to hedge portfolio risks. In this application,
the observations are three-month treasury bills from the secondary
market rates in the United States, ranging from January 1954 to
December 1999. The weekly data can be found at the website \url
{http://research.stlouisfed.org/fred2/series/TB3MS} and are displayed
in Figure~\ref{fig:bills03}.
\begin{figure}
\includegraphics{671f04.eps}
\caption{Three-month treasury bills (01/1954 to 12/1999).} \label{fig:bills03}
\end{figure}
It can be seen from Figure~\ref{fig:bills03} that the time series
exhibits obvious explosive behavior in the upper tail. In many
instances similar time series would be viewed as random walks and
sophisticated testing procedures would have to be applied to either
confirm or reject what is known as unit-root hypothesis; see, for
example, \cite{Paparoditis:Politis:2003,Paparoditis:Politis:2005} for
more. As in Section~\ref{sec:sim2}, Auto-PARM aims in this case at
partitioning the series into segments with structures mimicking linear
behavior. In the present case, this leads to 15 segments. On the other
hand the proposed procedure does not detect break points at any of the
quantiles tested ($\tau=0.05, 0.10,\ldots,0.90,0.95$), thus
indicating that with the use of some extra
parameters a more parsimonious stationary but nonlinear modeling is
possible for this data set. Using a $\QAR(2)$ model with cubic polynomial
coefficients in the uniform random variables $(u_t)$, the data can be
approximated via the following model with 12 parameters:
\begin{equation}
\label{eqn:bills03} y_t=\theta_0(u_t)+
\theta_1(u_t)y_{t-1}+\theta_2(u_t)y_{t-2},
\end{equation}
where
\begin{eqnarray*}
\theta_0(u_t)&=&-0.0144+0.2264u_t-0.5448u_t^2+0.3848u_t^3,
\\
\theta_1(u_t)&=&1.3721-0.9635u_t+1.5312u_t^2-0.6939u_t^3,
\\
\theta_2(u_t)&=&-0.4394+1.3154u_t-2.1945u_t^2+1.1353u_t^3.
\end{eqnarray*}
Figure~\ref{fig:generated} depicts several realizations generated by
the estimated model (\ref{eqn:bills03}), which all show a pattern
closely resembling the data in Figure~\ref{fig:bills03}. This example
illustrates that quantile autoregressions can expand the modeling
options available to the applied statistician as it accurately captures
temporary explosive behavior and nonlinearity.
\begin{figure}
\includegraphics{671f05.eps}
\caption{Four typical realizations of the process in (\protect\ref
{eqn:bills03}).} \label{fig:generated}
\end{figure}
\subsection{Monthly minimum temperature data}
In this section the monthly mean minimum temperature at Melbourne in
Australia is considered. The data set is obtainable from the Bureau
of Meteorology of the Australian Government (\href
{http://www.bom.gov.au/climate/data/}{http://www.bom.gov.au/climate/data/}). The plots for the original
series and its deseasonalized version are shown in Figure~\ref
{fig:australia}. This data set has been investigated by \cite
{Koenker:2005} who pointed out that, due to the quantile dependent
behavior visible in the scatter plots, linear autoregressive models
are insufficient to describe the data. Our method was
applied to this data set at various quantiles and for all cases one
break point was found near the year 1960. This agrees with a visual
inspection of Figure~\ref{fig:australia}.
\begin{figure}[b]
\includegraphics{671f06.eps}
\caption{(a) Monthly minimum air temperature in Melbourne, Australia
from January 1856 to December 2010. (b) Deseasonalized series. The
dashed line represents the estimated break point in August 1962.}
\label{fig:australia}
\end{figure}
\begin{table}
\tablewidth=\textwidth
\tabcolsep=0pt
\caption{Estimated break points at different quantiles for the
Australian temperature data}\label{table:australia}
\begin{tabular*}{\textwidth}{@{\extracolsep{\fill}}lllll@{}}
\hline
&\multicolumn{4}{l}{Quantiles}\\[-5pt]
&\multicolumn{4}{l}{\hrulefill}\\
& \multicolumn{1}{l}{0.25} & \multicolumn{1}{l}{0.5} & \multicolumn
{1}{l}{0.75} & \multicolumn{1}{l}{mult} \\
\hline
Estimated break point & December 1960 &August 1963 & December 1958 &
August 1962 \\
\hline
\end{tabular*}
\end{table}
It can be seen from Table~\ref{table:australia} that the break point
location estimated with the multiple quantile procedure, set up with
equal weights for the three quantiles under consideration, is between
the break point locations estimated at the individual quantiles. This
should always be the case, as the requirement of simultaneous
occurrence of breaks automatically leads to a weighted average
interpretation. In general, one would ideally find weights that favor
quantiles which exhibit the structural break more strongly and attenuate the
impact of quantiles that are only marginally subject to the break.
This would require a closer evaluation of properties of the (piecewise)
density and distribution function of the underlying random process.
\section{Conclusions}
\label{sec:sum}
This article proposes a new segmentation procedure that helps breaking
down a given nonstationary time series into a number of stationary
pieces by means of quantile autoregression modeling. In contrast to
most of the existing literature, this is done either for individual
quantiles or across a collection of quantiles. The proposed method
utilizes the minimum description length principle and a genetic
algorithm to obtain the best segmentation. It has been proved that this
method is asymptotically consistent, and simulation results have
demonstrated that the finite sample performance of the proposed
procedure is quite good. Data applications are also provided with
satisfactory results. It can be seen in particular that our method can
add to second-order time series modeling by enriching the
statistician's tool box via the inclusion of nonlinearity, asymmetry,
local persistence and other distributional aspects. An interesting
problem for future research that shows some potential is the
investigation of the properties of the multiple quantile segmentation
procedure for the case of quantile-dependent break point locations,
thereby loosening the assumption of simultaneous breaks utilized in
this paper.
\begin{appendix}
\section*{Appendix: Proofs}
\label{sec:proof}
\begin{lemma}\label{prop:A.1}
If $(y_t\dvt t\in\mathbb{Z})$ follow a stationary $\QAR(p)$ model
such that the assumptions of Proposition~\ref{prop:3} are satisfied,
then with probability one and for all $\tau\in(0,1)$,
\[
\frac{1}n\sum_{t=1}^n
\rho_\tau(\hat\varepsilon_t)\to E\bigl\{
\rho_\tau(\varepsilon_1)\bigr\}\qquad (n\to\infty),
\]
where $\rho_\tau$ is the check function defined below (\ref{eqn:qar3}).
\end{lemma}
\begin{pf}
The assertion follows as in the proof of Lemma A.1 in \cite
{Aue:Cheung:Lee:Zhong:2014}.
\end{pf}
\ignore{Fix $\tau\in(0,1)$. The quantile autoregression equations
imply that $\hat\varepsilon_t=\varepsilon_t+X_t^\prime(\hat\theta
(\tau)-\theta(\tau))$. It follows from \cite{Koenker:2005} that
$\hat\theta(\tau)$ is strongly consistent for $\theta(\tau)$.
Therefore, with probability one,
\[
\frac{1}n |\sum_{t=1}^n(\hat
\varepsilon_t-\varepsilon_t) |=\frac{1}n |\sum
_{t=1}^nX_t^\prime
\bigl\{\hat\theta(\tau)-\theta(\tau)\bigr\} |\to0.
\]
Consequently, $\frac{1}n\sum_{t=1}^n\varepsilon_t\to E(\varepsilon
_1)$ with probability one by the strong law of large numbers. Since
$\rho_\tau(\cdot)$ is a continuous and measurable function, the
latter limit relation is also true if $\varepsilon_t$ is replaced by
$\rho_\tau(\varepsilon_t)$.
}
\begin{lemma}\label{prop:A.2}
Let $(y_t\dvt t\in\mathbb{Z})$ be a piecewise stationary $\QAR(p)$
model that satisfies the assumptions of Proposition~\ref{prop:3} on
each of the segments. Let $\lambda^0=(\lambda^0_1,\ldots,\lambda
^0_{m^0})$ denote the true segmentation and choose $K=\lfloor\kappa
n\rfloor$, $M=\lfloor\mu n\rfloor$ with $0\leq\kappa<\mu\leq1$.
Then, with probability one for all $\tau\in(0,1)$,
\[
\frac{1}{M-K}\sum_{t=K+1}^{M}
\rho_\tau(\hat\varepsilon_t)\to L_\tau(\kappa,
\mu).
\]
The limit $L_\tau(\kappa,\mu)$ is the sum of two components, $A_\tau
(\kappa,\mu)$ and $B_\tau(\kappa,\mu)$, both of which are given in
the proof.
\end{lemma}
\begin{pf}
There are two cases to consider, namely (1) $K$ and $M$ are contained
in the same segment and (2) $K$ and $M$ are in different segments.
For the case (1), Lemma~\ref{prop:A.1} implies immediately that
\[
\frac{1}{M-K}\sum_{t=K+1}^M
\rho_\tau(\hat\varepsilon_t)\to\rho_{\tau,j}=
A_\tau(\kappa,\mu).
\]
With $B_\tau(\kappa,\mu)=0$, one can set $L_\tau(\kappa,\mu
)=A_\tau(\kappa,\mu)$ and the limit is determined.
For the case (2), there are $1\leq j<J\leq m^0+1$ such that $\kappa\in
[\lambda_{j-1}^0,\lambda_j^0)$ and $\mu\in(\lambda_{J-1}^0,\lambda
_J^0]$. In addition to the residuals $\hat\varepsilon_t$ obtained
from fitting a QAR model to the observations $y_{K+1},\ldots,y_M$, one
also defines residuals $\hat\varepsilon_{t,\ell}$ obtained from
fitting a QAR model on the $\ell$th underlying (true) segment. If now
$t\in\{k_{\ell-1}^0+1,\ldots,k_\ell^0\}$ with $k_\ell^0=\lfloor
\lambda_\ell^0 n\rfloor$, then one gets the decomposition $\rho
_\tau(\hat\varepsilon_t)=\{\rho_\tau(\hat\varepsilon_t)-\rho
_\tau(\hat\varepsilon_{t,\ell})\}+\rho_\tau(\hat\varepsilon
_{t,\ell})$. The sum over the first terms on the right-hand side leads
to a positive bias term $B_\tau(\kappa,\mu)$ determined by the
almost sure limit relation
\begin{eqnarray*}
&&\frac{1}{M-K} \Biggl[\sum_{t=K+1}^{k_{j}^0}
\bigl\{\rho_\tau(\hat\varepsilon_t)-\rho_\tau(
\hat\varepsilon_{t,j})\bigr\} \\
&&{\hphantom{\frac{1}{M-K} \Biggl[}}+\sum_{\ell=j+1}^{J-1}
\sum_{t=k_{\ell-1}^0+1}^{k_\ell^0}\bigl\{\rho_\tau(
\hat\varepsilon_t)-\rho_\tau(\hat\varepsilon_{t,\ell})
\bigr\} +\sum_{t=k_{J-1}^0+1}^M\bigl\{
\rho_\tau(\hat\varepsilon_t)-\rho_\tau(\hat
\varepsilon_{t,J})\bigr\} \Biggr]
\\
&&\quad\to B_\tau(\kappa,\mu).
\end{eqnarray*}
The remaining segment residuals $\hat\varepsilon_{t,\ell}$ allow for
an application of Lemma~\ref{prop:A.1} to each of the underlying
(true) segments, so that, with probability one,
\begin{eqnarray*}
&&\frac{1}{M-K} \Biggl\{\sum_{t=K+1}^{k^0_j}
\rho_\tau(\hat\varepsilon_{t,j})+\sum
_{\ell=j+1}^{J-1}\sum_{t=k^0_{\ell
-1}+1}^{k^0_\ell}
\rho_\tau(\hat\varepsilon_{t,\ell})+\sum
_{t=k_{J-1}^0+1}^M\rho_\tau(\hat
\varepsilon_{t,J}) \Biggr\}
\\
&&\quad\to\frac{1}{\mu-\kappa} \Biggl\{\bigl(\lambda_j^0-
\kappa\bigr)\rho_{\tau
,j}+\sum_{\ell=j+1}^{J-1}
\bigl(\lambda_\ell^0-\lambda_{\ell-1}^0
\bigr)\rho_{\tau,\ell}+\bigl(\mu-\lambda_{J-1}^0\bigr)
\rho_{\tau,J} \Biggr\}
\\
&&\quad=A_\tau(\kappa,\mu),
\end{eqnarray*}
where $\rho_{\tau,j}=E\{\rho_\tau(\varepsilon_{k_j^0})\}$. Setting
$L_\tau(\kappa,\mu)=A_\tau(\kappa,\mu)+B_\tau(\kappa,\mu)$
completes the proof.
\end{pf}
\begin{pf*}{Proof of Theorem~\ref{th:1}}
Denote by $\hat\lambda=(\hat\lambda_1,\ldots,\hat\lambda_{m^0})$
and $\lambda^0=(\lambda^0_1,\ldots,\lambda^0_{m^0})$ the
segmentation chosen by the minimum description length criterion (\ref
{eqn:qar10}) and the true segmentation, respectively. The proof is
obtained from a contradiction argument, assuming that $\hat\lambda$
does not converge almost surely to $\lambda^0$. If that was the case,
then the boundedness of $\hat\lambda$ would imply that, almost surely
along a subsequence, $\hat\lambda\to\lambda^*=(\lambda_1^*,\ldots
,\lambda_{m^0}^*)$ as $n\to\infty$, where $\lambda^*$ is different
from $\lambda^0$. Two cases for neighboring $\lambda_{j-1}^*$ and
$\lambda_j^*$ have to be considered, namely (1) $\lambda_{j^\prime
-1}^0\leq\lambda_{j-1}^*<\lambda_j^*\leq\lambda_{j^\prime}^0$ and
(2) $\lambda_{j^\prime-1}^0\leq\lambda_{j-1}^*<\lambda_{j^\prime
}^0<\cdots<\lambda_{j^\prime+J}^0<\lambda_j^*\leq\lambda
_{j^\prime+J+1}^0$ for some positive integer $J$.
For the case (1), Lemma~\ref{prop:A.1} implies that, almost surely,
\[
\lim_{n\to\infty}\frac{1}n\sum_{t=\hat k_{j-1}+1}^{\hat k_j}
\rho_\tau(\hat\varepsilon_t) \geq\bigl(
\lambda_j^*-\lambda_{j-1}^*\bigr)\rho_{\tau,j^\prime},
\]
where $\rho_{\tau,j^\prime}=E\{\rho_\tau(\varepsilon_{k_{j^\prime
}^0})\}$.
For the case (2), Lemma~\ref{prop:A.2} gives along the same lines of
argument that, almost surely,
\begin{eqnarray*}
\lim_{n\to\infty}\frac{1}n\sum
_{t=\hat k_{j-1}+1}^{\hat k_j}\rho_\tau(\hat
\varepsilon_t)
&>& \frac{1}{\lambda_j^*-\lambda_{j-1}^*} \Biggl\{\bigl(\lambda
_{j^\prime
}^0-
\lambda_{j-1}^*\bigr)\rho_{\tau,j^\prime}\\
&&\hphantom{\frac{1}{\lambda_j^*-\lambda_{j-1}^*} \Biggl\{}{}+\sum
_{\ell=j^\prime
+1}^{j^\prime+J+1}\bigl(\lambda_\ell^0-
\lambda_{\ell-1}^0\bigr)\rho_{\tau
,\ell}+\bigl(
\lambda_j^*-\lambda_{j^\prime+J}^0\bigr)
\rho_{\tau,j^\prime
+J+1} \Biggr\}.
\end{eqnarray*}
Taken together, these two inequalities, combined with the fact that
asymptotically all penalty terms in the definition of the \textsc{mdl}
in \eqref{mdl-lim} vanish, give, almost surely,
\begin{eqnarray*}
\lim_{n\to\infty}\frac{1}n{\textsc{mdl}}
\bigl(m^0,\hat\lambda,\hat p|\tau\bigr) &=&\lim_{n\to\infty}
\frac{1}n\sum_{j=1}^{m^0+1}\sum
_{t=\hat
k_{j-1}+1}^{\hat k_j}\rho_\tau(\hat
\varepsilon_t)
\\
&>&\lim_{n\to\infty}\frac{1}n\sum
_{j=1}^{m^0+1}\sum_{t=k^0_{j-1}+1}^{k_j^0}
\rho_\tau(\varepsilon_t) =\lim_{n\to\infty}
\textsc{mdl}\bigl(m^0,\lambda^0,p^0|\tau
\bigr),
\end{eqnarray*}
which is a contradiction to the definition of the MDL minimizer.
\end{pf*}
\begin{pf*}{Proof of Corollary~\ref{cor:1}}
Recall that the minimum description length criterion for multiple
quantiles $(\tau_{1},\ldots,\tau_{L})$ is given in (\ref
{eqn:qar11}). It follows from Theorem~\ref{th:1} that at any
individual quantile $\tau_{\ell}$, the minimizer, say, $(\hat
{\lambda}_\ell,\hat{p}_\ell)$ of the minimum description length
criterion (\ref{eqn:qar10}) is consistent for $(\lambda^0,p^0)$. It
follows that the minimizer $(\hat{\lambda},\hat{p})$ of (\ref
{eqn:qar11}) is consistent as it is a weighted sum of several criteria
in the form of (\ref{eqn:qar10}).
\end{pf*}
\end{appendix}
|
1,314,259,995,053 | arxiv | \section{Introduction}
In the Minkowski background, the classical stability of a vacuum
of a scalar field is guaranteed if its mass squared
(i.e., the curvature of the potential) is positive.
However, this does not imply the quantum stability of the vacuum.
It may be metastable depending on the global shape of the potential.
If the vacuum is a local minimum but not a global minimum of the
potential, it may decay into a stabler state via the bubble
nucleation due to quantum tunneling~\cite{Coleman:1977py,Callan:1977pt}.
This situation remains essentially the same even if we take gravity
into account~\cite{Coleman:1980aw}.
In an AdS background, the situation is more complicated.
Indeed, it is known that an AdS vacuum may be classically stable
even if the mass squared is negative, provided that it is at or
above the BF bound~\cite{Breitenlohner:1982jf,Breitenlohner:1982bm},
\begin{eqnarray}
m^2_{\rm BF} \ell^2 = - \frac{(d-1)^2}{4} \ ,
\label{bf}
\end{eqnarray}
where $\ell$ is the curvature radius of the AdS spacetime
and $d$ is the spacetime dimension.
Of course, as in the case of the Minkowski spacetime,
this does not guarantee the quantum stability of the system.
However, in contrast to the Minkowski case, one cannot
judge the stability of a tachyonic vacuum just by looking
at the global shape of the potential.
In fact, the system is perfectly stable even
for an unbounded potential such as an inverted quadratic
potential provided that the mass squared satisfies
the BF stability bound.
So, the question is how to judge the stability of a vacuum.
To this end, we mention that there exists an algebraic criterion
for the global stability of an AdS vacuum.
It was proved that if the potential takes the form,
\begin{eqnarray}
V(\phi) = (d-2) \left(\frac{dP(\phi)}{d\phi}\right)^2
- (d-1) P(\phi)^2\,,
\label{super}
\end{eqnarray}
then any non-trivial field configuration has
a positive definite energy~\cite{Boucher:1984yx,Townsend:1984iu}.
Hence, the system is stable.
In other words, considering (\ref{super}) as a first-order
nonlinear differential equation for $P$ for a given potential $V$,
the existence of a real solution for $P$ guarantees
the absolute stability of the system.
This is a sufficient condition for
the stability (the stability condition has recently been further refined in
\cite{Hertog:2005hm,Amsel:2006uf,Amsel:2007im,Faulkner:2010fh,Amsel:2011km}).
Even for this sufficient condition, knowing the explicit form
of a potential does not help us much to know the stability
since it involves solving the nonlinear differential equation.
Given this situation, we take another strategy.
Namely, we consider instability conditions instead of stability
conditions.
It is known that if there exists an instanton in the system,
then it is quantum mechanically unstable.
Thus we look for instanton solutions. Specifically, we
look for a class of potentials which admit exact analytical
instanton solutions.
To the best of our knowledge, no one has found instantons
representing the decay of a tachyonic vacuum
(except for instantons with unconventional boundary
conditions~\cite{Hertog:2004rz,Hertog:2005hu}
which are not related to the vacuum decay).
This means no one has found a potential which is classically stable
but quantum mechanically unstable.
In this paper, extending the method developed
in \cite{Kanno:2011vm,Kanno:2012zf}, we look for
exact instantons for a class of
potentials which satisfies the BF stability bound.
The class of potentials contains two parameters; one
of them controls the value of the mass squared and
the other the global shape of the potential.
We compute everything analytically. In particular,
the bounce action is analytically computed and found
to be positive and finite.
Thus we find a class of tachyonic vacua which are
classically stable but quantum mechanically unstable.
Interestingly, our method automatically excludes the
existence of instanton solutions for a potential with
mass squared below the BF bound. Since the vacuum for such
a potential is already classically unstable, this result seems
reasonable, though we should not perhaps claim it as a general
theorem at the moment because we have not proved the complete
generality of our method yet.
In order to understand better the reason behind the
quantum mechanical instability, we calculate
various quantities as functions of the model parameters.
Comparing the algebraic criterion with our findings, we discuss
stability criteria for tachyonic vacua in AdS spacetime.
The paper is organized as follows.
In section II, we formulate instanton solutions in an asymptotically
AdS spacetime.
In section III, we show that instantons exist only for the potential
with the mass squared above the BF bound. In section IV,
we find exact instanton solutions, which represent the instability of
the tachyonic state at or above the BF bound.
The final section is devoted to discussion and conclusion.
\section{Formalism}
We consider the $d$-dimensional Euclidean action for a scalar
field $\phi$ coupled with gravity:
\begin{eqnarray}
S_{E} =-\frac{1}{2\kappa^2}\int_{M} d^dx\sqrt{g}~R
- \frac{1}{\kappa^2}\int_{\partial M} d^{d-1}x \sqrt{h}~K
+\int_{M} d^dx \sqrt{g}\left[~
\frac{1}{2}g^{\mu\nu}\partial_\mu\phi\partial_\nu\phi
+V(\phi)
~\right]\,,
\label{basicaction}
\end{eqnarray}
where $\kappa^2=8\pi G$, $R$ is the Ricci scalar of the metric
$g_{\mu\nu}$, $h$ is the determinant of the induced metric on
the boundary, and $K$ is the trace part of the extrinsic curvature.
The second term is necessary to make the variational principle
consistent when the spacetime is non-compact,
called the Gibbons-Hawking boundary term~\cite{Gibbons:1976ue,Hawking:1995fd}.
Here we note that the critical case $m^2 =m_{\rm BF}^2$ needs
special care. In this case, one has two different theories
depending on the choice of the asymptotic boundary
condition~\cite{Klebanov:1999tb}. One may stick to
the original action~(\ref{basicaction}). But in this case
it is known that the theory is unstable~\cite{Hertog:2003xg}.
So to discuss the quantum instability, we consider
the theory with an additional boundary term,
\begin{eqnarray}
S_{\rm B}
= - \frac{1}{2}\int_{\partial M} d^{d-1}x
\sqrt{h} n^\mu \phi \partial_\mu \phi\,,
\label{extra}
\end{eqnarray}
where $n^\mu$ denotes a unit normal vector of the boundary $\partial M$.
It is known that adding this boundary term makes the system perturbatively
stable~\cite{Klebanov:1999tb}. In subsequent sections we
consider this theory when the mass squared saturates the BF bound.
Assuming $O(d)$-symmetry, we consider the metric of the form,
\begin{eqnarray}
ds^2=a(z)^2\left(dz^2 + d\Omega^2_{d-1}\right) \,,
\end{eqnarray}
and the scalar $\phi=\phi(z)$.
Under the $O(d)$-symmetry, the action reduces to
\begin{eqnarray}
S_E=v_{S^{d-1}}\left[-\frac{(d-1)(d-2)}{2\kappa^2}\int dz
\left(a^{d-4}\dot a^2+a^{d-2}\,\right)
+ \int dz\,a^{d-1}
\left(\frac{1}{2a}\dot\phi^2 + a\,V\right)\right]\,,
\label{action}
\end{eqnarray}
where the dot denotes a derivative with respect to
$z$ ($\dot{~}=d/dz$)
and $v_{S^{d-1}}$ is the volume of a unit
$(d-1)-$dimensional sphere.
The equations of motion are
\begin{eqnarray}
(d-1)(d-2)\left(\frac{\ddot a}{a}-1\right)
=\kappa^2\left[-(d-3)\dot\phi^2-4a^2V\right]\,,
\label{metric1}
\end{eqnarray}
and
\begin{eqnarray}
\ddot\phi+(d-2)\frac{\dot a}{a}\dot\phi-a^2\frac{dV}{d\phi}=0 \, .
\label{sc1}
\end{eqnarray}
The Hamiltonian constraint, which is an integral of
(\ref{metric1}) with a specific integration constant, is
\begin{eqnarray}
(d-1)(d-2)\left[\left(\frac{\dot a}{a}\right)^2-1\right]
=2\kappa^2\left(\frac{1}{2}\dot\phi^2-a^2V\right)\, .
\label{hc1}
\end{eqnarray}
We now construct an exact instanton solution in the presence
of gravity by extending the method developed
in \cite{Kanno:2011vm,Kanno:2012zf}.
Namely, instead of giving the form of the potential first, we
consider the condition on the form of the scale factor for the
existence of a regular instanton solution and look for
a function describing the scale factor that enables us to
derive the potential as a function of the scalar field analytically.
Since we are interested in the decay of an AdS vacuum,
instantons are required to be asymptotically AdS.
For a pure AdS spacetime with the curvature radius $\ell$,
we have $a=\ell/\sinh z$ ($0<z<\infty$) where $z\to0$
corresponds to spatial infinity and $z\to\infty$ to the origin.
Thus for an asymptotically AdS spacetime,
we set the scale factor in the form,
\begin{eqnarray}
a(z) = \frac{\ell}{\sinh z} ~f(\tanh z ) \ ,
\qquad\quad \lim_{z\rightarrow 0} f = 1 \ ,
\end{eqnarray}
where the function $f(\tanh z)$ is assumed to be regular at
$0<z<\infty$, and $\ell$ is the AdS curvature radius
in the asymptotic region. Since the function $f$ describes
deformation from AdS spacetime, we call $f$ the deformation
function.
The above form of the scale factor guarantees the asymptotic
AdS nature of the metric.
For convenience, we introduce the variable $x$ by $x=\tanh z$.
In terms of $x$, we have
\begin{eqnarray}
e^z=\sqrt{\frac{1+x}{1-x}}\,,\qquad
\frac{d}{dz}=(1-x^2)\frac{d}{dx}\,,
\qquad
\frac{d}{dx}=\cosh^2z\frac{d}{dz}\,,
\end{eqnarray}
and the scale factor is expressed as
\begin{eqnarray}
a=\ell~\frac{\sqrt{1-x^2}}{x}\,f(x)\,.
\end{eqnarray}
The range $0<z<\infty$ is mapped onto $0<x<1$.
Using Eqs.~(\ref{metric1}) and (\ref{hc1}), we can express
$\dot\phi^2$ and $V$ in terms of $\ddot a/a$ and $\dot a/a$, and
hence in terms of the function $f(x)$ and its derivatives.
The resulting expressions are
\begin{eqnarray}
\frac{\kappa^2}{2(d-2)} \left( \frac{d\phi}{dx} \right)^2
= \left( \frac{f'}{f} \right)^2 - \frac{1}{2} \frac{f''}{f}
- \frac{1}{x}\frac{f'}{f}\,,
\label{dphi}
\end{eqnarray}
and
\begin{eqnarray}
\frac{2\kappa^2 \ell^2}{d-2}\,V =
- \frac{x^2 (1-x^2)}{f^2} \left\{\frac{f''}{f}
+ (d-3)\left(\frac{f'}{f}\right)^2
\right\} + 2 \frac{x(x^2 + d-2)}{f^2} \frac{f'}{f}
- \frac{d-1}{f^2} \,,
\label{potential}
\end{eqnarray}
where the prime denotes an $x$-derivative (${}'=d/dx$).
Thus, if we specify the function $f(x)$ in these
equations, both of $d\phi/dx$ and $V$ are given as
a function of $x$. Integrating the expression for $d\phi/dx$
one obtains $\phi$ as a function of $x$. Then combining
this with the expression for $V$ as a function of $x$,
we obtain $V$ as a function of $\phi$.
In this way, we obtain an instanton solution
$\phi=\phi(x)$ for the potential $V=V(\phi)$.
In subsequent sections, we look for instanton solutions for
potentials that have tachyonic vacua.
Once the function $f$ is specified, the action
for the instanton can be easily computed.
Substituting the Hamiltonian constraint (\ref{hc1})
into the action (\ref{action}), we manipulate
\begin{eqnarray}
S_{\rm instanton}
& =& 2v_{S^{d-1}} \int_{0}^{\infty}dz
\left[ a^d V - \frac{(d-1)(d-2)}{2\kappa^2}\,a^{d-2} \right]
\cr
&=&\frac{(d-2)\ell^{d-2}}{\kappa^2}v_{S^{d-1}}
\int_{0}^{1}dx\,\frac{(1-x^2)^{\frac{d}{2}-1}}{x^d}f^{d-2}
\cr
&&\hspace{1cm}\times\left[
-x^2(1-x^2)\left\{\frac{f''}{f}+(d-3)\left(\frac{f'}{f}\right)^2
\right\}
+2x(x^2+d-2)\frac{f'}{f}
-\frac{d-1}{1-x^2}
\,\right]\,.
\label{actionwhc}
\end{eqnarray}
Note that we add the boundary term (\ref{extra}),
\begin{eqnarray}
S_B=\frac{v_{S^{d-1}}\ell^{d-2}}{2}
\left[\frac{1}{x^{d-2}}\phi\frac{d\phi}{dx}\right]_{x\to0}\,,
\label{BFbterm}
\end{eqnarray}
to the above in the case the mass squared saturates the BF bound.
For an asymptotically AdS instanton solution,
the above action (\ref{actionwhc}) diverges. However, for
instantons that contribute to the vacuum decay,
the difference between this action and the AdS action
$S_{\rm AdS}$ should be finite, where $S_{\rm AdS}$ is given by
\begin{eqnarray}
S_{\rm AdS}=\frac{(d-2)\ell^{d-2}}{\kappa^2}v_{S^{d-1}}
\int_{0}^{1}dx\,\frac{(1-x^2)^{\frac{d}{2}-1}}{x^d}
\left[-\frac{d-1}{1-x^2}\right]\,.
\end{eqnarray}
The decay rate is given by $\Gamma\sim e^{-B}$ with
\begin{eqnarray}
B &=& S_{\rm instanton} - S_{\rm AdS}
\cr
&=& \frac{(d-2)\ell^{d-2}}{\kappa^2}v_{S^{d-1}} \int^1_0 dx
\frac{(1-x^2)^{\frac{d}{2}-1}}{x^d}
\cr
&&\qquad\times\left[\frac{d-1}{1-x^2}(1-f^{d-2})
+ 2x(x^2 +d-2) f^{d-3}f' -x^2 (1-x^2) f^{d-2}
\left\{ \frac{f''}{f} + (d-3)\left(\frac{f'}{f}\right)^2
\right\}
\right]\,.
\label{Bdef}
\end{eqnarray}
In the case $m^2=m_{\rm BF}^2$, we add (\ref{BFbterm}) to the above.
The condition that the above integral be finite
constrains the behavior of the deformation function $f$
in the asymptotically AdS region.
Namely, the solution should approach the AdS spacetime
sufficiently fast to make the integral converge.
Analyzing the behavior of the integrand of Eq.~(\ref{Bdef})
at $x=0$, which we perform in the next section,
we find that the deformation function should satisfy
\begin{eqnarray}
1-f =O\left( x^{n} \right) \quad
\mbox{where}\quad
n=d-2\quad\mbox{or}\quad n\geq d-1 \, .
\label{condition}
\end{eqnarray}
\section{Existence of instantons satisfying the BF bound}
\label{sec:analysis}
Here we consider the condition for an instanton to exist
based on our method presented in the previous section.
Since we are interested in the role of the BF bound
in the quantum stability, we seek for instantons describing
the decay of a tachyonic state.
In this respect we note that instantons we look for resemble
the Linde-Lee-Weinberg instanton~\cite{Linde:1981zj,Lee:1985uv}
in the flat spacetime, in that there is no potential barrier.
Let us first perform the asymptotic analysis
of an instanton solution in the asymptotically AdS region.
We assume that the deformation function can be expanded
around $x\sim 0$ as
\begin{eqnarray}
f = 1 - b\, x^n + \cdots \ ,
\label{expansion}
\end{eqnarray}
where $b$ is a constant and $n>0$.
As long as we restrict ourselves to $O(d)$ symmetric
solutions, this assumption seems quite general.
For $f$ of the form (\ref{expansion}), Eq.~(\ref{dphi}) gives
\begin{eqnarray}
\frac{\kappa^2}{2(d-2)}\left( \frac{d\phi}{dx} \right)^2
= \frac{n(n+1)}{2}\, b\, x^{n-2} +\cdots\,.
\end{eqnarray}
Since the left hand side of the above equation
is positive, $b$ has to be positive.
Then it can be solved as
\begin{eqnarray}
\phi = \frac{2}{\kappa}
\sqrt{\frac{(d-2)(n+1)}{n}\,b}\ x^{n/2} + \cdots\,.
\label{app:scalar}
\end{eqnarray}
On the other hand, Eq.~(\ref{potential}) gives the potential
\begin{eqnarray}
\kappa^2 \ell^2 V = -\frac{(d-1)(d-2)}{2}
+ \frac{n(n-2d+2)}{8} \kappa^2 \phi^2 + \cdots \ .
\end{eqnarray}
where we have used Eq.~(\ref{app:scalar}).
The mass of this system is read off from the coefficient of $\phi^2$,
\begin{eqnarray}
m^2 \ell^2 &=& \frac{n(n-2d+2)}{4}
\cr
&=& \frac{\left( n- d+1 \right)^2}{4} +m_{\rm BF}^2\ell^2 \ .
\label{mass1}
\end{eqnarray}
where $m^2_{\rm BF}$ is defined in Eq.~(\ref{bf}).
This tells us that the mass squared always satisfies the BF bound
and it is tachyonic for $n<2d-2$ and
saturates the BF bound when $n= d-1$.
Note that we can express $n$ in terms of $m^2$ by
inverting the above relation,
\begin{eqnarray}
\frac{n}{2} = \frac{d-1 \pm \sqrt{(d-1)^2 + 4m^2 \ell^2}}{2} \,.
\label{npm}
\end{eqnarray}
This agrees with the standard asymptotic behavior of a
scalar field with mass squared $m^2$ in asymptotically AdS spacetime.
For an instanton to mediate the vacuum decay, the
decay rate should be finite. This can be examined by
expanding the integrand of the bounce action $B$ given by
Eq.~(\ref{Bdef}) around $x=0$. The result is
\begin{eqnarray}
B = \frac{(d-2)\ell^{d-2}\,v_{S^{d-1}}}{\kappa^2} \int^1_0
\frac{dx}{x^d} \left[ (n-d+1)(n-d+2)\, b \,x^n + \cdots \right]
\ .
\end{eqnarray}
The decay rate is apparently finite for $n>d-1$, that is,
$m^2 > m_{\rm BF}^2$.
In the critical case of $n=d-1$, the coefficient
of $x^n$, which would lead to a logarithmic divergence, vanishes.
Hence, the action converges even in this critical case.
In addition, the boundary term (\ref{BFbterm}) is evaluated as
\begin{eqnarray}
S_B= \frac{(d-2)\ell^{d-2}\,v_{S^{d-1}}}{\kappa^2}\,
b\,(n+1)x^{n-d+1}|_{x\to0}\,.
\end{eqnarray}
Since the critical case corresponds to $n=d-1$, this is indeed
finite and contributes to the bounce action. Note that
the boundary term vanishes for $n>d-1$.
Interestingly there is yet another case for which $B$ is finite.
For $n=d-2$, one also finds the leading order coefficient
vanishes. By analyzing the next leading order term, one further
finds it vanishes as well. Hence $B$ is finite in this case, too.
To summarize, the decay rate is finite for $n=d-2$ and $n\geq d-1$.
Note that the above analysis is quite general in the sense
that it involves only the asymptotic behavior in the asymptotically
AdS region and we have not imposed any condition on the mass
of the scalar field a priori.
We have only assumed the existence of an instanton with
the asymptotic behavior~(\ref{expansion}) and imposed
the positivity of $(d\phi/dx)^2$.
Therefore, it should be applicable to any Euclidean solutions.
The important conclusion at this stage is that
if there exists an instanton that satisfies
the asymptotic behavior~(\ref{expansion}), it must have
its mass squared at or above the BF bound.
\section{Exact Instanton Solutions}
In the previous section, we have shown that instantons exist only
for potentials satisfying the BF bound.
In this section, we explicitly find a series of
exact instanton solutions.
We consider a solution given by
\begin{eqnarray}
f(x) = \frac{c}{c+x^n}
\qquad(c>0)\,.
\end{eqnarray}
where $n$ is an arbitrary real number satisfying $n=d-2$
or $n\geq d-1$.
We notice that the asymptotic form,
\begin{eqnarray}
f(x) \sim 1 - \frac{1}{c} x^n + \cdots
\end{eqnarray}
matches the expansion (\ref{expansion}).
The parameter $n$ determines the mass squared at the metastable
vacuum as seen from the analysis in the previous section,
while $c$ determines the global shape of the potential.
Inserting the above into (\ref{dphi}), we find
\begin{eqnarray}
\kappa \frac{d\phi}{dx}
= \pm\sqrt{\frac{(d-2)n(n+1) x^{n-2} }{c +x^n}} \ .
\end{eqnarray}
This can be readily integrated to give
\begin{eqnarray}
\kappa\phi =
\pm\sqrt{\frac{4(d-2)(n+1)}{n}} \sinh^{-1} \frac{x^{n/2}}{\sqrt{c}} \,,
\qquad
x^{n/2} =\sqrt{c}\sinh\left(\kappa\sqrt{\frac{n}{4(d-2)(n+1)}}|\phi|
\right)\,.
\label{xphi}
\end{eqnarray}
Since $0<x<1$, the range of the scalar field is restricted to
$0<\phi<\sqrt{\frac{4(d-2)(n+1)}{n\kappa^2}}\sinh^{-1}1/\sqrt{c}$.
The instanton covers this range of the potential.
From (\ref{potential}), the potential is given by
\begin{eqnarray}
\frac{2\kappa^2 \ell^2}{d-2} V &=& -(d-1) +\frac{(n+1)(n-2d+2)}{c}~x^n
- \frac{n(n+1)}{c}~x^{n+2}
\cr &&\qquad
- \frac{(n+1)\{(d-2)n+d-1\}}{c^2}~x^{2n}
+ \frac{n\{(d-2)n-1\}}{c^2}~x^{2n+2}\ .
\end{eqnarray}
Inserting (\ref{xphi}) into the above, we obtain
the potential as a function of $\phi$.
\begin{eqnarray}
\frac{2\kappa^2 \ell^2}{d-2} V &=& -(d-1) + (n+1)(n-2d+2) \sinh^2
\left(\kappa\sqrt{\frac{n}{4(d-2)(n+1)}}\,|\phi|\right)
\cr
&&
- n(n+1)~c^{\frac{2}{n}} \sinh^{2+\frac{4}{n}}
\left(\kappa\sqrt{\frac{n}{4(d-2)(n+1)}}\,|\phi|\right)
\cr
&&
- (n+1)\{(d-2)n+d-1\} \sinh^4
\left(\kappa\sqrt{\frac{n}{4(d-2)(n+1)}}\,|\phi|\right)
\cr
&&
+ n\{(d-2)n-1\}~c^{\frac{2}{n}} \sinh^{4+\frac{4}{n}}
\left(\kappa\sqrt{\frac{n}{4(d-2)(n+1)}}\,|\phi|\right) \ .
\end{eqnarray}
Note that there is a branch cut at $\phi=0$ except for $n= 1$, $2$ and $4$.
However, the potential is sufficiently smooth for the other values
of $n$ in the sense that its second derivative is finite and
continuous at $\phi=0$.
As an example, the potential in the case of $d=4$, $n=4$ and $c=1$
is shown in Fig.~\ref{fig:1}.
\begin{center}
\begin{figure}[t]
\includegraphics[width=95mm]{potential.eps}
\caption{
The potential as a function of $\phi$ is depicted for $c=1$ and $n=4$.
The green line shows the range over which the instanton runs. The dashed
line shows the potential $V=-3/(\kappa\ell)^2-m^2\phi^2/2$ for
comparison. The horizontal axis is in units of $\kappa\phi$
and the vertical axis in units of $1/(\kappa\ell)^2$.
}
\label{fig:1}
\end{figure}
\end{center}
If we expand the above potential around $\phi=0$, we find
\begin{eqnarray}
&&V= -\frac{(d-1)(d-2)}{2\kappa^2\ell^2}
+\frac{m^2}{2}\phi^2 + \cdots\,;
\cr
\cr
&&\quad
m^2 = \frac{n(n-2d+2)}{4\,\ell^2}
=\frac{\left( n- d+1 \right)^2}{4\,\ell^2} +m_{\rm BF}^2\,,
\quad
m_{\rm BF}^2=-\frac{(d-1)^2}{4\,\ell^2}\,.
\end{eqnarray}
We see that the mass squared agrees with
the general analysis in Eq.~(\ref{mass1}).
Thus, our instanton solutions indeed satisfy the BF bound,
implying that tachyonic vacua which are perturbatively stable
can be quantum mechanically unstable.
To further support this picture, we have calculated the energy at
the nucleation surface and found it is zero for $n\geq d-1$,
which is consistent with the interpretation that the analytic
continuation of the instanton describes the state after the decay
of the AdS vacuum.
In the critical case $m^2 =m_{\rm BF}^2$, the contribution of
the boundary term (\ref{extra}) is essential to make the energy zero.
Without it the energy turns out to be negative. But this is consistent
with the fact that the system is unstable
unless the boundary term is added.
In the case $n=d-2$, the energy is divergent to minus infinity
in spite of the fact that the bounce action $B$ is finite.
Mathematically this is because this solution corresponds to
the minus sign of Eq.~(\ref{npm}) for $m^2\ell^2=-d(d-2)/4$,
hence picks up the singular behavior in the asymptotically AdS region.
On the other hand, if we adopt the boundary term (\ref{extra}),
the energy vanishes. In this case, the bounce action diverges
to plus infinity, resulting in the vanishing decay rate.
This probably means that the instanton solution in this case
is not relevant for the vacuum decay.
Study of the exact meaning of this solution is left as a future issue.
As shown in the previous section, the bounce action
for the above solution is finite for $n=d-2$ and $n\geq d-1$.
To confirm this and to see how the bounce action $B$ depends
on the shape of the potential, we compute it below.
Although we can compute $B$ for any spacetime dimension numerically
if necessary (even analytically in the case of even spacetime dimensions),
we focus on the case of four dimensions ($d=4$).
\subsection{The bounce action in four dimensions}
We focus on four dimensions, $d=4$.
As a simple example that satisfies the condition~(\ref{condition}),
we first consider the case $n=4$,
\begin{eqnarray}
f(x) = \frac{c}{c+x^4} \qquad(c>0)\,.
\end{eqnarray}
For this, we obtain a completely analytical potential,
\begin{eqnarray}
\kappa^2 \ell^2 V = -3 - 10 \sinh^2 \frac{\kappa \phi}{\sqrt{10}}
- 20 \sqrt{c} \sinh^3 \frac{\kappa \phi}{\sqrt{10}}
- 55 \sinh^4 \frac{\kappa \phi}{\sqrt{10}}
+ 28 \sqrt{c} \sinh^5 \frac{\kappa \phi}{\sqrt{10}} \ .
\label{typical}
\end{eqnarray}
This potential is shown in Fig.~\ref{fig:1} for the parameter $c=1$.
The bounce action~(\ref{Bdef}) can be also evaluated analytically as
\begin{eqnarray}
B=B_4&\equiv& \frac{\pi^2 \ell^2}{8\kappa^2}
\left[ \frac{8}{1 + c}
- \frac{10 \sqrt{2}}{c^{1/4}}
\tan^{-1} \left(1 - \frac{\sqrt{2}}{c^{1/4}}\right)
+ \frac{10 \sqrt{2}}{c^{1/4}}
\tan^{-1} \left(1 + \frac{\sqrt{2}}{c^{1/4}}\right)
+ \frac{5 \sqrt{2}}{c^{1/4}}
\log \frac{1 - \sqrt{2} c^{1/4} + \sqrt{c}}
{1 + \sqrt{2} c^{1/4} + \sqrt{c}} \right]
\cr
\cr
&=&\frac{\ell^2}{\kappa^2}\times
\left\{
\begin{array}{ll}
\displaystyle\frac{5\sqrt{2}\,\pi^3}{4\,c^{1/4}}
-4\pi^2+O(c^2)
\quad &\mbox{for}\,\ c\ll1\,,
\\
~
\\
\displaystyle\frac{8\pi^2}{3\,c}
\left(1-\frac{9}{14c}+O(c^{-2})\right)
\quad &\mbox{for}\,\ c\gg1\,,
\end{array}\right.
\label{B:n=4}
\end{eqnarray}
where we used $v_{S^{3}} = 2\pi^2$.
\begin{figure}[ht]
\begin{center}
\begin{minipage}{8.5cm}
\begin{center}
\vspace{-7mm}
\hspace{-1.5cm}
\includegraphics[width=95mm]{bounce2.eps}\vspace{0cm}
\caption{The bounce action as a function of $c$ is depicted for
$n=4$. The decay rate is larger for large $c$.}
\label{fig:2}
\end{center}
\end{minipage}
\hspace{1mm}
\begin{minipage}{8.5cm}
\begin{center}
\hspace{-1.5cm}
\includegraphics[width=95mm]{potential3.eps}\vspace{0cm}
\caption{The potential in the case of $n=4$
as a function of $\phi$ for several values of $c$
(drawn with the dashed lines). The units are the
same as in Fig.~1.
The solid lines show the region where the instanton runs.}
\label{fig:3}
\end{center}
\end{minipage}
\end{center}
\end{figure}
Let us now turn to general cases of arbitrary $n$,
\begin{eqnarray}
f(x) = \frac{c}{c+x^n} \qquad(c>0)\,.
\end{eqnarray}
For this instanton, for $n=2$ or $n > 3$,
we can calculate the bounce action as
\begin{eqnarray}
B_n &=&\frac{4\pi^2 \ell^2 }{\kappa^2}
\left[-1-\frac{c}{(1+c)n}
+\frac{1+n}{n}
F\left( 1,\,- \frac{1}{n},\,\frac{n-1}{n} ;\, -\frac{1}{c}\right)
\right]
\cr
\cr
&=& \frac{\ell^2}{\kappa^2}\times
\left\{
\begin{array}{ll}
\displaystyle
\frac{4(n+1)\pi^3}{n^2\sin(\pi/n)\,c^{1/n}}-4\pi^2+O(c)
\quad &\mbox{for}\,\ c\ll1\,,
\\
~\\
\displaystyle
\frac{8\pi^2}{n-1}\frac{1}{c}
\left(1+O(c^{-1})\right)
\quad &\mbox{for}\,\ c\gg1\,,
\end{array}\right.
\label{B:n}
\end{eqnarray}
where $F(\alpha,\,\beta,\,\gamma\,;\,u)$ is a hypergeometric function.
We see the bounce action decreases as $c$ or $n$ increases.
In the case $n=3$ which corresponds to the BF bound $m^2 = m_{\rm BF}^2$,
we add the boundary term (\ref{BFbterm}) to obtain
\begin{eqnarray}
B_3 &=&\frac{4\pi^2 \ell^2 }{\kappa^2}
\left[-1-\frac{c}{(1+c)n}
+\frac{1+n}{n}
F\left( 1,\,- \frac{1}{n},\,\frac{n-1}{n} ;\, -\frac{1}{c}\right)
- \frac{1}{c}
\right] \Biggl|_{n=3} + S_{\rm B} \nonumber \\
&=& \frac{4\pi^2 \ell^2 }{\kappa^2}
\left[-1-\frac{c}{(1+c)3}
+\frac{1+3}{3}
F\left( 1,\,- \frac{1}{3},\,\frac{2}{3} ;\, -\frac{1}{c}\right)
+\frac{3}{c} \right]
\nonumber\\
&=& \frac{4\pi^2\ell^2}{\kappa^2}\times
\left\{
\begin{array}{ll}
\displaystyle
\frac{3}{c}+\frac{8\sqrt{3}\pi}{27\,c^{1/3}}-1+O(c)
\quad &\mbox{for}\,\ c\ll1\,,
\\
~\\
\displaystyle
\frac{4}{c}
\left(1+O(c^{-1})\right)
\quad &\mbox{for}\,\ c\gg1\,,
\end{array}\right.
\end{eqnarray}
where we have used $S_B = 16 \pi^2 \ell^2 /(\kappa^2 c)$.
Notice that we have an extra contribution $-1/c$ from the bulk
integral when $n=3$. Because of this extra contribution the bounce
action would become negative if the boundary term were not added.
Again, this seemingly pathological result is consistent with the fact
that the system would be already perturbatively unstable,
implying that the instability would develop classically without
any barriers, if the boundary term were not added.
In Fig.~\ref{fig:2}, we plot $B$ as a function of $c$.
As expected from the above equation, the decay rate
is larger for larger $c$.
This means increasing $c$ makes the tunneling process easier,
or renders the system more unstable.
Since the mass does not depend on $c$,
changing $c$ implies changing the non-linear terms of
the potential. To see the actual shape of the potential,
we plot it in Fig.~\ref{fig:3}.
Apparently, the potential gets steeper near the origin
$\phi=0$ and the region where the instanton runs shrinks
as we increase $c$. Thus, we see that the larger $c$
makes the potential more unstable. This explains the
tendency that the decay rate is larger for larger $c$.
In Fig.~\ref{fig:4}, we show the bounce action $B$ as a function of $n$.
It clearly shows that $B$ decreases as $n$ increases.
It should be noted that for $n>6$ the mass squared is positive
and hence there appears a potential barrier.
This fact seems counter-intuitive because the tunneling becomes
easier to occur as the barrier grows higher.
However, changing $n$ also changes the nonlinear part of the potential.
So we need to see how the nonlinear part of the potential
depends on $n$. In Fig.~\ref{fig:5}, potentials for several values of
$n$ are shown. We see that the potential becomes deeper as $n$
increases. Thus in spite of the increase of the mass squared at
the origin, the potential at larger values of $\phi$
becomes deeper for larger $n$ and the system becomes more unstable.
\begin{figure}[ht]
\begin{center}
\begin{minipage}{8.5cm}
\begin{center}
\vspace{-7mm}
\hspace{-1.5cm}
\includegraphics[width=95mm]{bounce.eps}\vspace{0cm}
\caption{The bounce action as a function of $n$ is
depicted for $c=1$. This shows that tunneling is easier for
large $n$.
}
\label{fig:4}
\end{center}
\end{minipage}
\hspace{1mm}
\begin{minipage}{8.5cm}
\begin{center}
\hspace{-1.5cm}
\includegraphics[width=95mm]{potential2.eps}\vspace{0cm}
\caption{The potential for $c=1$ as a function of $\phi$ for
several different values of $n$ (drawn with the dashed lines).
The units are the same as in Fig.~1.
The solid lines show the region where the instanton runs.}
\label{fig:5}
\end{center}
\end{minipage}
\end{center}
\end{figure}
\subsection{Comparison with stable potentials}
We have obtained a series of quantum mechanically unstable potentials.
We can also generate stable potentials by inspection
using an algebraic condition (\ref{super}).
For example, if we take
\begin{eqnarray}
P(\phi) = 1+ \frac{1}{2}\phi^2 + \frac{1}{300}\phi^3
- \frac{1}{4000}\phi^4+ \frac{1}{90000}\phi^6 \ ,
\end{eqnarray}
we obtain the potential,
\begin{eqnarray}
V(\phi) &=& -3 - \phi^2 +\frac{3}{50} \phi^3
- \frac{7507}{10000}\phi^4 -\frac{753}{25000}\phi^5
+\frac{163}{250000}\phi^6 \nonumber\\
&& \qquad + \frac{23}{1000000}\phi^7 -\frac{2703}{80000000}\phi^8
-\frac{1}{1500000}\phi^9 + \frac{23}{900000000}\phi^{10}
- \frac{1}{2700000000}\phi^{12} \ .
\end{eqnarray}
In Fig.~\ref{fig:6}, we compared the above potential with
the potential (\ref{typical}) with $c=1$.
Taking a close look at the potential,
we find the unstable potential is slightly below the stable one.
This small difference is crucially important in
the determination of the stability.
As $c$ decreases, the difference between the two potentials
near the origin becomes even smaller. Together with the fact
that $B$ diverges as $c^{-1/4}$ as seen in the second line of
Eq.~(\ref{B:n=4}) or (\ref{B:n}), this suggests that our instanton
solution verges towards the instability boundary for $c\ll1$.
\begin{figure}[t]
\centering
\includegraphics[width=115mm]{sp.eps}
\caption{Comparison of stable and unstable potentials.
The red curve shows a stable potential, the blue dashed curve
shows an unstable potential which behaves almost identically to
the stable potential near the origin.
The thick blue line shows the region that the instanton covers.}
\label{fig:6}
\end{figure}
\section{Conclusion}
It is known that in the asymptotically AdS spacetime,
a tachyonic vacuum is perturbatively stable if the mass squared is at
or above the BF bound. But this does not imply the global stability.
To discuss it one may resort to a different
criterion~\cite{Boucher:1984yx,Townsend:1984iu}. Namely,
if the potential is written as
$$
V(\phi) =
(d-2) \left(\frac{dP(\phi)}{d\phi}\right)^2 - (d-1) P(\phi)^2\,,
$$
with some function $P(\phi)$, the vacuum is stable.
This condition is a sufficient condition for the stability.
The above equation may be viewed as a nonlinear differential equation
for $P$ given the potential $V$. Unfortunately, however,
it is in general very difficult to determine whether or not
a real solution for $P$ exists for a generic potential $V$.
In this paper, we took another route, that is, we
looked at a sufficient condition for the instability
instead of stability. In other words, we looked for
instantons with finite decay rate. If we find such an instanton,
it proves the instability of the vacuum.
We indeed found a series of exact instanton solutions
for a class of potentials which satisfy the BF bound.
These instantons have finite bounce action, hence describe
the decay of (tachyonic) vacua.
In other words, the class of potentials we found cannot
be expressed in the form of the above equation.
In passing we also showed that there is no instanton solution
if the mass squared of the potential is below the BF bound,
under a physically reasonable assumption for the asymptotic
behavior of the instanton in the asymptotic AdS region.
In conclusion, we found exact instanton solutions which
destabilize tachyonic vacua at or above the BF bound.
We also noted that our solution can be extended to vacua with barriers
if we allow a slight violation of analyticity of the potential,
in the sense that the potential is kept smooth up to the second
derivative (i.e., it is $C^2$).
It is interesting to explore general exact CDL instantons using
our method by relaxing the requirement of exactness if necessary.
It is also intriguing to explore implication of our instantons
to the AdS/CFT correspondence.
\acknowledgements
We would like to thank Akihiro Ishibashi for useful discussions.
This work is supported in part by the JSPS
Grants-in-Aid for Scientific Research (C) No.~22540274 and (A) No.22244030,
the Grant-in-Aid for Creative Scientific Research No.~19GS0219,
the Grant-in-Aid for Scientific Research on Innovative Area No.21111006.
SK is supported in part by grant PHY-0855447 from the National Science
Foundation. SK would like to thank Yukawa Institute for
Theoretical Physics (YITP) members at Kyoto University for warm
hospitality. A part of this work was done while SK was visiting YITP.
|
1,314,259,995,054 | arxiv | \section{Introduction}
Although convolutional neural networks (ConvNets) have been successfully applied to solve non-trivial image processing problems since the 1990s~\cite{lecun1989backpropagation,lecun2012efficient}, their adoption as a de facto standard for image classification~\cite{russakovsky2015imagenet} and segmentation~\cite{long2015fully} is due largely to recent breakthroughs in network architecture. Beginning with AlexNet in 2012~\cite{krizhevsky2012imagenet}, the annual ImageNet classification challenge (ILSVRC) has been dominated by progressively deeper networks with smaller kernels~\cite{szegedy2015going,simonyan2014very}. Recent solutions to the issues of vanishing and exploding gradients~\cite{glorot2010understanding} have allowed these networks to extend even deeper, with the ILSVRC15 winner (ResNet~\cite{he2016deep}) being 8-fold deeper than VGG.
It is easy to see why the ``deeper is better" trend has led to better performing ConvNets. Constructing even a modest $7 \times 7$ receptive field with stacked $k=3$ kernels requires $45\%$ fewer parameters than a single kernel of size $k=7$. Intuitively it also captures a richer set of features due to additional non-linearity. Recent studies have begun to formalize the expressive power of deep versus shallow networks, finding that classification boundaries acquire local curvature and expressivity as an exponential function of network depth but not breadth~\cite{poole2016exponential}. The only obvious trade-off to this performance is the extra memory necessary to store the intermediate activations in deeper networks.
Motivated by the success of these models in image processing tasks, researchers have begun to investigate ConvNet applications in the video processing domain. Example applications include video classification~\cite{karpathy2014large}, segmentation~\cite{couprie2013indoor} and de-noising~\cite{shi2016real}. An important observation that has emerged from these studies is the importance of 3D convolutional primitives for modelling joint spatio-temporal features; the na\"{\i}ve application of traditional 2D ConvNets frame-by-frame does not capture motion continuity or other rich temporal correlations~\cite{ledig2016photo,tran2015learning}. It is thus unsurprising that simple 3D ConvNets have yielded state-of-the-art performance on video classification benchmarks~\cite{tran2015learning} and volumetric image segmentation, e.g. tracing neurons between electron microscopy samples~\cite{lee2015recursive}.
Given the early success and conceptual simplicity of 3D ConvNets, it is interesting to note that many popular deep learning libraries (e.g. Caffe~\cite{jia2014caffe}) do not provide native support. One simple explanation is that these libraries are optimized for execution on GPUs, and higher-order convolutions require prohibitively large volumes of data with respect to the 16 GB ceiling of today's most advanced GPU hardware. These limitations are clear in previous studies, which either (a) limit the network size~\cite{tran2015learning}, (b) down-sample images to lower resolution~\cite{ji20133d}, or (c) include 3D primitives for only a subset of network layers~\cite{lee2015recursive}.
There are many potential options for circumventing the issue of ConvNet memory usage. The first is to split the network across multiple GPUs, which requires the careful coordination of activation and gradient flow~\cite{dean2012large}. Even in the case of the most successful distributed frameworks for ConvNets~\cite{abadi2016tensorflow}, GPU memory management is largely unresolved. The TensorFlow authors propose two partial solutions warranting further investigation: (a) re-computing versus storing large tensors; and (b) transferring long-lived tensors from GPU to host CPU memory. Instead, we propose an alternative to horizontal scalability for overcoming GPU memory constraints -- a fast implementation of $N$-dimension convolution optimized for multicore CPU systems, which have access to practically unbounded memory on a single node.
\section{Prior Art}
Algorithms for fast convolution have existed in signal processing literature since the 1980s~\cite{winograd1980arithmetic}. The general recipe is to transform both data and kernel into a new space, where expensive sliding window-style convolutions reduce to cheaper element-wise products. The first examples of this approach in ConvNet literature involved Fast Fourier Transforms (FFTs) exploiting the convolution theorem~\cite{mathieu2013fast,vasilache2014fast}. More recently, Lavin and Gray have pioneered the use of the more general class of Winograd-style algorithms~\cite{lavin2016fast,winograd1980arithmetic}. Their implementation and its cuDNN derivatives~\cite{chetlur2014cudnn} have produced state-of-the-art GPU performance on deep networks of small kernels. In this Section we provide a brief overview of the theory underlying this approach, focusing on the aspects that are important for exploiting the architecture of multicore CPUs.
\subsection{Fast Vector Convolution}
\label{ss:vector}
Consider the 1-dimensional convolution $\mathbf{s} = \mathbf{g} * \mathbf{d}$, where the kernel and data vectors are of length $G$ and $D$. This problem can be rephrased as one of polynomial multiplication by introducing the associated polynomials $d(x)$, $g(x)$ and $s(x) = g(x)d(x)$, where the coefficients $s_i = \sum_{k}g_{i - k}d_k$ of $x^{i}$ are the solution to the desired convolution. This computation can be distributed across a set of efficient local computations by considering the Chinese Remainder Theorem (CRT)~\cite{ding1996chinese}, as summarized in Theorem 1. By observing that $s(x) = \left[g(x)d(x)\right] \,\mathrm{mod}\, m(x)$ for any polynomial $m(x)$ of sufficiently high degree, we can exploit Theorem 1 to efficiently calculate $s(x)$, as shown in Algorithm 1, which can be conveniently rephrased in terms of matrix-vector products:
\begin{equation}
\label{eq:winograd_1d}
\mathbf{s} = \mathbf{A} \left[ \,(\mathbf{Cg}) \odot (\mathbf{Bd}) \, \right],
\end{equation}
where $\mathbf{C}$, $\mathbf{B}$ and $\mathbf{A}$ are introduced as the kernel, data and inverse transforms respectively. With respect to Algorithm 1, Step (1) is implemented by the kernel and data transforms, Step (2) by their transformed element-wise product and Step (3) by the final inverse transform.
\begin{theorem}[CRT for Polynomials]
\ \\Let $m(x)=\Pi_{k=1}^{r} m^{(k)}(x)$, where $m^{(k)}(x)$ are pairwise coprime. If $b^{(1)}(x),...,b^{(r)}(x)$ are a set of polynomials then there must exist a unique polynomial $s(x)$ which satisfies the set of congruences:
\begin{align*}
s(x) &\equiv b^{(1)}(x)\,\mathrm{mod}\,m^{(1)}(x) \\
s(x) &\equiv b^{(2)}(x)\,\mathrm{mod}\,m^{(2)}(x) \\
\vdots \\
s(x) &\equiv b^{(r)}(x)\,\mathrm{mod}\,m^{(r)}(x),
\end{align*}
provided the degree of m(x) is not less than that of s(x).
\end{theorem}
\begin{algorithm}[h!]
\caption{Fast Vector Convolution}
\label{alg:example}
\begin{algorithmic}
\STATE {\bfseries Input:} $g(x)$, $d(x)$, $m(x)$
\FOR{$k=1$ {\bfseries to} $r$}
\STATE (1) Compute residual polynomials for $g(x)$ and $d(x)$:
\begin{align*}
g^{(k)}(x) \equiv g(x)\,\mathrm{mod}\,m^{(k)}(x)\\
d^{(k)}(x) \equiv d(x)\,\mathrm{mod}\,m^{(k)}(x)
\end{align*}
\STATE (2) Compute residual polynomial multiplications:
\begin{equation*}
s^{(k)}(x) = \left[g^{(k)}(x) d^{(k)}(x)\right]\,\mathrm{mod}\,m^{(k)}(x)
\end{equation*}
\ENDFOR
\STATE (3) Reduce partial convolutions to solve $s(x)$:
\begin{equation*}
s(x) = \sum_{k=1}^{r} s^{(k)}(x)a^{(k)}(x)
\end{equation*}
\end{algorithmic}
\end{algorithm}
\subsection{Minimal Winograd Algorithms}
\label{ss:minimal}
In the above formulation (\ref{eq:winograd_1d}), the matrices $\mathbf{C}$ and $\mathbf{B}$ are the remainders of the polynomial divisions $g(x)$ and $d(x)$ by $m^{(k)}(x)$ respectively. The derivation of $\mathbf{A}$ is more involved and is omitted for brevity. Importantly, the only parameter required to synthesize these matrices (in addition to the kernel $g(x)$ and data $d(x)$) is the polynomial $m(x)$.
Traditionally, the selection of $m(x)$ has been subject to many constraints. First, it should be chosen such that the transform matrices contain only degree-1 (scalar) values. Lavin has published code that automates this procedure using the Cook-Toom algorithm to produce transformed kernels and data both of length $D$~\cite{lavin_git}. For an unpadded convolution $\mathbf{s} = \mathbf{g} * \mathbf{d}$ of length $S = D - G + 1$ and ignoring the cost of applying the transforms, this fast algorithm therefore requires $SG/D$ fewer computations to calculate than the standard sliding-window approach. Inappropriate selection of $m(x)$ would yield matrices of polynomials (degree $> 1$) that require considerably more scalar multiplications to compute.
In reality, the transformations themselves require expensive matrix multiplications that can outweigh the above saving. Accordingly, existing implementations of fast convolution aim to synthesize matrices enriched for ``simple" (e.g. integer) values. There are two motivations for this. First, it improves numeric stability which can have an impact on double-precision convolutions~\cite{lavin2016fast}. More importantly, it supports the hand-crafting of minimal algorithms. These algorithms reduce the cost of applying transform matrices by identifying and eliminating redundant sub-expressions. A famous instance of this approach was documented by Winograd~\cite{winograd1980arithmetic}. Consider the following matrices:
\begin{align*}
\mathbf{A} &= \begin{bmatrix}
1 & 1 & 1 & 0 \\
0 & 1 & -1 & -1
\end{bmatrix} \\
\mathbf{B} &= \begin{bmatrix}
1 & 0 & -1 & 0 \\
0 & 1 & 1 & 0 \\
0 & -1 & 1 & 0 \\
0 & 1 & 0 & -1
\end{bmatrix} \quad
\mathbf{C} = \begin{bmatrix}
1 & 0 & 0 \\
\frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\[0.3em]
\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} \\[0.3em]
0 & 0 & 1
\end{bmatrix}.
\end{align*}
By substituting these matrices into (\ref{eq:winograd_1d}) and factoring out redundant computations, we arrive at the following minimal algorithm for vector convolution:
\begin{equation*}
\mathbf{d} * \mathbf{g} = \begin{bmatrix}
m_1 + m_2 + m_3 \\[0.3em]
m_2 - m_3 - m_4
\end{bmatrix},
\end{equation*}
where:
\begin{align*}
m_1 = (d_0 - d_2)g_0, \quad m_2 &= (d_1 + d_2)\frac{g_0 + g_1 + g_2}{2}, \\
m_4 = (d_1 - d_3)g_2, \quad m_3 &= (d_2 - d_1)\frac{g_0 - g_1 + g_2}{2}.
\end{align*}
This is a so-called $F(S,G)$ algorithm for vector convolution, here for $S = 2$ and $G = 3$. Importantly, this algorithm only works for fixed length kernel and data vectors (here $D = 4$). Generating $F(S, G)$ algorithms for different combinations requires both (a) searching over the space of possible $m(x)$ polynomials as input to Lavin's or similar code~\cite{lavin_git}, and (b) reducing the matrix multiplications to a minimal set of addition, multiplication and shifting operations. To our knowledge there are no automated solutions to either step and thus only a small set of hand-crafted Winograd-style algorithms (e.g. $F(2,3)$, $F(3,4)$ and $F(2,5)$) have been released as fast CPU~\cite{nnpack} or GPU primitives~\cite{chetlur2014cudnn}.
\section{Deep Tensor Convolution}
Below we present an alternative approach to fast convolution that removes the need to hand-craft minimal algorithms. This new approach is better suited to video and volumetric image processing for two main reasons. First, the number of terms involved in a closed-form solution for 3 and higher-dimensional convolutions makes Winograd-style refactoring impractical. Second, by removing numeric simplicity as a constraint we are instead able to synthesize transforms optimized to CPU architectural constraints, e.g. data that are integer multiples of the AVX register width. This is made possible by the relaxed memory constraints of CPUs and allows us to close the previous CPU-GPU performance gap by a full order-of-magnitude.
We first define $N$-dimensional convolution and describe how existing fast algorithms can be extended to this general case. Instead of crafting a minimal algorithm, we show how relaxed memory constraints and efficient sparse linear algebra of CPU systems can be leveraged to amortize transform costs. Later we show how architecture-aware transform synthesis can lead to further acceleration.
\subsection{Convolution in $N$-Dimensions}
Mathematically, the standard convolutional layer used in 2D ConvNets extends trivially to higher-dimensional tensors. Consider a network where for each layer $i$, kernel $j$ and channel $m$,
the kernel weights $\boldsymbol{\mathcal{G}}^{(i,j,m)} = (g\,_{p,q,r})$ and resulting feature map $\boldsymbol{\mathcal{D}}^{(i,j)} = (d\,_{x,y,z})$ are both 3D tensors. This calculation can be expressed element-wise as:
\begin{equation}
\label{eq:3d}
d\,^{(i+1,j)}_{x,y,z} = f\left( b^{(i,j)} + \sum_m \sum_{p, q, r} g\,^{(i,j,m)}_{p,q,r} d\,^{(i,\, j)}_{x+p,\, y+q,\, z+r} \right),
\end{equation}
\noindent{where $ b^{(i,j)}$ is the bias term and $f$ is a ReLU or other non-linear activation function. This extends to higher dimensions by looping over additional subscripts on $g$ and $d$.}
The dimensionality of feature maps is clearly preserved in (\ref{eq:3d}), e.g. a video at the input produces a video at the output. The triple $(p, q, r)$-loop ranges from 0 to the layer-$i$ kernel size to perform sliding-window convolution, and the $m$-loop is a reduction over the previous layer's output channels. This differs from previous studies where the temporal axis is encoded as network channels and flattened after the first layer~\cite{karpathy2014large,simonyan2014two}, producing a single 2D image or class label at the output. These methods have been shown to produce less accurate results on a broad range of video processing tasks when compared to true 3D ConvNets~\cite{tran2015learning}.
It is also evident from (\ref{eq:3d}) why higher-dimensional ConvNets suffer from issues of impractical memory consumption. Each layer of an $N$-dimensional network requires $\boldsymbol{\mathcal{G}}$ and $\boldsymbol{\mathcal{D}}$ to be stored as $N+2$ and $N+1$--dimensional tensors, owing to their operation over multiple kernels and channels. We believe that this multiplicative effect has likely stalled the adoption of the deeper network architectures that dominate image processing tasks, with recent studies instead compromising on network expressiveness to fit within the 16 GB memory constraints of today's top-end GPUs~\cite{ji20133d,lee2015recursive,tran2015learning}.
\subsection{Accelerating Tensor Convolution}
Sidestepping memory constraints by shifting from GPU to CPU hardware is conceptually trivial, as most popular ConvNet frameworks support execution on both CPU and GPU environments. However, the issue preventing the widespread adoption of CPU implementations is not a lack of software support but the large perceived gap between CPU and GPU performance. This is reminiscent of a large ongoing CPU-vs-GPU debate, with various studies claiming that GPUs provide anywhere from 100-to-1000x speed-up across broad problem domains~\cite{lee2010debunking}. A recent review has demonstrated a similar performance gap in the order of 50x across the most popular ConvNet frameworks~\cite{shi2016benchmarking}. Even if distributed GPU solutions like TensorFlow require tensors to be re-computed or swapped between GPU and host CPU memory~\cite{abadi2016tensorflow}, this overhead is easy to justify if the alternative is a 50-fold increase in single-node execution time.
Here we describe how fast algorithms for convolution can be extended to the general case of $N$-dimensional tensors, where the theoretical speed-up is a substantial $(SG/D)^N$. Although recent studies have begun to explore extensions of FFT-based convolution to 3-dimensions~\cite{zlateski2016znni}, to our knowledge there have been no attempts to extend Lavin and Gray's Winograd-style approach~\cite{lavin2016fast}. In order to extend the fast vector algorithm to 1 to $N$-dimensions, we consider the $n$-mode product of a tensor, $\boldsymbol{\mathcal{X}} \in \mathbb{R}^{I_1 \times I_2 \times \dots \times I_N}$, with a matrix, $\mathbf{U} \in \mathbb{R}^{J\times I_n}$, herein denoted as $\boldsymbol{\mathcal{X}} \times_n \mathbf{U}$~\cite{kolda2009tensor}:
\begin{equation}
\label{eq:tensorprod}
(\boldsymbol{\mathcal{X}} \times_n \mathbf{U})_{i_1, \dots, i_{n-1}, j, i_{n+1}, \dots, i_N} = \sum_{i_n = 1}^{I_n}x_{i_1, \dots, i_N}u_{j, i_n}.
\end{equation}
In our case $\mathbf{U}$ is sparse and $\boldsymbol{\mathcal{X}}$ is dense, so we implement (\ref{eq:tensorprod}) such that $\mathbf{U}$ is traversed in the outermost two loops. We also introduce the following notation for brevity:
\begin{equation*}
\boldsymbol{\mathcal{X}} \times_{n=1}^{N} \mathbf{U}_n = \boldsymbol{\mathcal{X}} \times_{1} \mathbf{U}_1 \times_{2} \dots \times_{N} \mathbf{U}_N.
\end{equation*}
The fast algorithm for tensor convolution applies the transforms $\mathbf{C}_n$, $\mathbf{B}_n$ and $\mathbf{A}_n$ separately to each dimension $n$ of the kernel and data tensors, $\boldsymbol{\mathcal{G}}$ and $\boldsymbol{\mathcal{D}}$:
\begin{equation}
\label{eq:winond}
\boldsymbol{\mathcal{S}} =\left[\,( \boldsymbol{\mathcal{G}}\times_{n=1}^{N} \mathbf{C}_{n} )\odot ( \boldsymbol{\mathcal{D}}\times_{n=1}^{N} \mathbf{B}_{n} ) \,\right]\times_{n=1}^{N} \mathbf{A}_{n}.
\end{equation}
It is straightforward to show that (\ref{eq:winograd_1d}) is a special case of (\ref{eq:winond}) by considering the following equivalence:
\begin{equation*}
\label{eq:equiv}
\boldsymbol{\mathcal{Y}} = \boldsymbol{\mathcal{X}} \times_n \mathbf{U} \Leftrightarrow \mathbf{Y}_{(n)} = \mathbf{UX}_{(n)},
\end{equation*}
where the matrix $\mathbf{X}_{(n)}$ is the mode-$n$ major unfolding of tensor $\boldsymbol{\mathcal{X}}$~\cite{kolda2009tensor}. In the 1-dimensional case, $\mathbf{X}_{(1)}$ is simply $\mathbf{x}$ and thus $\boldsymbol{\mathcal{X}} \times_1 \mathbf{U} = \mathbf{Ux}$. Likewise in 2D, as $\boldsymbol{\mathcal{X}}\times_1 \mathbf{U} = \mathbf{UX}$ and $\boldsymbol{\mathcal{X}}\times_2 \mathbf{U} = \mathbf{UX}^\top$ then (\ref{eq:winond}) reduces to the case reported by~\cite{lavin2016fast}:
\begin{equation*}
\mathbf{S} = \mathbf{A} \left[ \,(\mathbf{CGC^\top}) \odot (\mathbf{BDB^\top}) \, \right]\mathbf{A^\top}.
\end{equation*}
\subsection{Amortizing Transform Costs}
Manually reducing transform costs via Winograd-style minimal algorithms is important for 2-dimensional GPU implementations. However, this is less important for a CPU implementation of higher-dimensional convolution. The reasons are two-fold: (a) the matrix multiplication cost can be amortized across a larger number of kernels and channels due to relaxed memory constraints; and (b) CPUs are able to directly leverage the sparse structure of these matrices for further acceleration. Although efficient sparse linear algebra is possible on GPUs, this typically involves reshuffling sparse matrices into a dense representation (e.g. COO, CSR or ELLPACK~\cite{grewe2011automatically}) and introduces unnecessary computational overhead.
As a simple example, consider Winograd's minimal F(2,3) algorithm presented in Section~\ref{ss:minimal}. Computing the output $\mathbf{s}$ of length $S=2$ requires a total of 6 multiplications -- 4 between the data and kernel, and 2 by a constant factor of 0.5. The 4 additions are ignored as modern CPUs can compute fused multiply-accumulate operations in a single cycle. By contrast, computing $\mathbf{s}$ explicitly by equation (\ref{eq:winograd_1d}) requires 28 multiplications -- 4 for the element-wise product, 16 for the data transform and 8 for the inverse transform (assuming transformed kernels are cached at training time). Even leveraging sparsity in the transform matrices requires 19 multiplications, which is more than triple that required for Winograd's minimal algorithm.
The game changes when one considers these approaches in the context of a ConvNet layer with multiple channels and kernels. Without loss of generality, assume the numbers of kernels and channels are both equal to $M$. As the inverse transform can be applied once over the reduced output and the data transform once across all kernels, the required number of multiplications is just $4M^2 + 24M$ (versus $6M^2$ for Winograd). This can be reduced further to $4M^2 + 15M$ by exploiting the sparsity of $\mathbf{A}$ and $\mathbf{B}$.
\begin{figure}[t]
\begin{center}
\centerline{\includegraphics[width=0.83\columnwidth]{fig1.pdf}}
\caption{Reduction in computations achieved by fast tensor convolution (forward pass) for a C3D kernel ($3\times3\times3$) as a function of number of layer channels and kernels. Dashed line indicates direct convolution baseline.}
\label{fig:speedup}
\end{center}
\vskip -0.3in
\end{figure}
Although it is also possible to restructure Winograd's algorithm to exploit the size of the network, for larger networks the $4M^2$ multiplications required by the element-wise product quickly renders the linear transform cost negligible. It is also impractical to construct similar minimal algorithms in higher dimensions. Consider the C3D network of $3\times 3\times 3$ kernels that has yielded state-of-the-art performance across many video processing benchmarks~\cite{tran2015learning}. As an example, we synthesize the following transform matrices such that convolution reduces to a $6\times 6\times 6$ element-wise product:
\begin{align*}
\mathbf{A} &= \begin{bmatrix}
1 &1 &1 & 1 & 1 & 0\\
0 &1 & -1 & \frac{1}{3} & -\frac{1}{3} & 0\\
0 & 1& 1 & \frac{1}{9} & \frac{1}{9} & 0\\
0 &1 & -1 & \frac{1}{27}& -\frac{1}{27} & 1
\end{bmatrix}\\
\mathbf{B} &= \begin{bmatrix}
\frac{1}{9} & 0 & -\frac{10}{9}& 0 & 1& 0\\
0 & -\frac{1}{9} & -\frac{1}{9} & 1 & 1 & 0\\
0 &\frac{1}{9} &-\frac{1}{9} & -1 & 1 & 0\\
0 & -\frac{1}{3} & -1 & \frac{1}{3} & 1 &0\\
0& \frac{1}{3} & -1 & -\frac{1}{3} & 1& 0\\
0 & \frac{1}{9} & 0 & -\frac{10}{9} & 0 &1
\end{bmatrix}\\
\mathbf{C} &= \begin{bmatrix}
9 & \frac{9}{16} & \frac{9}{16} & -\frac{81}{16} & -\frac{81}{16} & 0\\
0 & \frac{9}{16} & -\frac{9}{16} & -\frac{27}{16} & \frac{27}{16} & 0\\
0 & \frac{9}{16} & \frac{9}{16} & -\frac{9}{16} & -\frac{9}{16} & 1
\end{bmatrix}^\top.
\end{align*}
The theoretical ceiling on speed-up obtainable using these matrices is 8-fold, ignoring the cost of the matrix-tensor products required when applying (\ref{eq:winond}). Figure~\ref{fig:speedup} demonstrates the actual reduction in computations as a function of kernels and channels. For a network of just 100 kernels and 100 channels, it is possible to obtain greater than 6-fold acceleration with respect to direct sliding-window convolution. This is triple the performance margin that could be gained if the network was constrained to 10 kernels and channels due to a lower memory ceiling.
We can further improve this performance margin by exploiting the sparsity of the matrices themselves, as it is comparatively straightforward to implement efficient sparse linear algebra for CPUs. One might worry that the transform matrix sparsity is inversely proportional to the degree of $m(x)$. However, this simply suggests that our fast algorithm is best suited for networks of small kernels, which is fortunately well-aligned with recent trends in deep ConvNet architecture~\cite{he2016deep,simonyan2014very,szegedy2015going}. Sparsity and numerical precision also decrease as a function of $D$. In practice, the data matrix $\mathbf{D}$ is not the full feature map (e.g. an ImageNet image) but rather one of many small, overlapping input tiles (each of size $D\times D$, stepping by $S$ along both axes) whose $S\times S$ outputs are stitched together to form the final convolution result. In Section~\ref{ss:avxwino} we discuss how the fully-automated nature of our implementation can leverage this property for further performance improvement.
\section{Optimizing for CPU Architecture}
There are a myriad of algorithmic tricks that can be applied to reduce the number of computations required for convolution. Consider the special case where our transforms are the discrete Fourier transform (DFT) and inverse DFT matrices. As the Fourier transform of a real-valued signal has Hermitian symmetry, the number of unique terms in the element-wise product can be reduced~\cite{mathieu2013fast}. More generally, one could also apply the Strassen algorithm to reduce the number of steps required for matrix multiplication~\cite{cong2014minimizing}.
In practice, the merit of any of these approaches depends intimately on whether they can be implemented to effectively leverage hardware. Consider the 50-to-1 performance ratio observed between existing GPU and CPU implementations~\cite{shi2016benchmarking}. For the devices used in this study (Titan X versus Xeon E7-8890), the ratio of theoretical throughput is actually less than 5-to-1. This seems to suggest that current CPU performance limitations are largely issues of software rather than hardware.
Although some previous studies have discussed CPU-specific performance optimizations for neural networks~\cite{vanhoucke2011improving}, these guidelines have not necessarily translated to optimal implementations. For example, the Eigen 3.2 linear algebra library (used until recently by TensorFlow) does not provide native support for AVX (vectorized) instructions, introducing a tight bottleneck on theoretical throughput. Looking beyond a single core, a recent review demonstrates poor multicore scalability across all major ConvNet frameworks~\cite{shi2016benchmarking}. Solving these two issues alone has the potential to close the CPU-GPU gap by a full order-of-magnitude, and this improvement is multiplicative with the algorithmic savings described earlier.
\subsection{Single-Core Utilization}
Although our fast algorithm requires theoretically fewer computations to execute than na\"{\i}ve convolution (e.g. 8-fold for C3D kernels), it is considerably more difficult to implement with high CPU utilization.
Consider the element-wise product $\boldsymbol{\mathcal{G}}^\prime \odot \boldsymbol{\mathcal{D}}^\prime$, summed for each channel $m = 1\dots, M$ to produce the $N$-dimensional tensor $\boldsymbol{\mathcal{S}}^\prime$. We can compute the ratio of computations, i.e. 1 multiply and 1 accumulate operation per $(g,\,d)$-pair, to the volume of memory loaded:
\begin{equation*}
\frac{\mathrm{computations}}{\mathrm{memory\,accesses}} = \frac{2D^NM}{2D^NM} = 1.
\end{equation*}
Little's Law shows this is problematic for effective CPU utilization, as convolution expressed in this form is bottlenecked by memory bandwidth~\cite{little1961proof}. To solve this problem, recall that $\boldsymbol{\mathcal{D}}$ is one of many small, overlapping tiles that span the full-size feature map. Considering $T$ of these tiles, we introduce the following matrices:
\begin{equation}
\label{eq:practical}
\hat{\mathbf{S}}^{(i)} = \hat{\mathbf{D}}^{(i)} \times \hat{\mathbf{G}}^{(i)},
\end{equation}
where $\hat{\mathbf{D}}^{(i)} \in \mathbb{R}^{T\times M}$ (tiles-by-channels) and $\hat{\mathbf{G}}^{(i)} \in \mathbb{R}^{M\times K}$ (channels-by-kernels). Each matrix $i \in 1,\dots, D^N$ captures a single $(x, y)$ coordinate in the earlier $\boldsymbol{\mathcal{G}}^\prime \odot \boldsymbol{\mathcal{D}}^\prime$ element-wise product, which is fused with the channel-wise reduction into end-to-end matrix multiplications:
\begin{equation*}
\frac{\mathrm{computations}}{\mathrm{memory\,accesses}} = \frac{2D^NMTK}{D^N(MT + MK)} = \frac{2\,TK}{T+K}.
\end{equation*}
\begin{algorithm}[t]
\caption{$N$-Dimensional Convolution with SIMD}
\label{alg:efficient}
\begin{algorithmic}
\FOR{$i=1$ {\bfseries by} $W$ {\bfseries to} $D^N$}
\FOR{$m=1$ {\bfseries to} $M$}\vskip 0.05in
\STATE$\texttt{FMA}\left(\hat{\mathbf{s}}_{\,t,\, k}^{(i\,:\,i+W)},\, \hat{\mathbf{d}}_{\,t,\, m}^{(i\,:\,i+W)},\, \hat{\mathbf{g}}_{\,m,\, k}^{(i\,:\,i+W)} \right)$
\ENDFOR
\ENDFOR
\end{algorithmic}
\end{algorithm}
As $T$ can be any number of the small $D^N$ input tiles, we can select $T = K$ to demonstrate a compute-to-memory ratio that grows linearly in the number of kernels.
The fast convolutional form in (\ref{eq:practical}) is also well-suited to a number of other practical CPU performance optimizations~\cite{vanhoucke2011improving}. Foremost among these is the effective use of AVX (vectorized) and FMA (fused multiply-accumulate) floating-point SIMD operations. Consider the function \texttt{FMA}($\mathbf{x},\, \mathbf{y},\, \mathbf{z}$), which calculates the sum of vector $\mathbf{x}$ with the element-wise product $\mathbf{y} \odot \mathbf{z}$ and stores the result in $\mathbf{x}$, all in a single CPU cycle. This function can be leveraged for an efficient practical implementation of (\ref{eq:practical}), as presented in Algorithm~\ref{alg:efficient} for a single tile-kernel pair $s_{\,t,\, k}^{(i)} \in \hat{\mathbf{S}}^{(i)}$ and an AVX vector of width $W$. An illustration of the 2-dimensional case is provided in Figure~\ref{fig:avx}. On our Xeon CPU with 256-bit AVX registers and two dedicated FMA units, this optimization alone can yield a 32-fold speed-up over na\"{\i}ve implementations. This margin is expected to double with the introduction of 512-bit AVX registers for Intel Skylake and Xeon Phi.
\begin{figure}[t]
\begin{center}
\centerline{\includegraphics[width=1.0\columnwidth]{fig5c.pdf}}
\caption{Illustration of Algorithm~\ref{alg:efficient} using 2-dimensional ConvNets as an example. Both the element-wise product $\mathbf{G}^\prime \odot \mathbf{D}^\prime$ and reduction down $M$ channels are captured within matrix multiplication. Multiple elements in $\hat{\mathbf{s}}_{\,t,\, k}$ can be calculated simultaneously by filling AVX registers into-the-page. This technique generalizes trivially to $N$-dimensions by substituting $D^2$ for $D^N$.}
\label{fig:avx}
\end{center}
\vskip -0.3in
\end{figure}
We benchmarked the performance of our fast convolution algorithm on a 1.44 TFLOP/s Xeon E7-8890 CPU and observed that it executes at $\sim$70\% maximum utilization. This includes all steps from input to output, including all necessary data reshuffling. As a point of comparison, Intel's own MKL convolutional primitive runs at just $20\%$ (excluding reshuffling) on the same processor. The Eigen 3.2 linear algebra library achieves lower utilization still, capped at just $3.5\%$ due to a lack of AVX and FMA support. Both of these libraries have been widely used by popular ConvNet frameworks including Caffe, CNTK, TensorFlow and Torch.
\subsection{AVX-Aware Transform Synthesis}
\label{ss:avxwino}
The fully automated nature of our transform generation allows for the synthesis of transform matrices that optimize for CPU architectural constraints. From Figure~\ref{fig:avx}, it is clear that full utilization can only be achieved if $D^N$ is an integer multiple of the AVX vector width $W$. This is an important optimization, as data volumes are consistently small (invariant to the number of channels and kernels) and thus there is little opportunity to amortize padding overhead.
Table~\ref{tab:transforms} summarizes statistics for example transforms that we have generated for square 2 and 3-dimensional kernels, enumerated automatically using~\cite{lavin_git}. In each case, we generate transforms for the smallest possible $\mathbf{D}\in\mathbb{R}^{D\times D}$ such that $SG/D > 1$ and $D^2 \,\mathrm{mod}\, W = 0$. The matrices are provided in the Supplementary Materials.
\begin{table}[t]
\centering
\caption{Size, transform sparsity and algorithmic speed-up statistics for example transforms matrices. Associated matrices are provided in the Supplementary Materials.\vspace{1em}}
\label{tab:transforms}
\begin{tabular}{c|ccc|ccc|cc}
& \multicolumn{3}{c|}{\textbf{size}} & \multicolumn{3}{c|}{\textbf{sparsity}} & \multicolumn{2}{c}{\textbf{speed-up}} \\
& $D$ & $G$ & $S$ & $\mathbf{A}$ & $\mathbf{B}$ & $\mathbf{D}$ & 2D & 3D \\ \hline
(a) & 4 & 2 & 3 & 0.33 & 0.50 & 0.25 & 2.25 & 3.38 \\
(b) & 4 & 3 & 2 & 0.25 & 0.50 & 0.33 & 2.25 & 3.38 \\
(c) & 8 & 4 & 5 & 0.20 & 0.31 & 0.19 & 6.25 & 15.63 \\
(d) & 8 & 5 & 4 & 0.19 & 0.31 & 0.20 & 6.25 & 15.63 \\
(e) & 8 & 6 & 3 & 0.17 & 0.31 & 0.21 & 5.06 & 11.39
\end{tabular}
\end{table}
\subsection{Multicore Scalability}
Single-core utilization is just one dimension of performance optimization. Many modern systems contain both multiple CPU chips, with shared access to host RAM; and multiple cores per chip, with shared access to faster L3 cache. We adopt a relatively simple parallelization scheme where threads simultaneously operate on different subsets of $T$ input tiles. To avoid memory contention and other concurrency issues, we use the Cilk Plus work-stealing scheduler supported by GCC 4.8~\cite{blumofe1996cilk,robison2013composable}, simply applying its fork-join primitive to all for-loops with no iteration dependencies. The number of tiles $T$ per thread is empirically tuned to simultaneously maximize L3 cache utilization ($T$ cannot be too large) and compute-to-memory ratio ($T$ cannot be too small).
We observe that even this simple parallelization scheme yields near-optimal linear scalability. In Figure~\ref{fig:multicore} we present ConvNet throughput as a function of processor cores for both (a) our fast algorithm, and (b) our own multicore implementation of na\"{\i}ve convolution (which is comparatively simple to implement). Scalability is measured across a single convolution layer for a $1024\times 1024$ image with kernels of size $4\times 4$. To avoid NUMA issues relating to expensive inter-chip communication, we spawn independent instances for each CPU in our 4-socket shared-memory server such that all 18 threads in Figure~\ref{fig:multicore} are bound to a single chip. When using all 18 cores of our Intel Xeon E7-8890 CPU the scalability of (a) is 95\% theoretically optimal. As a point of comparison, a recent review examined the scalability of popular ConvNet frameworks Caffe, CNTK, TensorFlow and Torch on a similar 16-core Xeon E5-2630 CPU~\cite{shi2016benchmarking}. They reported multicore scalability ranging from $16\%$ (Caffe) to $42\%$ (TensorFlow), which is equivalent to a 2.3 to 5.9-fold improvement with our implementation.
\begin{figure}[t]
\begin{center}
\centerline{\includegraphics[width=0.92\columnwidth]{fig4.pdf}}
\caption{Multicore scalability of our cache-aware and Cilk-optimized implementations of (a) fast convolution, and (b) na\"{\i}ve convolution. Dashed line indicates theoretical scalability limit with respect to a single-core implementation. Executed on 18-core Intel Xeon E7-8890 processor with 45 MB L3-cache.}
\label{fig:multicore}
\end{center}
\vskip -0.3in
\end{figure}
\subsection{Performance Benchmarking}
The most popular ConvNet benchmarks focus exclusively on GPU performance~\cite{chintala2015convnet}. The only study we could find presenting thorough CPU benchmarking is that of Shi \emph{et al.}, comparing the throughput of Caffe, CNTK, TensorFlow and Torch for the AlexNet and ResNet architectures~\cite{shi2016benchmarking}. Although this is a useful study for ball-parking our multicore scalability, it is difficult to extrapolate fair comparisons to our overall system throughput for many reasons. Foremost is that the authors do not select CPU-optimized implementations. They adopt an earlier version of TensorFlow that uses the Eigen 3.2 library (no AVX/FMA support), and otherwise use the default framework-specific implementations of convolution rather than linking to optimized packages such as Intel MKL.
We benchmark 2D ConvNet performance against two popular frameworks: TensorFlow, using the newer Eigen 3.3 library (with AVX support); and Caffe, compiled to use Intel's optimized MKL library. We consider the propagation time of a $224\times224$ ImageNet image through three convolution layers to capture any necessary inter-layer reshuffling. We choose this simple architecture over a named network because we are not interested in comparing execution times of pooling, fully-connected or other layers. We also select an obscure kernel size ($4\times4$) for which there have been no Winograd-style fast algorithms published, in order to demonstrate the generality of our implementation to arbitrary kernels. Each layer contains a modest 32 channels and 32 kernels to amortize the cost associated with applying transform matrices. Results presented are the fastest across batch sizes of 1, 20 and 200. An important innovation of our approach is that it is batch size-agnostic, making it suitable for single-image autoregressive models common in generative modelling and deep reinforcement learning.
\begin{figure}[t]
\begin{center}
\centerline{\includegraphics[width=0.85\columnwidth]{fig6_new.pdf}}
\caption{Measured throughput (megavoxels per second) of (a) our fast 2D convolution implementation (as a special case of our $N$-dimensional algorithm), (b) TensorFlow, using the latest Eigen 3.3, and (c) Caffe, using Intel MKL. Throughput is calculated by propagating $224\times 224$ images through 3 convolutional layers.}
\label{fig:tensorflow}
\end{center}
\vskip -0.3in
\end{figure}
Our performance benchmarks are presented in Figure~\ref{fig:tensorflow}. The single-core throughput of (a) our fast algorithm is 0.89 MVox/s, compared to (b) 0.18 for TensorFlow and (c) 0.19 for Caffe. Increasing cores from 1 to 18, our throughput improves to 10.9 MVox/s compared to 1.77 for TensorFlow and 0.41 for Caffe. This is equivalent to an approximate 5 to 25-fold improvement in overall performance. In terms of multicore scalability, this is (a) 68\% versus (b) 55\% and (c) 12\%. We note that our performance here is lower than the $95\%$ presented in Figure~\ref{fig:multicore} for a larger input size (i.e. $T$ is much larger, yielding a better compute-to-memory ratio), and that the scalability for TensorFlow and Caffe are both similar to those reported in~\cite{shi2016benchmarking}.
\section{Discussion}
Motivated by the recent success of 3-dimensional ConvNets in video and volumetric image processing~\cite{lee2015recursive,tran2015learning}, we have proposed a transition to CPU hardware to overcome the memory constraints limiting the size and expressivity of these networks. Key to this transition is overcoming the impractical performance gap between existing CPU and GPU implementations. To achieve this, we extended previous algorithms of fast convolution to the $N$-dimensional case, yielding an order-of-magnitude reduction in computations for popular networks such as C3D. Importantly, our implementation diverges from previous studies that focus on the hand-crafting of minimal Winograd-style algorithms. We instead exploit the relaxed memory constraints, efficient sparse access and other architectural considerations of CPU hardware to overcome the cost of applying transform matrices.
The obvious alternative to our approach is to overcome memory constraints by splitting large networks across multiple GPU devices. Distributed frameworks such as TensorFlow are valuable for a broad class of machine learning problems, e.g. many of the data mining tasks faced by large organizations where the data itself is often sharded across different machines. However, it is important to recognize that the horizontal scalability paradigm is not a one-size-fits-all solution. Consider the increasing demand for real-time CPU solutions to image and video processing, particularly on mobile devices. Moving forward, we expect that intensive ConvNet-driven tasks such as video classification and de-noising will continue to migrate from the realm of academic research to practical realization~\cite{shi2016real}. Efficient CPU implementations of ConvNets and other deep learning algorithms will play a fundamental role in this transition.
At the opposite end of the spectrum, some ``big data" problems in the image processing domain are, counterintuitively, too big to be solved in a distributed setting. Consider the emerging field of high-throughput connectomics~\cite{meirovitch2016multi}. Multi-beam electron microscopes image cross-sectional slices of neural tissue at nanometer-resolution, which are then segmented by ConvNets to reconstruct the 3-dimensional morphology and interconnectivity of individual neurons~\cite{ronneberger2015u}. The major issue here is simply one of scale -- a seemingly modest cubic millimeter volume of neural tissue takes several months to image at the TB/hr pace of modern electron microscopes, which exceeds maximum data transfer rates. To avoid introducing communication bottlenecks to the connectomics pipeline, it is necessary that segmentation can execute in real-time on a server physically co-located in the same room as the microscope~\cite{lichtman2014big,matveev2016}. Shared-memory CPU systems can support hundreds of cores and terabytes of memory in a single server, and it is critical that systems be implemented to exploit these valuable resources.
Treating 2D ConvNets as a special case of tensor convolution, our implementation yields 5 to 25-fold improved throughput compared to previous state-of-the-art on CPU. This is an important step toward bridging the performance gap between CPU and GPU hardware and is particularly important in the context of emerging hardware trends, e.g. Intel announcing that future generations of CPUs will contain dedicated deep learning accelerators. More importantly, we believe that removing constraints on 3D ConvNet size will herald new opportunities in the machine learning community; particularly in the context of generative models~\cite{denton2015deep,goodfellow2014generative}, where rich temporal correlations are currently ignored when learning latent manifolds~\cite{ledig2016photo}.
\section*{Acknowledgements}
Support is gratefully acknowledged from the National Science Foundation (NSF) under grants IIS-1447786 and CCF-1563880, and the Intelligence Advanced Research Projects Activity (IARPA) under grant 138076-5093555.
|
1,314,259,995,055 | arxiv | \section{\textsc{Compress++}\xspace} \label{sec:compresspp}
To offset any excess error due to \textsc{Compress}\xspace while maintaining its near-linear runtime,
we next introduce \textsc{Compress++}\xspace (\cref{alg:compresspp}), a simple two-stage meta-procedure for faster root-thinning.
\textsc{Compress++}\xspace takes as input an oversampling parameter $\ensuremath{\mfk{g}}\xspace$, a halving algorithm \textsc{Halve}\xspace, and a $2^\ensuremath{\mfk{g}}\xspace$-thinning algorithm \textsc{Thin}\xspace (see \cref{def:thinning_algo}). In our applications, \textsc{Halve}\xspace and \textsc{Thin}\xspace are derived from the same base algorithm (e.g., from KT with different thinning factors), but this is not required.
\textsc{Compress++}\xspace first runs the faster but slightly more erroneous $\textsc{Compress}\xspace(\textsc{Halve}\xspace, \ensuremath{\mfk{g}}\xspace)$ algorithm to produce an intermediate coreset of size $2^{\ensuremath{\mfk{g}}\xspace} \sqrt{n}$.
Next, the slower but more accurate \textsc{Thin}\xspace algorithm is run on the greatly compressed intermediate coreset to produce a final output of size $\sqrt{n}$.
In the sequel, we demonstrate how to set $\ensuremath{\mfk{g}}\xspace$ to offset error inflation due to \textsc{Compress}\xspace while maintaining its fast runtime.
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\begin{algorithm2e}[ht!]
\caption{\textsc{Compress++}\xspace \label{alg:compresspp}}
\SetAlgoLined
\DontPrintSemicolon
\small
\KwIn{oversampling parameter\xspace \ensuremath{\mfk{g}}\xspace, halving alg.\ $\textsc{Halve}\xspace$, $2^\ensuremath{\mfk{g}}\xspace$-thinning alg.\ $\textsc{Thin}\xspace$, point sequence $ \cset_{\mrm{in}} $ of size $n$}
%
$\mc{S}_{\textsc{C}} \ \quad\gets\quad \textsc{Compress}\xspace( \textsc{Halve}\xspace, \ensuremath{\mfk{g}}\xspace, \cset_{\mrm{in}}) $ \quad // coreset of size $2^{\ensuremath{\mfk{g}}\xspace}\sqrt{n}$ \\
$\mc{S}_{\textsc{C++}\xspace} \ \ \gets \quad \textsc{Thin}\xspace\left(\mc{S}_{\textsc{C}} \right)$ \ \ \qquad\qquad \qquad\quad\ \ // coreset of size $\sqrt{n}$ \\
\KwRet{ $\mc{S}_{\textsc{C++}\xspace}$}
\end{algorithm2e}
\vspace{-3mm}
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\subsection{Integration error and runtime guarantees for \textsc{Compress++}\xspace}
%
%
The following result, proved in \cref{sec:compresspp_subgamma}, relates the runtime and single-function integration error of \textsc{Compress++}\xspace to the runtime and error of \textsc{Halve}\xspace and \textsc{Thin}\xspace.
%
\newcommand{Runtime and integration error of \textsc{Compress++}\xspace}{Runtime and integration error of \textsc{Compress++}\xspace}
\begin{theorem}[\tbf{Runtime and integration error of \textsc{Compress++}\xspace}]
\label{thm:compresspp_subgamma}
If $\textsc{Halve}\xspace$ and $\textsc{Thin}\xspace$ have runtimes $r_{\textsc{H}}(n)$ and $r_{\thintag}(n)$ respectively for inputs of size $n$, then \textsc{Compress++}\xspace has runtime %
\begin{talign}
\label{eq:runtime_cpp}
r_{\textsc{C++}\xspace} (n) &=
r_{\textsc{C}}(n)+
r_{\thintag}({\l_n}{/2})
\qtext{where}
r_{\textsc{C}}(n)\seq{\cref{run_time_cp}}\sum_{i=0}^{\beta_n} 4^{i}\cdot r_{\textsc{H}} ( \l_n 2^{-i} ),
%
\end{talign}
%
%
%
%
%
%
%
%
%
%
$\l_n\!=\! 2^{\ensuremath{\mfk{g}}\xspace+1} \sqrt{n}$, and $\beta_n\!=\!\log_4 n \!-\! \ensuremath{\mfk{g}}\xspace\!-\!1$ as in \cref{thm:compress_sub_gamma}.
Furthermore, if for some function $f$, $\textsc{Halve}\xspace \in~\mathcal{G}^{f} (\nu_{\textsc{H}})$ and $\textsc{Thin}\xspace\in\mathcal{G}^{f} (\nu_{\thintag})$, then $\textsc{Compress++}\xspace \in \mathcal{G}^{f} (\nu_{\textsc{C++}\xspace})$ with
\begin{talign}
\label{eq:nu_cpp}
\nu^2_{\textsc{C++}\xspace}(n) &=
\nu^2_{\textsc{C}}(n)
+
\nu^2_{\thintag}({\l_n}{/2})
\qtext{where}
\nu^2_{\textsc{C}}(n)
\seq{\cref{eq:nu_cp}}\sum_{i=0}^{\beta_n} 4^{-i} \cdot \nu^2_{\textsc{H}} ( \l_n 2^{-i} ).
\end{talign}
\end{theorem}
\vspace{-4mm}
\begin{remark}[Near-linear runtime and near-quadratic speed-ups for \textsc{Compress++}\xspace]
\label{rem:runtime_compresspp}
\normalfont
When \textsc{Halve}\xspace and \textsc{Thin}\xspace have quadratic runtimes with $\max(r_{\textsc{H}}(n), r_{\thintag}(n)) = n^{2}$,
\cref{thm:compresspp_subgamma,rem:runtime_speedup} yield that $r_{\textsc{C++}\xspace}(n) \leq 4^{\ensuremath{\mfk{g}}\xspace + 1}\,n (\log_4(n)-\ensuremath{\mfk{g}}\xspace) + 4^{\ensuremath{\mfk{g}}\xspace} n$.
Hence, \textsc{Compress++}\xspace maintains a near-linear runtime
\begin{talign}
\label{eq:cpp_runtime_speedup}
r_{\textsc{C++}\xspace}(n) = \mc{O}(n \log_4^{c+1}(n))
\qtext{whenever} 4^\ensuremath{\mfk{g}}\xspace = \mc{O}(\log_4^c n).
%
\end{talign}
If \textsc{Halve}\xspace and \textsc{Thin}\xspace instead have super-quadratic runtimes with $\max(r_{\textsc{H}}(n), r_{\thintag}(n)) = n^\tau$, then by \cref{rem:runtime_speedup} we have
%
$r_{\textsc{C++}\xspace}(n) \leq (\frac{4^{\tau}}{2^{\tau}-4}+1)\, 2^{\ensuremath{\mfk{g}}\xspace \tau} n^{\tau/2}$,
so that \textsc{Compress++}\xspace provides a near-quadratic speed up
$%
r_{\textsc{C++}\xspace}(n) = \mc{O}(n^{\tau/2} \log_4^{c\tau/2}(n))$
whenever
$4^\ensuremath{\mfk{g}}\xspace = \mc{O}(\log_4^c n).
$%
\end{remark}
\begin{remark}[\textsc{Compress++}\xspace inflates sub-Gaussian error by at most $\sqrt{2}$]\label{rem:compresspp_error}
\normalfont
%
In the usual case that $n\,\nu_{\textsc{H}} (n)$ is non-decreasing in $n$,
\cref{thm:compresspp_subgamma,rem:compress_error_inflation} imply that
\begin{talign}
\label{eq:compresspp_error}
\nu^2_{\textsc{C++}\xspace}(n)
\leq ( \log_4 n - \ensuremath{\mfk{g}}\xspace) \nu^2_{\textsc{H}} (\l_n) + \nu^2_{\thintag}(\frac{\l_n}{2})
%
&= \nu^2_{\thintag}(\frac{\l_n}{2})
\cdot
\parenth{1+ \frac{\log_4 n- \ensuremath{\mfk{g}}\xspace}{4^{\ensuremath{\mfk{g}}\xspace}} \cdot (\frac{\zeta_{\textsc{H}}(\l_n)}{\zeta_{\thintag}(\l_n/2)})^2 }%
%
%
\end{talign}
where we have introduced the rescaled quantities
$\zeta_{\textsc{H}}(\l_n) \defeq \frac{\l_n}{2} \nu_{\textsc{H}}(\l_n)$ and $\zeta_{\thintag}(\frac{\l_n}{2}) \defeq \sqrt{n}\, \nu_{\thintag}(\frac{\l_n}{2})$.
Therefore, \textsc{Compress++}\xspace satisfies
\begin{talign}\label{eq:ossymb-condition}
\nu_{\textsc{C++}\xspace}(n) \leq \sqrt{2} \nu_{\thintag}(\frac{\l_n}{2})
\qtext{whenever}
\ensuremath{\mfk{g}}\xspace
\geq \log_4\log_4 n
+ \log_2(\frac{\zeta_{\textsc{H}}(\l_n)}{\zeta_{\thintag}(\l_n/2)}).
\end{talign}
That is, whenever \textsc{Compress++}\xspace is run with an oversampling parameter\xspace $\ensuremath{\mfk{g}}\xspace$ satisfying \cref{eq:ossymb-condition} its sub-Gaussian error is never more than $\sqrt{2}$ times the second-stage \textsc{Thin}\xspace error.
Here, \textsc{Thin}\xspace represents a strong baseline for comparison as thinning from $n$ to $\sqrt{n}$ points should incur at least as much error as thinning from $\l_n/2$ to $\sqrt n$ points.
\end{remark}
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
As we illustrate in the next example, when \textsc{Thin}\xspace and \textsc{Halve}\xspace are derived from the same thinning algorithm, the ratio $\frac{\zeta_{\textsc{H}}(\l_n)}{\zeta_{\thintag}(\l_n/2)}$ is typically bounded by a constant $C$ so that the choice $\ensuremath{\mfk{g}}\xspace = \ceil{\log_4 \log_4 n + \log_2 C}$ suffices to simultaneously obtain the $\sqrt{2}$ relative error guarantee \cref{eq:ossymb-condition} of \cref{rem:compresspp_error} and the substantial speed-ups \cref{eq:cpp_runtime_speedup} of \cref{rem:runtime_compresspp}.
%
\begin{example}[\hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace-\textsc{Compress++}\xspace]
\label{ex:ktsplitcompresspp}
\normalfont
In the notation of \cref{ex:kh_subgamma},
consider running \textsc{Compress++}\xspace with $\textsc{Halve}\xspace = \hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace(
\frac{\l^2}{4n2^{\ensuremath{\mfk{g}}\xspace}(\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1))}\delta
%
)$ when applied to an input of size $\l$ and $\textsc{Thin}\xspace = \hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace(
\frac{\ensuremath{\mfk{g}}\xspace}{\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1)} \delta
%
)$.
As detailed in \cref{sec:proof_of_ktsplitcompresspp}, on an event of probability $1 - \frac{\delta}{2}$, all \textsc{Compress++}\xspace invocations of \textsc{Halve}\xspace and \textsc{Thin}\xspace are simultaneously $f$-sub-Gaussian with parameters satisfying
\begin{talign}\label{eq:ktsplit_zetas}
\zeta_{\textsc{H}}(\ell) \!=\! \zeta_{\thintag}(\ell) \!=\! \frac{2}{\sqrt{3}}\sqrt{\log(
\frac{6\sqrt{n}(\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1))}{\delta}
%
)\infnorm{\mbf{k}}}
\Longrightarrow
%
\frac{\zeta_{\textsc{H}}(\l_n)}{\zeta_{\thintag}(\frac{\l_n}{2})} \!=\! 1
%
%
\text{ for all $f$ with $\knorm{f}\!=1$.}
\end{talign}
%
Since \hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace runs in $\Theta(n^2)$ time, \cref{rem:runtime_compresspp,rem:compresspp_error} imply that \hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace-\textsc{Compress++}\xspace with
%
$\ensuremath{\mfk{g}}\xspace \!=\!\ceil{ \log_4 \log_4 n} $
runs in near-linear $\mc{O} ( n \log^2 n )$ time and inflates sub-Gaussian error by at most $\sqrt{2}$.
%
%
%
%
%
%
%
%
\hfill\small{\ensuremath{\blacksquare}}
\end{example}
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\subsection{MMD guarantees for \textsc{Compress++}\xspace}
\newcommand{MMD guarantees for \textsc{Compress++}\xspace}{MMD guarantees for \textsc{Compress++}\xspace}
Next, we bound the MMD error of \textsc{Compress++}\xspace in terms of the MMD error of \textsc{Halve}\xspace and \textsc{Thin}\xspace.
The proof of the following result can be found in \cref{sec:compressppmmd}.
\begin{theorem}[MMD guarantees for \textsc{Compress++}\xspace] \label{thm:compressppmmd}
If $\textsc{Thin}\xspace \in \mc{G}_{\kernel}(a',\!v')$, $\textsc{Halve}\xspace \in \mc{G}_{\kernel}(a,\!v)$ for $n\,a_n$ and $n\, v_n$ non-decreasing, and $\Earg{\P_{\textsc{Halve}\xspace}\mbf{k}\mid \cset_{\mrm{in}}} = \P_{\mrm{in}} \mbf{k}$, then $\textsc{Compress++}\xspace \in \mc{G}_{\kernel}(\what{a},\what{v})$ with
\begin{talign}
\what{v}_n
\defeq \wtil{v}_n + v'_{\l_n/2}
\qtext{and}
%
%
\what{a}_n \defeq \wtil{a}_n
+a'_{\l_n/2}
+ \what{v}_n \sqrt{\log 2}
\label{eq:cpp_params}
\end{talign}
for $\wtil{v}_n$ and $\wtil{a}_n $ defined in \cref{thm:compress_mmd} and $\l_n = 2^{\ensuremath{\mfk{g}}\xspace+1} \sqrt n$ as in \cref{thm:compress_sub_gamma}.
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\end{theorem}
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\begin{remark}[\tbf{\textsc{Compress++}\xspace inflates MMD guarantee by at most $4$}]\label{rem:compresspp_mmd_inflation}
\normalfont
\newcommand{\wtil{\zeta}_{\textsc{H}}(\l_n)}{\wtil{\zeta}_{\textsc{H}}(\l_n)}
\newcommand{\wtil{\zeta}_{\thintag}(\frac{\l_n}{2})}{\wtil{\zeta}_{\thintag}(\frac{\l_n}{2})}
\cref{thm:compressppmmd} implies that the \textsc{Compress++}\xspace
$\mbf{k}$-sub-Gaussian error $\vareps_{\mbf{k}, \textsc{Compress++}\xspace}(n) = \max(\what{a}_{n},\what{v}_{n})$ satisfies
\begin{talign}
\vareps_{\mbf{k}, \textsc{Compress++}\xspace}(n)
&\leq (10 \log(n+1)\, \vareps_{\mbf{k}, \textsc{Halve}\xspace}(\l_n) + \vareps_{\mbf{k}, \textsc{Thin}\xspace}(\frac{\l_n}{2}))\,(1+\sqrt{\log 2}) \\
&\leq \vareps_{\mbf{k}, \textsc{Thin}\xspace}(\frac{\l_n}{2}) (\frac{10 \log(n+1)}{2^{\ensuremath{\mfk{g}}\xspace}} \frac{\wtil{\zeta}_{\textsc{H}}(\l_n)}{\wtil{\zeta}_{\thintag}(\frac{\l_n}{2})} + 1)(1+\sqrt{\log 2}),\label{eq:cppzeta1}
\end{talign}
where we have introduced the rescaled quantities $\wtil{\zeta}_{\textsc{H}}(\l_n)\!\defeq\! \frac{\l_n}{2} \,\vareps_{\mbf{k}, \textsc{Halve}\xspace}(\l_n)$
and
$\wtil{\zeta}_{\thintag}(\frac{\l_n}{2})\! \defeq \!\sqrt{n} \,\vareps_{\mbf{k}, \textsc{Thin}\xspace}(\frac{\l_n}{2})$.
Therefore, \textsc{Compress++}\xspace satisfies
\begin{talign}
\vareps_{\mbf{k}, \textsc{Compress++}\xspace}(n)
\leq 4\,\vareps_{\mbf{k}, \textsc{Thin}\xspace}(\frac{\l_n}{2})
\qtext{whenever}
\ensuremath{\mfk{g}}\xspace &\geq { \log_2 \log (n+1) + \log_2 (\ifactortwo\frac{\wtil{\zeta}_{\textsc{H}}(\l_n)}{\wtil{\zeta}_{\thintag}(\frac{\l_n}{2})}) }.
\label{eq:g_compresspp}
\end{talign}
In other words, relative to a strong baseline of thinning from $\frac{\l_n}{2}$ to $\sqrt{n}$ points, \textsc{Compress++}\xspace inflates $\mbf{k}$-sub-Gaussian error by at most a factor of $4$ whenever $\ensuremath{\mfk{g}}\xspace$ satisfies~\cref{eq:g_compresspp}. For example, when the ratio ${\wtil{\zeta}_{\textsc{H}}(\l_n)}{/\wtil{\zeta}_{\thintag}(\frac{\l_n}{2})}$ is bounded by $C$, it suffices to choose $\ensuremath{\mfk{g}}\xspace = \ceil{\log_2\log(n\!+\!1)\!+\! \log_2(\ifactortwo C)}$.
\end{remark}
\begin{example}[KT-\textsc{Compress++}\xspace]\normalfont
\label{ex:ktcompresspp}
In the notation of \cref{ex:kt_subgamma,rem:sym},
consider running \textsc{Compress++}\xspace with $\textsc{Halve}\xspace = \trm{symmetrized } \textsc{KT}\xspace(
\frac{\l^2}{4n2^{\ensuremath{\mfk{g}}\xspace}(\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1))}\delta)$ when applied to an input of size $\l$ and $\textsc{Thin}\xspace = \textsc{KT}\xspace(\frac{\ensuremath{\mfk{g}}\xspace}{\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1)} \delta)$.
As we detail in \cref{sec:proof_of_ktcompresspp}, on an event of probability $1 - \frac{\delta}{2}$, all \textsc{Compress++}\xspace invocations of \textsc{Halve}\xspace and \textsc{Thin}\xspace are simultaneously $\mbf{k}$-sub-Gaussian with %
%
%
%
%
%
%
%
%
%
%
%
%
%
\begin{talign}
\wtil{\zeta}_{\textsc{H}}(\l_n) = \wtil{\zeta}_{\thintag}(\frac{\l_n}{2}) = C_{v}\sqrt{\staticinfnorm{\mbf{k}}\log(
\frac{6\sqrt{n}(\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1))}{\delta}
)}\ \mathfrak{M}_{\cset_{\mrm{in}},\mbf{k}}
\quad\Longrightarrow\quad
\frac{\wtil{\zeta}_{\textsc{H}}(\l_n)}{\wtil{\zeta}_{\thintag}(\frac{\l_n}{2})} = 1.
\label{eq:verify_rho_kt}
\end{talign}
As \textsc{KT}\xspace runs in $\Theta(n^2)$ time, \cref{rem:runtime_compresspp,rem:compresspp_mmd_inflation} imply that \textsc{KT}\xspace-\textsc{Compress++}\xspace with
$\ensuremath{\mfk{g}}\xspace \!=\ceil{ \!\log_2 \log n\! +\! \ifactorthree}$ %
runs in near-linear $\mc{O} ( n \log^3 n )$ time and inflates $\mbf{k}$-sub-Gaussian error by at most~$4$. \hfill\small{\ensuremath{\blacksquare}}
\end{example}
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\subsection{Integration error and runtime guarantees for \textsc{Compress}\xspace}
Our first result relates the runtime and single-function integration error of \textsc{Compress}\xspace to the runtime and error of \textsc{Halve}\xspace.
We measure integration error for each function $f$ probabilistically in terms of the sub-Gaussian parameter $\nu$ of \cref{def:subgamma_algo} and
measure runtime by the number of dominant operations performed by \textsc{Halve}\xspace (e.g., the number of kernel evaluations performed by kernel thinning). %
\compressalg
\newcommand{Runtime and integration error of \textsc{Compress}\xspace}{Runtime and integration error of \textsc{Compress}\xspace}
\begin{theorem}[\tbf{Runtime and integration error of \textsc{Compress}\xspace}]
\label{thm:compress_sub_gamma}
If \textsc{Halve}\xspace has runtime $r_{\textsc{H}}(n)$ for inputs of size $n$, then \textsc{Compress}\xspace has runtime %
\begin{talign}
%
%
%
%
%
%
r_{\textsc{C}}(n) &=
\sum_{i=0}^{\beta_n} 4^{i}\cdot r_{\textsc{H}}( \l_n 2^{-i}),
\label{run_time_cp}
\end{talign}
where $\l_n \!\defeq\! 2^{\ensuremath{\mfk{g}}\xspace+1} \sqrt{n}$ (twice the output size of \textsc{Compress}\xspace), and $\beta_n \!\defeq\! \log_2(\frac{n}{\l_n}) =\log_4 n\! -\! \ensuremath{\mfk{g}}\xspace\!-\!1$.
Furthermore, if, for some function $f$, $ \textsc{Halve}\xspace \in \mathcal{G}^{f} ( \nu_{\textsc{H}} ) $, then $\textsc{Compress}\xspace \in \mathcal{G}^{f} (\nu_{\textsc{C}})$ with
\begin{talign}
\label{eq:nu_cp}
\nu^2_{\textsc{C}}(n) &=\textstyle \sum_{i=0}^{ \beta_n } 4^{-i} \cdot \nu^2_{\textsc{H}} ( \l_n 2^{-i} ).
%
\end{talign}
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\end{theorem}
As we prove in \cref{sec:compress_subgamma}, the runtime guarantee~\cref{run_time_cp} is immediate once we unroll the \textsc{Compress}\xspace recursion and identify that \textsc{Compress}\xspace makes $4^i$ calls to \textsc{Halve}\xspace with input size $\l_n 2^{-i}$.
The error guarantee~\cref{eq:nu_cp} is more subtle: here, \textsc{Compress}\xspace benefits significantly from
random cancellations among the \emph{conditionally independent} and \emph{mean-zero} \textsc{Halve}\xspace errors.
Without these properties, the errors from each \textsc{Halve}\xspace call could compound without cancellation leading to a significant degradation in \textsc{Compress}\xspace quality.
Let us now unpack the most important implications of \cref{thm:compress_sub_gamma}.
\newcommand{\tau}{\tau}
\begin{remark}[Near-linear runtime and quadratic speed-ups for \textsc{Compress}\xspace]\normalfont
\label{rem:runtime_speedup}
\cref{thm:compress_sub_gamma} implies that a quadratic-time \textsc{Halve}\xspace with $r_{\textsc{H}}(n) = n^2$ yields a near-linear time \textsc{Compress}\xspace with $r_{\textsc{C}}(n) \leq 4^{\ensuremath{\mfk{g}}\xspace + 1}\,n (\log_4(n)-\ensuremath{\mfk{g}}\xspace)$.
If \textsc{Halve}\xspace instead has super-quadratic runtime $r_{\textsc{H}}(n) = n^\tau$, \textsc{Compress}\xspace enjoys a quadratic speed-up:
%
$r_{\textsc{C}}(n) \leq c_{\tau}'\,n^{\tau/2}$ for $c_{\tau}' \defeq \frac{2^{\tau(\ensuremath{\mfk{g}}\xspace+2)}}{2^{\tau}-4}$.
More generally, whenever \textsc{Halve}\xspace has superlinear runtime $r_{\textsc{H}}(n) = n^\tau\, \rho(n)$ for some $\tau\geq 1$ and non-decreasing $\rho$, \textsc{Compress}\xspace satisfies
\begin{talign}
r_{\textsc{C}}(n)
\leq
\begin{cases}
c_{\tau} \cdot n\, (\log_4(n)-\ensuremath{\mfk{g}}\xspace)\,\rho(\l_n) & \text{for } \tau \leq 2 \\
c_{\tau}'\cdot n^{\tau/2}\, \rho(\l_n) & \text{for } \tau > 2
\end{cases}
\qtext{where}
c_{\tau} \defeq 4^{(\tau-1)(\ensuremath{\mfk{g}}\xspace+1)}.
\end{talign}
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\end{remark}
\begin{remark}[\tbf{\textsc{Compress}\xspace inflates sub-Gaussian error by at most $\mbi{\sqrt{\log_4 n}}$}]\label{rem:compress_error_inflation} \label{rem:compress_approx}
\normalfont
\cref{thm:compress_sub_gamma} also implies
\begin{talign}
\nu_{\textsc{C}} (n) \leq \sqrt{\beta_n+1}\,\nu_{\textsc{H}}(\l_n) = \sqrt{\log_4 n - \ensuremath{\mfk{g}}\xspace}\, \nu_{\textsc{H}}(\l_n)
%
%
\end{talign}
in the usual case that $ n \, \nu_{\textsc{H}}(n) $
is non-decreasing in $n$. Hence the sub-Gaussian error of \textsc{Compress}\xspace is at most $\sqrt{\log_4 n}$ larger than that of halving an input of size $\l_n$.
This is an especially strong benchmark, as $\l_n$ is twice the output size of \textsc{Compress}\xspace, and thinning from $n$ to $\frac{\l_n}{2}$ points should incur at least as much approximation error as halving from $\l_n$ to $\frac{\l_n}{2}$ points.
%
%
\end{remark}
\begin{example}[\hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace-\textsc{Compress}\xspace] \label{ex:ktsplitcompress}
\normalfont
Consider running \textsc{Compress}\xspace with, for each \textsc{Halve}\xspace input of size $\l$, $\textsc{Halve}\xspace =
\hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace(\frac{\l^2}{n 4^{\ensuremath{\mfk{g}}\xspace+1} (\beta_n+1)} \delta)
$ from \cref{ex:kh_subgamma}.
Since \hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace runs in time $\Theta(n^2)$, \textsc{Compress}\xspace runs in near-linear $\mc{O}(n \log n)$ time by \cref{rem:runtime_speedup}.
In addition, as we detail in \cref{sec:proof_of_ktsplitcompress}, on an event of probability $1-\frac{\delta}{2}$, every \textsc{Halve}\xspace call invoked by \textsc{Compress}\xspace is $f$-sub-Gaussian with
\begin{talign}\label{eq:ktsplit-halve-subgsn}
\nu_{\textsc{H}}(\ell)
= \frac{4}{\ell\sqrt{3}} \sqrt{ \log(
\frac{12n4^{\ensuremath{\mfk{g}}\xspace} (\beta_n+1)}{\ell \delta}
%
) \infnorm{\mbf{k}}}
\qtext{for all $f$ with $\knorm{f}=1$.}
\end{talign}
Hence, \cref{rem:compress_error_inflation} implies that \textsc{Compress}\xspace is $f$-sub-Gaussian on the same event with
$\nu_{\textsc{C}}(n) \! \leq \! \sqrt{\log_4 n\! -\! \ensuremath{\mfk{g}}\xspace}\, \nu_{\textsc{H}}(\l_n),$
a guarantee within $\sqrt{\log_4 n}$ of the original $\hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace(\delta)$ error \cref{eq:ktsplit_subgauss}.
\hfill\small{\ensuremath{\blacksquare}}
\end{example}
% NOTE(review): the following definitions are invalid LaTeX (a \newcommand name must
% be a single control sequence, not `C_{\tbf{KH}}` etc.); the macro names were
% apparently expanded in-place during source preparation. Commented out —
% TODO: restore the intended macro names.
% \newcommand{C_{\tbf{KH}}}{C_{\tbf{KH}}}
% \newcommand{C'_{\tbf{KH}}}{C'_{\tbf{KH}}}
% \newcommand{C_{\tbf{GS}}}{C_{\tbf{GS}}}
% \newcommand{C'_{\tbf{GS}}}{C'_{\tbf{GS}}}
% \newcommand{C_{\tbf{GSq}}}{C_{\tbf{GSq}}}
% \newcommand{C'_{\tbf{GSq}}}{C'_{\tbf{GSq}}}
\subsection{MMD guarantees for \textsc{Compress}\xspace}
Next, we bound the MMD error of \textsc{Compress}\xspace in terms of the MMD error of \textsc{Halve}\xspace. Recall that $\mmd_{\mbf{k}}$ \cref{eq:kernel_mmd_distance} represents the worst-case integration error across the unit ball of the RKHS of $\mbf{k}$.
Its proof, based on the concentration of subexponential matrix martingales, is provided in \cref{sec:compress_mmd}.
% NOTE(review): invalid \newcommand (the command name contains plain text, not a
% control sequence); the macro name was apparently expanded in-place during source
% preparation. Commented out — TODO: restore the intended macro name.
% \newcommand{MMD guarantees for \textsc{Compress}\xspace}{MMD guarantees for \textsc{Compress}\xspace}
\begin{theorem}[MMD guarantees for \textsc{Compress}\xspace]
\label{thm:compress_mmd}
Suppose $\textsc{Halve}\xspace \in \mc{G}_{\kernel}(a, v)$ for $n\,a_n$ and $n\, v_n$ non-decreasing and $\Earg{\P_{\textsc{Halve}\xspace}\mbf{k}\mid \cset_{\mrm{in}}} = \P_{\mrm{in}} \mbf{k}$.
Then $\textsc{Compress}\xspace \in \mc{G}_{\kernel}(\wtil{a}, \wtil{v})$ with
\begin{talign}
%
%
\wtil{v}_n \defeq 4(a_{\l_n} \!+\! v_{\l_n})\sqrt{2(\log_4 n\!-\!\ensuremath{\mfk{g}}\xspace)},
\qtext{and}
\wtil{a}_n \defeq \wtil{v}_n \sqrt{\log(n\!+\!1)},
\label{eq:cp_params}
\end{talign}
where $\l_n = 2^{\ensuremath{\mfk{g}}\xspace+1} \sqrt{n}$ as in \cref{thm:compress_sub_gamma}.
\end{theorem}
\begin{remark}[Symmetrization]
\label{rem:sym}
\normalfont
We can convert any halving algorithm into one that satisfies
the unbiasedness condition $\Earg{\P_{\textsc{Halve}\xspace}\mbf{k}\mid \cset_{\mrm{in}}} = \P_{\mrm{in}} \mbf{k}$ without impacting integration error by \emph{symmetrization}, i.e., by returning either the outputted half or its complement with equal probability.
\end{remark}
% NOTE(review): invalid \newcommand (`10` is not a control sequence); the macro name
% was apparently expanded in-place during source preparation. Commented out —
% TODO: restore the intended macro name (cf.\ \ifactortwo, \ifactorthree below).
% \newcommand{10}{10}
\newcommand{\ifactortwo}{8.5} %
\newcommand{\ifactorthree}{3.1} %
\begin{remark}[\tbf{\textsc{Compress}\xspace inflates MMD guarantee by at most $\mbi{10 \log(n\!+\!1)}$}]\label{rem:compress_mmd_inflation}
\normalfont
\cref{thm:compress_mmd} implies that the $\mbf{k}$-sub-Gaussian error of \textsc{Compress}\xspace is always at most \mbox{$10\log (n\!+\!1)$} times that of \textsc{Halve}\xspace with input size $\l_n\! =\! 2^{\ensuremath{\mfk{g}}\xspace+1} \sqrt n$ since
\begin{talign}
\vareps_{\mbf{k}, \textsc{Compress}\xspace}(n) \seq{\mrm{\cref{def:mmd_subgamma_algo}}} \max(\wtil{a}_n, \wtil{v}_n)
&\sless{\cref{eq:cp_params}}
10 \log (n+1) \max(a_{\l_n}, v_{\l_n})
= 10 \log(n+1) \cdot \vareps_{\mbf{k}, \textsc{Halve}\xspace}(\l_n).
\end{talign}
As in \cref{rem:compress_error_inflation}, \textsc{Halve}\xspace applied to an input of size $\l_n$ is a particularly strong benchmark, as
thinning from $n$ to $\frac{\l_n}{2}$ points should incur at least as much MMD error as halving from $\l_n$ to $\frac{\l_n}{2}$.
\end{remark}
\begin{example}[\textsc{KT}\xspace-\textsc{Compress}\xspace] \label{ex:ktcompress}
\normalfont
Consider running \textsc{Compress}\xspace with, for each \textsc{Halve}\xspace input of size $\l$, $\textsc{Halve}\xspace = \textsc{KT}\xspace(\frac{\l^2}{n 4^{\ensuremath{\mfk{g}}\xspace+1} (\beta_n+1)} \delta)
$ from \cref{ex:kt_subgamma} after symmetrizing as in \cref{rem:sym}.
Since \textsc{KT}\xspace has $\Theta(n^2)$ runtime, \textsc{Compress}\xspace yields near-linear $\mc{O}(n \log n)$ runtime by \cref{rem:runtime_speedup}.
Moreover, as we detail in \cref{sec:proof_of_ktcompress}, using the notation of \cref{ex:kt_subgamma}, on an event $\mc E$ of probability at least $1-\frac{\delta}{2}$, every \textsc{Halve}\xspace call invoked by \textsc{Compress}\xspace is $\mbf{k}$-sub-Gaussian with
\begin{talign}
a_\l = \frac{2C_{a}}{\l}\sqrt{\staticinfnorm{\kernel_{\mrm{split}}}},
\qtext{and}
v_\l = \frac{2C_{v}}{\l}\sqrt{\staticinfnorm{\kernel_{\mrm{split}}}\log(
\frac{12n4^{\ensuremath{\mfk{g}}\xspace} (\beta_n+1)}{\ell \delta})}\ \mathfrak{M}_{\cset_{\mrm{in}},\kernel_{\mrm{split}}}.
\end{talign}
Thus, \cref{rem:compress_mmd_inflation} implies that, on $\mc E$, \textsc{KT}\xspace-\textsc{Compress}\xspace has $\mbf{k}$-sub-Gaussian error
$\vareps_{\mbf{k}, \textsc{Compress}\xspace}(n)\! \leq\! 10 \log(n\!+\!1) \vareps_{\mbf{k}, \textsc{Halve}\xspace}(\l_n)$, a guarantee within $10 \log(n\!+\!1)$ of the original $\textsc{KT}\xspace(\delta)$ MMD error \cref{eq:kt_params}.
%
\hfill\small{\ensuremath{\blacksquare}}
\end{example}
\section{Additional Definitions and Notation}
\label{sec:add_notation}
This section provides additional definitions and notation used throughout the appendices.
%
%
We associate with each algorithm \textsc{Alg}\xspace and input $\cset_{\mrm{in}}$ the {measure difference}
\begin{talign}
\label{eq:psi_alg}
\phi_{\textsc{Alg}\xspace} \left( \cset_{\mrm{in}} \right) &\defeq \P_{\cset_{\mrm{in}}} - \P_{\mc{S}_{\textsc{Alg}\xspace}}
= \frac{1}{n} \sum_{x \in \cset_{\mrm{in}} } \delta_x - \frac{1}{ n_{\mrm{out}} } \sum_{x \in \mc{S}_{\textsc{Alg}\xspace}} \delta_x
\end{talign}
%
%
%
%
%
%
%
%
%
that characterizes how well the output empirical distribution approximates the input. %
We will often write $\phi_{\textsc{Alg}\xspace}$ instead of $\phi_{\textsc{Alg}\xspace}(\cset_{\mrm{in}})$ for brevity if $\cset_{\mrm{in}}$ is clear from the context.
We also make use of the following standard definition of a sub-Gaussian random variable \citep[see, e.g., ][Sec.~2.3]{boucheron2013concentration}.
\begin{definition}[Sub-Gaussian random variable]
\label{def:subgamma}
We say that a random variable $G $ is sub-Gaussian with parameter $ \nu $ and write $G \in\mathcal{G} (\nu)$ if
\begin{talign}
\mathbb{E} \left[ \exp \left( \lambda \, G \right) \right] \leq \exp\left( \frac{ \lambda^2 \nu^2 }{2} \right)
\qtext{for all} {\lambda \in \ensuremath{\mathbb{R}}}.
\end{talign}
\end{definition}
%
%
%
%
%
%
%
%
%
Given \cref{def:subgamma}, it follows that
$ \textsc{Alg}\xspace \in \mathcal{G}^{f} \left( \nu \right) $ for a function $f$ as in \cref{def:subgamma_algo} if and only if the random variable $ \phi_{\textsc{Alg}\xspace}(f) \defeq \P_{\cset_{\mrm{in}}}f - \P_{\mc{S}_{\textsc{Alg}\xspace}}f$
%
is sub-Gaussian with parameter $\nu$ conditional on the input $\cset_{\mrm{in}}$.
In our proofs, it is often more convenient to work with an unnormalized \emph{measure discrepancy}
\begin{talign}
\label{eq:psi_phi_reln}
\psi_{\textsc{Alg}\xspace}(\cset_{\mrm{in}}) \defeq n \cdot \phi_{\textsc{Alg}\xspace}(\cset_{\mrm{in}}) \seq{\cref{eq:psi_alg}} \sum_{x \in \cset_{\mrm{in}} } \delta_x - \frac{n}{ n_{\mrm{out}} } \sum_{x \in \mc{S}_{\textsc{Alg}\xspace}} \delta_x.
%
\end{talign}
By definition \cref{eq:psi_phi_reln}, we have the following useful equivalence:
\begin{talign}
\label{eq:phi_psi_equivalence}
\psi_{\textsc{Alg}\xspace}(f) \defeq n \cdot \phi_{\textsc{Alg}\xspace}(f) \in \mathcal{G} \left( \sigma_{\textsc{Alg}\xspace} \right)
\Longleftrightarrow
\phi_{\textsc{Alg}\xspace}(f) \in \mathcal{G} \left( \nu_{\textsc{Alg}\xspace} \right)
\qtext{ for }
\sigma_{\textsc{Alg}\xspace} \!=\! n \cdot \nu_{\textsc{Alg}\xspace}.
\end{talign}
%
%
%
%
%
%
%
%
\section{\textsc{Compress}\xspace}
\label{sec:compress}
\vspace{-3mm}
The core subroutine of \textsc{Compress++}\xspace is a new meta-procedure called \textsc{Compress}\xspace that,
given a halving algorithm \textsc{Halve}\xspace, an oversampling parameter\xspace \ensuremath{\mfk{g}}\xspace, and $n$ input points, outputs a thinned coreset of size $2^\ensuremath{\mfk{g}}\xspace \sqrt{n}$.
The \textsc{Compress}\xspace algorithm (\cref{algo:compress}) is very simple to implement: first, divide the input points into four subsequences of size $\frac n4$ (in any manner the user chooses); second, recursively call \textsc{Compress}\xspace on each subsequence to produce four coresets of size $ 2^{\ensuremath{\mfk{g}}\xspace-1} \sqrt{n} $; finally, call \textsc{Halve}\xspace on the concatenation of those coresets to produce the final output of size $2^\ensuremath{\mfk{g}}\xspace \sqrt{n}$.
As we show in \cref{sec:compress_streaming},
\textsc{Compress}\xspace can also be implemented in a streaming fashion to consume at most $\mc{O}(4^{\ensuremath{\mfk{g}}\xspace} \sqrt{n})$ memory. %
\section{Discussion and Conclusions}
\label{sec:conclusion}
\vspace{-3mm}
We introduced a new general meta-procedure, \textsc{Compress++}\xspace, for speeding up thinning algorithms while preserving their error guarantees up to a factor of $4$.
When combined with the quadratic-time \hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace and kernel thinning algorithms of \citet{dwivedi2021kernel,dwivedi2022generalized}, the result is near-optimal distribution compression in near-linear time.
Moreover, the same simple approach can be combined with any slow thinning algorithm to obtain comparable summaries in a fraction of the time.
Two open questions recommend themselves for future investigation.
First, why does Herd-\textsc{Compress++}\xspace improve upon the original kernel herding algorithm in lower dimensions, and can this improvement be extended to higher dimensions and to other algorithms?
Second, is it possible to thin significantly faster than \textsc{Compress++}\xspace without significantly sacrificing approximation error?
Lower bounds tracing out the computational-statistical trade-offs in distribution compression would provide a precise benchmark for optimality and point to any remaining opportunities for improvement.
\section{Supplementary Details for Experiments}
\label{sec:additional_experiments}
In this section, we provide supplementary experiment details deferred from \cref{sec:experiments}, as well as some additional results.
In the legend of each MMD plot, we display an empirical rate of decay.
In all experiments involving kernel thinning, we set the algorithm failure probability parameter $\delta = \frac{1}{2}$ %
and compare $\textsc{KT}\xspace(\delta)$ to \textsc{Compress}\xspace and \textsc{Compress++}\xspace with $\textsc{Halve}\xspace$ and $\textsc{Thin}\xspace$ set as in \cref{ex:ktcompress,ex:ktcompresspp} respectively.
\subsection{Mixture of Gaussian target details and MMD plots}
\label{sec:mog_supplement}
For the target used for coreset visualization in \cref{fig:8mog}, the mean locations are on two concentric circles of radii $10$ and $20$, and are given by
\begin{talign}
\mu_j =\alpha_j \begin{bmatrix} \sin(j)\\\cos(j) \end{bmatrix}
\qtext{where $\alpha_j = 10 \cdot \mbf 1(j\leq 16) + 20 \cdot \mbf 1(j>16)$}
\qtext{for $j = 1, 2, \ldots, 32$.}
\end{talign}
Here we also provide additional results with mixture of Gaussian targets given by
$\P = \frac{1}{M}\sum_{j=1}^{M}\mc{N}(\mu_j, \mbf{I}_d)$ for $M \in \braces{4, 6, 8}$. The mean locations for these are given by
\begin{talign}
\label{eq:mog_description}
\mu_1 &= [3, 3]^\top, \quad\ \ \mu_2 = [-3, 3]^\top, \quad \mu_3 = [-3, -3]^\top, \quad \mu_4 = [3, -3]^\top,\\ % NOTE(review): \mu_1 corrected from duplicate [-3,3]^\top so the four means cover the corners (\pm 3, \pm 3) — confirm against the experiment code
\mu_5 &= [0, 6]^\top, \qquad \mu_6 = [-6, 0]^\top, \quad \mu_7 = [6, 0]^\top, \qquad \mu_8 = [0, -6]^\top.
\end{talign}
\cref{fig:mog} plots the MMD errors of the KT and herding experiments for the
mixture of Gaussians targets with $4$, $6$, and $8$ centers. Notice again that \textsc{Compress++}\xspace provides performance competitive with the original algorithm and, in fact, surprisingly improves upon herding.
\begin{figure}[ht!]
\centering
\begin{tabular}{c}
\includegraphics[width=\linewidth]{mog_kt_mmd_4_10.pdf}\\
\hline
\includegraphics[width=\linewidth]{mog_herd_mmd_4_9.pdf}
\end{tabular}
\caption{For $M$-component mixture of Gaussian targets, KT-\textsc{Compress++}\xspace and Herd-\textsc{Compress++}\xspace improve upon the MMD of \textrm{i.i.d.}\ sampling (ST) and closely track or improve upon the error of their quadratic-time input algorithms, KT and kernel herding (Herd).
See \cref{sec:mog_supplement} for more details.}
\label{fig:mog}
\end{figure}
%
%
%
%
%
\subsection{Details of MCMC targets}
Our set-up for the MCMC experiments is identical to that of \citet[Sec.~6]{dwivedi2021kernel}, except that we use all post-burn-in points to generate our Goodwin and Lotka-Volterra input point sequences $\cset_{\mrm{in}}$ instead of only the odd indices. In particular, we use the MCMC output of~\citet{DVN/MDKNWM_2020} described in~\citep[Sec. 4]{riabiz2020optimal} and perform thinning experiments after discarding the burn-in points. To generate an input $\cset_{\mrm{in}}$ of size $n$ for a thinning algorithm, we downsample the post-burn-in points using standard thinning. For Hinch, we additionally do coordinate-wise normalization by subtracting the sample mean and dividing by sample standard deviation of the post-burn-in-points.
In \cref{sec:experiments}, RW and ADA-RW respectively refer to Gaussian random walk and adaptive Gaussian random walk Metropolis algorithms \citep{haario1999adaptive} and MALA and pMALA respectively refer to the Metropolis-adjusted Langevin algorithm \citep{roberts1996exponential} and pre-conditioned MALA \citep{girolami2011riemann}. For Hinch experiments, RW 1 and RW 2 refer to two independent runs of Gaussian random walk, and ``Tempered'' denotes the runs targeting a tempered Hinch posterior.
For more details on the set-up, we refer the reader to \citet[Sec.~6.3, App.~J.2]{dwivedi2021kernel}.
\section{Experiments}
\label{sec:experiments}
We now turn to an empirical evaluation of the speed-ups and error of \textsc{Compress++}\xspace.
%
%
%
We begin by describing the thinning algorithms, compression tasks, evaluation metrics, and kernels used in our experiments.
Supplementary experimental details and results can be found in \cref{sec:additional_experiments}.
\iidmmdfig
\para{Thinning algorithms} Each experiment compares a high-accuracy, quadratic time thinning algorithm---either target kernel thinning~\citep{dwivedi2022generalized} or kernel herding~\citep{chen2012super}---with our near-linear time \textsc{Compress}\xspace and \textsc{Compress++}\xspace variants that use the same input algorithm to \textsc{Halve}\xspace and \textsc{Thin}\xspace. In each case, we perform root thinning, compressing $n$ input points down to $\sqrt n $ points, so that \textsc{Compress}\xspace is run with $\ensuremath{\mfk{g}}\xspace=0$.
For \textsc{Compress++}\xspace, we use $\ensuremath{\mfk{g}}\xspace = 4$ throughout to satisfy the small relative error criterion \cref{eq:ossymb-condition} in all experiments.
When halving we restrict each input algorithm to return distinct points and symmetrize the output as discussed in \cref{rem:sym}.
%
\para{Compressing \textrm{i.i.d.}\ summaries} To demonstrate the advantages of \textsc{Compress++}\xspace over equal-sized \textrm{i.i.d.}\ summaries we compress input point sequences $\cset_{\mrm{in}}$ drawn \textrm{i.i.d.}\ from either (a) Gaussian targets $\P = \mc N(0, \mbf{I}_d)$ with $d \in \braces{2, 4, 10, 100}$ or (b) $M$-component mixture of Gaussian targets $\P = \frac{1}{M}\sum_{j=1}^{M}\mc{N}(\mu_j, \mbf{I}_2)$ with $M \in \braces{4, 6, 8, 32}$ and component means $\mu_j\in\reals^2$ defined in \cref{sec:additional_experiments}.
\para{Compressing MCMC summaries}
To demonstrate the advantages of \textsc{Compress++}\xspace over standard MCMC thinning, we also compress input point sequences $\cset_{\mrm{in}}$
generated by a variety of popular MCMC algorithms (denoted by RW, ADA-RW, MALA, and pMALA)
targeting four challenging Bayesian posterior distributions $\P$.
%
%
%
%
In particular, we adopt the four posterior targets of \citet{riabiz2020optimal} based on the \emph{\citet{goodwin1965oscillatory} model} of oscillatory enzymatic control ($d=4$), the \emph{\citet{lotka1925elements,volterra1926variazioni} model} of oscillatory predator-prey evolution ($d=4$), the \emph{\citet{hinch2004simplified} model} of calcium signalling in cardiac cells ($d=38$), and a tempered Hinch model posterior ($d=38$).
%
%
%
%
%
%
%
%
%
%
Notably, for the Hinch experiments, each summary point discarded via an accurate thinning procedure saves 1000s of downstream CPU hours by avoiding an additional critically expensive whole-heart simulation \citep{riabiz2020optimal}.
See \cref{sec:additional_experiments} for MCMC algorithm and target details.
%
%
\para{Kernel settings} Throughout we use a Gaussian kernel $\mbf{k}(x, y) = \exp(-\frac1{2\sigma^2} \twonorm{x-y}^2)$ with $\sigma^2$ as specified by \citet[Sec. K.2]{dwivedi2021kernel} for the MCMC targets and $\sigma^2=2d$ otherwise.%
%
%
%
%
\mcmcmmdfig
\para{Evaluation metrics} For each thinning procedure we report mean runtime across 3 runs and mean MMD error across 10 independent runs $\pm$ 1 standard error (the error bars are often too small to be visible). All runtimes were measured on a single core of an Intel Xeon CPU. %
For the \textrm{i.i.d.}\ targets, we report $\mmd_{\mbf{k}}(\P, \P_{\mrm{out}})$ which can be exactly computed in closed-form. For the MCMC targets, we report the thinning error $\mmd_{\mbf{k}}(\P_{\mrm{in}}, \P_{\mrm{out}})$ analyzed directly by our theory (\cref{thm:compress_mmd,thm:compressppmmd}).
%
%
%
%
%
%
\para{Kernel thinning results}
We first apply \textsc{Compress++}\xspace to the near-optimal KT algorithm to obtain comparable summaries at a fraction of the cost.
\cref{fig:gauss,fig:mcmc} reveal that, in line with our guarantees, KT-\textsc{Compress++}\xspace matches or nearly matches the MMD error of KT in all experiments while also substantially reducing runtime.
For example, KT thins $65000$ points in $10$ dimensions in $20$m, while KT-\textsc{Compress++}\xspace needs only $1.5$m; KT takes more than a day to thin $250000$ points in $100$ dimensions, while KT-\textsc{Compress++}\xspace takes less than an hour (a 32$\times$ speed-up).
For reference we also display the error of standard thinning (ST) to highlight that KT-\textsc{Compress++}\xspace significantly improves approximation quality relative to the standard practice of \textrm{i.i.d.}\ summarization or standard MCMC thinning.
See \cref{fig:mog} in \cref{sec:mog_supplement} for analogous results with mixture of Gaussian targets.
%
%
%
%
%
%
%
%
%
%
\para{Kernel herding results}
A strength of \textsc{Compress++}\xspace is that it can be applied to any thinning algorithm, including those with suboptimal or unknown performance guarantees that often perform well in practice.
In such cases, \cref{rem:compresspp_error,rem:compress_mmd_inflation} still ensure that \textsc{Compress++}\xspace error is never much larger than that of the input algorithm.
As an illustration, we apply \textsc{Compress++}\xspace to the popular quadratic-time kernel herding algorithm (Herd).
\cref{fig:gauss} shows that Herd-\textsc{Compress++}\xspace matches or nearly matches the MMD error of Herd in all experiments while also substantially reducing runtime.
For example, Herd requires more than $11$ hours to compress $250000$ points in $100$ dimensions, while Herd-\textsc{Compress++}\xspace takes only $14$ minutes (a 45$\times$ speed-up).
Moreover, surprisingly, Herd-\textsc{Compress++}\xspace is consistently more accurate than the original kernel herding algorithm for lower dimensional problems.
See \cref{fig:mog} in \cref{sec:mog_supplement} for comparable results with mixture of Gaussian $\P$.
%
\mogfig
%
%
\para{Visualizing coresets}
For a 32-component mixture of Gaussians target, \cref{fig:8mog} visualizes the coresets produced by \textrm{i.i.d.}\ sampling, KT, kernel herding, and their \textsc{Compress++}\xspace variants. The \textsc{Compress++}\xspace coresets closely resemble those of their input algorithms and, compared with \textrm{i.i.d.}\ sampling, yield visibly improved stratification across the mixture components.
%
%
\section{Introduction}
Distribution compression---constructing a concise summary of a probability distribution---is at the heart of many learning and inference tasks. For example, in Monte Carlo integration and Bayesian inference, $n$ representative points are sampled \textrm{i.i.d.}\ or from a Markov chain to approximate expectations and quantify uncertainty under an intractable (posterior) distribution~\citep{robert1999monte}. However, these standard sampling strategies are not especially concise.
For instance, the Monte Carlo estimate $\P_{\mrm{in}} f \defeq \frac1n \sum_{i=1}^n f(x_i)$ of an unknown expectation $\P f \defeq \ensuremath{{\mathbb{E}}}_{X\sim \P}[f(X)]$ based on $n$ \textrm{i.i.d.}\ points has $\Theta(n^{-\frac{1}{2}})$ integration error $\abss{\Pf-\P_{\mrm{in}}f}$, requiring $10000$ points for $1\%$ relative error and $10^6$ points for $0.1\%$ error.
Such bloated sample representations preclude downstream applications with critically %
expensive function
evaluations like computational cardiology, where a 1000-CPU-hour tissue or organ simulation is required for each sample point~\citep{niederer2011simulating,augustin2016anatomically,strocchi2020simulating}. %
To restore the feasibility of such critically expensive tasks, it is common to thin down the initial point sequence to produce a much smaller coreset. The standard thinning approach, selecting every $t$-th point~\citep{owen2017statistically}, while simple, often
leads to a substantial increase in error: e.g., standard thinning $n$ points from a fast-mixing Markov chain yields
$\Omega(n^{-\frac{1}{4}})$ error when $n^{\frac{1}{2}}$ points are returned. Recently, \cite{dwivedi2021kernel} introduced a more effective alternative, \emph{kernel thinning} (KT), that provides near optimal $\wtil{\order}_d(n^{-\frac{1}{2}})$ error when compressing $n$ points in $\ensuremath{\mathbb{R}}^d$ down to size $n^{\frac{1}{2}}$. While practical for moderate sample sizes, the runtime of this algorithm scales quadratically with the input size $n$, making its execution prohibitive for very large $n$. Our goal is to significantly improve the runtime of such compression algorithms while providing comparable error guarantees.
\para{Problem setup}
Given a sequence $\cset_{\mrm{in}}$ of $n$ input points summarizing a target distribution $\P$, our aim is to identify a high quality coreset $\cset_{\mrm{out}}$ of size $\sqrt{n}$ in time nearly linear in $n$.
We measure coreset quality
via its integration error
$|\P f - \P_{\cset_{\mrm{out}}} f| \defeq |\P f - \frac{1}{|\cset_{\mrm{out}}|}\sum_{x\in\cset_{\mrm{out}}} f(x)|$
for functions $f$ in the reproducing kernel Hilbert space (RKHS) $\mc{H}_{\kernel}$ induced by a given kernel $\mbf{k}$~\citep{berlinet2011reproducing}.
We consider both single function error and kernel \emph{maximum mean discrepancy} \citep[MMD,][]{JMLR:v13:gretton12a}, the worst-case integration error over the unit RKHS norm ball: %
\begin{talign}
\mmd_{\mbf{k}}(\P, \P_{\cset_{\mrm{out}}})&\defeq \sup_{\knorm{f}\leq 1}\abss{\Pf-\P_{\cset_{\mrm{out}}}f}.
%
\label{eq:kernel_mmd_distance}
\end{talign}
\para{Our contributions}
We introduce a new simple meta procedure---\textsc{Compress++}\xspace---that significantly speeds up a generic thinning algorithm while simultaneously inheriting the error guarantees of its input up to a factor of $4$. A direct application of \textsc{Compress++}\xspace to KT improves its quadratic $\Theta(n^2)$ runtime to near linear $\mc{O}(n \log^3 n)$ time while preserving its error guarantees.
Since the $\wtil{\order}_d(n^{-\frac{1}{2}})$ KT MMD guarantees of \citet{dwivedi2021kernel} match the $\Omega(n^{-\frac{1}{2}})$ minimax lower bounds of \citet{tolstikhin2017minimax,phillips2020near} up to factors of $\sqrt{\log n}$ and constants depending on $d$, KT-\textsc{Compress++}\xspace also provides near-optimal MMD compression for a wide range of $\mbf{k}$ and $\P$.
Moreover, %
the practical gains from applying \textsc{Compress++}\xspace are substantial: KT thins $65,000$ points in 10 dimensions in $20$m, while KT-\textsc{Compress++}\xspace needs only 1.5m; KT takes more than a day to thin $250,000$ points in $100$ dimensions, while KT-\textsc{Compress++}\xspace takes less than 1hr (a 32$\times$ speed-up). For larger $n$, the speed-ups are even greater due to the order $\frac{n}{\log^3 n}$ reduction in runtime.
\textsc{Compress++}\xspace can also be directly combined with any thinning algorithm, even those that have suboptimal guarantees but often perform well in practice, like kernel herding~\citep{chen2012super}, MMD-critic~\citep{kim2016examples},
and Stein thinning~\citep{riabiz2020optimal}, all of which run in $\Omega(n^2)$ time.
As a demonstration, we combine \textsc{Compress++}\xspace with the popular kernel herding algorithm and observe 45$\times$ speed-ups when compressing $250,000$ input points.
In all of our experiments, \textsc{Compress++}\xspace leads to minimal loss in accuracy and, surprisingly, even improves upon herding accuracy for lower-dimensional problems.
Most related to our work are the merge-reduce algorithms of \citet{matousek1995approximations,chazelle1996linear,phillips2008algorithms} which also speed up input thinning algorithms while controlling approximation error.
In our setting, merge-reduce runs in time $\Omega(n^{1.5})$ given an $n^2$-time input and in time $\Omega(n^{(\tau+1)/2})$ for slower $n^{\tau}$-time inputs \citep[see, e.g.,][Thm.~3.1]{phillips2008algorithms}.
In contrast, \textsc{Compress++}\xspace runs in near-linear $\mc{O}(n \log^3 n)$ time for any $n^2$-time input and in $\mc{O}(n^{\tau/2}\log^\tau n)$ time for slower $n^\tau$-time inputs.
After providing formal definitions in \cref{sec:definitions}, we introduce and analyze \textsc{Compress++}\xspace and its primary subroutine \textsc{Compress}\xspace in \cref{sec:compress,sec:compresspp},
demonstrate the empirical benefits of \textsc{Compress++}\xspace in \cref{sec:experiments}, and present conclusions and opportunities for future work in~\cref{sec:conclusion}.
\subsubsection*{Reproducibility Statement}
See the \texttt{goodpoints} Python package for Python implementations of all methods in this paper and
\begin{center}
\url{https://github.com/microsoft/goodpoints}
\end{center}
for code reproducing each experiment.
\subsubsection*{Acknowledgments}
We thank Carles Domingo-Enrich for alerting us that an outdated proof of \cref{thm:compress_mmd} was previously included in the appendix.
RD acknowledges the support by the National Science Foundation under Grant No. DMS-2023528 for the Foundations of Data Science Institute (FODSI). Part of this work was done when AS was interning at Microsoft Research New England.
\bibliographystyle{iclr2022_conference}
{\small
\section{Thinning and Halving Algorithms}
\label{sec:definitions}
\vspace{-3mm}
We begin by defining the thinning and halving algorithms that our meta-procedures take as input.
\begin{definition}[\tbf{Thinning and halving algorithms}]
\label{def:thinning_algo}
A \emph{thinning algorithm} \textsc{Alg}\xspace
takes as input a point sequence $\cset_{\mrm{in}}$ of length $n$ and returns a (possibly random) point sequence $\cset_{\alg}$ of length $n_{\mrm{out}}$.
We say \textsc{Alg}\xspace is
%
\emph{$\alpha_n$-thinning} if $n_{\mrm{out}} = \floor{n/\alpha_n}$ and \emph{root-thinning} if $\alpha_n = \sqrt n$.
Moreover, we call \textsc{Alg}\xspace a \emph{halving algorithm} if $\cset_{\alg}$ always contains exactly $\floor{\frac{n}{2}}$ of the input points.
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\end{definition}
Many thinning algorithms offer high-probability bounds on the integration error $|\P_{\cset_{\mrm{in}}} f - \P_{\mc{S}_{\textsc{Alg}\xspace}} f|$.
We capture such bounds abstractly using the following definition of a sub-Gaussian thinning algorithm. %
\begin{definition}[\tbf{Sub-Gaussian thinning algorithm}]\label{def:subgamma_algo}
For a function $f$, we call a thinning algorithm $\textsc{Alg}\xspace$ \emph{$f$-sub-Gaussian} with parameter $\nu$ and write $\textsc{Alg}\xspace \in \mathcal{G}^{f} (\nu)$ if
\begin{talign}
\mathbb{E}[\exp(\lambda(\P_{\cset_{\mrm{in}}} f - \P_{\mc{S}_{\textsc{Alg}\xspace}} f))\mid \cset_{\mrm{in}}]
\leq
\exp\parenth{\frac{\lambda^2\nu^2(n)}{2}}
\qtext{for all}
\lambda \in \ensuremath{\mathbb{R}}.
\end{talign}
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\end{definition}
\vspace{-3mm}
\cref{def:subgamma_algo} is equivalent to a sub-Gaussian tail bound for the integration error, and, by \citet[Section 2.3]{boucheron2013concentration},
if $\textsc{Alg}\xspace \in \mathcal{G}^{f} (\nu)$ then
$\mathbb{E}[\P_{\mc{S}_{\textsc{Alg}\xspace}} f\mid \cset_{\mrm{in}}] = \P_{\cset_{\mrm{in}}} f$
and, for all $\delta \in (0,1)$,
\begin{talign}
\label{eq:tail_bounds}
|\P_{\cset_{\mrm{in}}} f \!-\! \P_{\mc{S}_{\textsc{Alg}\xspace}} f| \leq \nu(n) \sqrt{2\log (\frac2\delta) },
\stext{with probability at least}
1-\delta
\stext{given} \cset_{\mrm{in}}.
%
%
%
%
\end{talign}
Hence the integration error of $\textsc{Alg}\xspace$ is dominated by the sub-Gaussian parameter $\nu(n)$.
\begin{example}[\hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace]%
\label{ex:kh_subgamma}
\normalfont
Given a kernel $\mbf{k}$ and $n$ input points $\cset_{\mrm{in}}$, the $\hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace(\delta)$ algorithm\footnote{\label{footnote:ktdelta}The $\delta$ argument of $\hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace(\delta)$ or $\textsc{KT}\xspace(\delta)$ indicates that each parameter $\delta_i = \frac{\delta}{\l}$ in \citet[Alg.~1a]{dwivedi2022generalized}, where $\l$ is the size of the input point sequence compressed by $\hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace(\delta)$ or $\textsc{KT}\xspace(\delta)$.} of \citet[Alg.~1a]{dwivedi2022generalized,dwivedi2021kernel} takes $\Theta(n^2)$ kernel evaluations to output a coreset of size $n_{\mrm{out}}$ with better-than-\textrm{i.i.d.}\ integration error.
Specifically, \citet[Thm.~1]{dwivedi2022generalized} prove that, on an event with probability $1-\frac{\delta}{2}$, $\hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace(\delta) \in \mathcal{G}^{f} ( \nu)$ with
\begin{talign}
%
\nu(n) =
\frac{2}{n_{\mrm{out}}\sqrt{3}} \sqrt{ \log( \frac{6n_{\mrm{out}}\log_2(n/n_{\mrm{out}})}{\delta} ) \infnorm{\mbf{k}}}
\label{eq:ktsplit_subgauss}
%
\end{talign}
for all $f$ with $\knorm{f}=1$.
\hfill\small{\ensuremath{\blacksquare}}
\end{example}
Many algorithms also offer high-probability bounds on the kernel MMD \cref{eq:kernel_mmd_distance}, the worst-case integration error across the unit ball of the RKHS.
We again capture these bounds abstractly using the following definition of a $\mbf{k}$-sub-Gaussian thinning algorithm.
%\newcommand{\mc{G}_{\kernel}}{\mc{G}_{\mbf{k}}} % NOTE: invalid after macro flattening -- \newcommand requires a single control-sequence name; all occurrences below are already expanded
%\newcommand{a}{a}
%\newcommand{v}{v}
\begin{definition}[\tbf{$\mbf{k}$-sub-Gaussian thinning algorithm}]\label{def:mmd_subgamma_algo}
For a kernel $\mbf{k}$, we call a thinning algorithm $\textsc{Alg}\xspace$ \emph{$\mbf{k}$-sub-Gaussian}
with parameter $v$ and shift $a$ and write $\textsc{Alg}\xspace\in\mc{G}_{\kernel}(v, a)$ if %
\begin{talign}
\label{eq:mmd_thin}
\P[\mmd_{\mbf{k}} \left( \cset_{\mrm{in}} , \mc{S}_{\textsc{Alg}\xspace} \right) \geq a_{n} + v_{n} \sqrt{t} \,\big\vert\, \cset_{\mrm{in}}] \leq e^{-t}
\qtext{for all} t \geq 0.
\end{talign}
%
%
%
%
%
%
%
%
We also call $\vareps_{\mbf{k},\textsc{Alg}\xspace}(n)\defeq\max(v_{n}, a_{n})$ the \emph{$\mbf{k}$-sub-Gaussian error} of \textsc{Alg}\xspace.
\end{definition}
\begin{example}[Kernel thinning]%
\label{ex:kt_subgamma}
\normalfont
Given a kernel $\mbf{k}$ and $n$ input points $\cset_{\mrm{in}}$, the generalized kernel thinning ($\textsc{KT}\xspace(\delta)$) algorithm\Cref{footnote:ktdelta} of \citet[Alg.~1]{dwivedi2022generalized,dwivedi2021kernel} takes $\Theta(n^2)$ kernel evaluations to output a coreset of size $n_{\mrm{out}}$ with near-optimal MMD error. In particular, by leveraging an appropriate auxiliary kernel $\kernel_{\mrm{split}}$, \citet[Thms.~2--4]{dwivedi2022generalized} establish that, on an event with probability $1-\frac{\delta}{2}$, $\textsc{KT}\xspace(\delta) \in \mc{G}_{\kernel}(v, a)$ with
\begin{talign}
\label{eq:kt_params}
a_n = \frac{C_{a}}{n_{\mrm{out}}}\sqrt{\staticinfnorm{\kernel_{\mrm{split}}}},
\qtext{and}
v_n = \frac{C_{v}}{n_{\mrm{out}}}\sqrt{\staticinfnorm{\kernel_{\mrm{split}}}\log(\frac{6n_{\mrm{out}}\log_2(n/n_{\mrm{out}})}{\delta})}\ \mathfrak{M}_{\cset_{\mrm{in}},\kernel_{\mrm{split}}},
\end{talign}
where $\sinfnorm{\kernel_{\mrm{split}}} = \sup_{x} \kernel_{\mrm{split}}(x,x)$, $C_{a}$ and $C_{v}$ are explicit constants, and $\mathfrak{M}_{\cset_{\mrm{in}},\kernel_{\mrm{split}}} \geq 1$ is non-decreasing in $n$ and varies based on the tails of $\kernel_{\mrm{split}}$ and the radius of the ball containing $\cset_{\mrm{in}}$.~\hfill\small{\ensuremath{\blacksquare}}
\end{example}
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\section{Proof of \cref{thm:compress_mmd}: MMD guarantees for \textsc{Compress}\xspace} \label{sec:compress_mmd}
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
Our proof proceeds in several steps. To control the MMD \cref{eq:kernel_mmd_distance}, we will control the Hilbert norm of the measure discrepancy of \textsc{Compress}\xspace \cref{eq:psi_phi_reln}, which we first write as a weighted sum of measure discrepancies from different (conditionally independent) runs of \textsc{Halve}\xspace. To effectively leverage the MMD tail bound assumption for this weighted sum, we reduce the problem to establishing a concentration inequality for the operator norm of an associated matrix. We carry out this plan in four steps summarized below.
First, in \cref{sub:reduce_mmd_vector_norm} we express the MMD associated with each \textsc{Halve}\xspace measure discrepancy as the Euclidean norm of a suitable vector (\cref{lem:mmd_to_vec}). Second, in \cref{sub:vector_to_matrix} we define a matrix dilation operator for a vector that allows us to control vector norms using matrix spectral norms (\cref{lem:dilation_results}). Third, in \cref{sub:matrix_freedman} we prove and apply a sub-Gaussian matrix Freedman concentration inequality (\cref{lem:matrix_exp_bernstein}) to control the MMD error for the \textsc{Compress}\xspace output, which in turn requires us to establish moment bounds for these matrices by leveraging tail bounds for the MMD error (\cref{lem:tailboundstomoments}). Finally, we put together the pieces in \cref{sub:thm2_put_together} to complete the proof.
We now begin our formal argument.
We will make use of the unrolled representation \cref{eq:psi_compress} for the \textsc{Compress}\xspace measure discrepancy $\psi_{\textsc{C}}(\cset_{\mrm{in}})$ in terms of the \textsc{Halve}\xspace inputs $(\mc{S}^{{\mrm{in}}}_{ k , j})_{j\in[4^k]}$ of size $n_k = 2^{\ensuremath{\mfk{g}}\xspace+1-k}\sqrt{n}$ for $0\leq k\leq \log_4 n \!-\!\ensuremath{\mfk{g}}\xspace\!-\!1$.
For brevity, we will use the shorthand $\psi_{\textsc{C}} \defeq \psi_{\textsc{C}}( \cset_{\mrm{in}})$, $\spsi_{k,j} \defeq \psi_{\textsc{H}} ( \mc{S}^{{\mrm{in}}}_{ k , j})$, and $\psi_{\thintag} \defeq \psi_{\thintag}(\mc{S}_{\textsc{C}})$ hereafter.
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\subsection{Reducing MMD to vector Euclidean norm}
\label{sub:reduce_mmd_vector_norm}
%
%
Number the elements of $\cset_{\mrm{in}}$ as $(x_1,\dots, x_n)$, define the $n \times n$ kernel matrix $\mbf{K} \defeq (\mbf{k}(x_i, x_j))_{i,j=1}^{n}$, and let $\mbf{K}^{\frac{1}{2}}$ denote a matrix square-root such that $\mbf{K} = \mbf{K}^{\frac{1}{2}} \cdot \mbf{K}^{\frac{1}{2}}$ (which exists since $\mbf{K}$ is a positive semidefinite matrix for any kernel $\mbf{k}$).
Next, let $\mc{S}^{\mrm{out}}_{k, j}$ denote the output sequence corresponding to $\spsi_{k,j}$ (i.e., running \textsc{Halve}\xspace on $\mc{S}^{{\mrm{in}}}_{ k , j}$), and let $\braces{e_i}_{i=1}^n$ denote the canonical basis of $\ensuremath{\mathbb{R}}^n$. The next lemma (with proof in \cref{sub:proof_of_lem_mmd_to_vec}) relates the Hilbert norms to Euclidean norms of carefully constructed vectors.
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%\newcommand{MMD as a vector norm}{MMD as a vector norm} % NOTE: invalid \newcommand left over from macro flattening; the name is already expanded in the lemma heading below
\begin{lemma}[\tbf{MMD as a vector norm}]
\label{lem:mmd_to_vec}
Define the vectors
\begin{talign}
\label{eq:def_u_and_v}
u_{k, j} \!\defeq\! \mbf{K}^{\frac{1}{2}} \sum_{i=1}^n e_i \parenth{\mbf 1(x_i\! \in \!\mc{S}^{{\mrm{in}}}_{ k , j}) \!- \! 2\!\cdot\! \mbf 1(x_i\! \in\! \mc{S}^{\mrm{out}}_{k, j})}, \stext{and}
u_{\textsc{C}} \!\defeq\! \sum_{k=0}^{\log_4 n \!-\!\ensuremath{\mfk{g}}\xspace\!-\!1} \sum_{j=1}^{4^{k}} w_{k, j} u_{k, j},
\end{talign}
where $w_{k, j} \!\defeq\!\frac{\sqrt{n}}{2^{\ensuremath{\mfk{g}}\xspace+1+k}}$.
Then, we have
\begin{talign}
\label{eq:knorm_twonorm}
&
%
n^2 \cdot \mmd_\mbf{k}^2\left( \cset_{\mrm{in}} , \mc{S}_{ \textsc{C}} \right) = \twonorm{u_{\textsc{C}}}^2,
%
%
%
\qtext{and}\\
&\mathbb{E}[u_{k,j} \vert (u_{k', j'} : j' \in [4^{k'}], k' > k)] = 0
\qtext{for} k = 0, \ldots, \log_4 n \!-\!\ensuremath{\mfk{g}}\xspace\!-\!2,
%
%
\label{eq:zero_mean_vector}
\end{talign}
and $ u_{k,j} $ for $ j \in [4^{k}] $ are conditionally independent given $(u_{k', j'} : j' \in [4^{k'}], k' > k)$.
\end{lemma}
%
%
%
%
%
Applying \cref{eq:knorm_twonorm}, we effectively reduce the task of controlling the MMD errors to controlling the Euclidean norm of suitably defined vectors.
Next, we reduce the problem to controlling the spectral norm of a suitable matrix.
%
\subsection{Reducing vector Euclidean norm to matrix spectral norm}
\label{sub:vector_to_matrix}
To this end, we define a symmetric dilation matrix operator: given a vector $u \in \ensuremath{\mathbb{R}}^n$, define the matrix $\mbf{M}_{u}$ as
\begin{talign}
\mbf{M}_{u}
&\!\defeq \!
\begin{pmatrix}
0 & u^{\top} \\
u & \mbf{0}_{n \times n}
\end{pmatrix} \in \reals^{ (n+1) \times (n+1)}.
\label{eq:def_M}
\end{talign}
It is straightforward to see that $u\mapsto\mbf{M}_{u}$ is a linear map. In addition, the matrix $\mbf{M}_{u}$ also satisfies a few important properties (established in \cref{sub:proof_dilation_results}) that we use in our proofs.
%\newcommand{Properties of the dilation operator}{Properties of the dilation operator} % NOTE: invalid \newcommand left over from macro flattening; the name is already expanded in the lemma heading below
\begin{lemma}[\tbf{Properties of the dilation operator}]
\label{lem:dilation_results}
For any $u \in \ensuremath{\mathbb{R}}^n$, the matrix $\mbf{M}_{u}$~\cref{eq:def_M} satisfies
\begin{talign}
\opnorm{\mbf{M}_{u}} \seq{(a)} \twonorm{u} \seq{(b)} \lambda_{\max}(\mbf{M}_{u}), \qtext{and}
\mbf{M}_u^q \stackrel{(c)}{\preceq} \twonorm{u}^{q} \mat{I}_{n+1} \stext{ for all } q \in \natural.
\label{eq:power_of_M}
\end{talign}
\end{lemma}
Define the shorthand $\mbf{M}_{k, j} \defeq \mbf{M}_{w_{k, j} u_{k, j}}$ (defined in \cref{lem:mmd_to_vec}).
Applying \cref{lem:mmd_to_vec,lem:dilation_results}, we find that
\begin{talign}
n \mmd_\mbf{k}\left( \cset_{\mrm{in}} , \mc{S}_{ \textsc{C}} \right)
%
\!\seq{\cref{eq:knorm_twonorm}}\! \twonorm{u_{\textsc{C}}}
&\!\seq{\cref{eq:power_of_M}}\! \lambda_{\max}(\mbf{M}_{u_{\textsc{C}}})
\!\seq{(i)}\! \lambda_{\max}({\sum_{k=0}^{\log_4 n \!-\!\ensuremath{\mfk{g}}\xspace\!-\!1} \sum_{j=1}^{4^{k}} \mbf{M}_{k, j}}),
\label{eq:mcp_op}
\end{talign}
where step~(i) follows from the linearity of the dilation operator. Thus to control the MMD error, it suffices to control the maximum eigenvalue of the sum of matrices appearing in \cref{eq:mcp_op}.
\subsection{Controlling the spectral norm via a sub-Gaussian matrix Freedman inequality}
\label{sub:matrix_freedman}
To control the maximum eigenvalue of the matrix $\mbf{M}_{u_{\textsc{C}}}$, we make use of \cref{eq:mcp_op} and the following sub-Gaussian generalization of the matrix Freedman inequality of \citet[Thm. 7.1]{user_friendly_matrix}.
The proof of \cref{lem:matrix_exp_bernstein} can be found in \cref{sec:additional_lemmas}. For two matrices $A$ and $B$ of the same size, we write $A \preceq B$ if $B-A$ is positive semidefinite.
%\newcommand{Sub-Gaussian matrix Freedman inequality}{Sub-Gaussian matrix Freedman inequality} % NOTE: invalid \newcommand left over from macro flattening; the name is already expanded in the lemma heading below
\begin{lemma}[Sub-Gaussian matrix Freedman inequality]\label{lem:matrix_exp_bernstein}
Consider a sequence $\parenth{\mat{Y}_{i}}_{i=1}^{N}$ of self-adjoint random matrices in $\ensuremath{\mathbb{R}}^{m\times m} $ and a fixed sequence of scalars $\parenth{R_i}_{i=1}^{N}$ satisfying
%
%
\begin{talign}
\label{eq:matrix_bernstein_conditions}
\mathbb{E} \left[ \mat{Y}_i \vert \left( \mat{Y}_j \right)_{j =1}^{i-1} \right] \seq{(A)} 0
\ \stext{and}\ \mathbb{E} \left[ \mat{Y}^{q}_i \vert \left( \mat{Y}_j \right)_{j =1}^{i-1} \right] \stackrel{(B)}{\preceq} (\frac{q}{2})! R_i^{q} \mbf I,
\text{ for all } i \in [N] \text{ and } q \in 2\natural.
\end{talign}
Define the variance parameter $ \sigma^2 \defeq \sum_{i=1}^{N} R_i^2$.
Then,
\begin{talign}
\P [ \lambda_{\max} ( \sum_{i=1}^{N} \mat{Y}_i ) \geq \sigma\sqrt{8(t+\log m)}]
\leq e^{-t}
\qtext{for all} t > 0,
\end{talign}
and equivalently
\begin{talign}
\P [ \lambda_{\max} ( \sum_{i=1}^{N} \mat{Y}_i ) \leq \sigma\sqrt{8\log(m/\delta)}]
\geq 1-\delta
\qtext{for all} \delta \in (0,1].
\end{talign}
\end{lemma}
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
To apply \cref{lem:matrix_exp_bernstein} with the matrices ${\mbf{M}_{k, j}}$, we need to establish the zero-mean and moment bound conditions for suitable $R_{k, j}$ in \cref{eq:matrix_bernstein_conditions}.
\subsubsection{Verifying the zero mean condition~\cref{eq:matrix_bernstein_conditions}(A) for ${\mbf{M}_{k, j}}$}
To this end, first we note that the conditional independence and zero-mean property of ${\spsi_{k, j}}$ implies that the random vectors $u_{k, j}$ and the matrices $\mbf{M}_{k, j}$ also satisfy a similar property, and in particular that
\begin{talign}
\mathbb{E}\brackets{\mbf{M}_{k, j} \mid \parenth{\mbf{M}_{k', j'}: k' > k, j' \in [4^{k'}]}} = \mat{0}
\qtext{for} j \in [4^k], k \in \braces{0, 1, \ldots, \log_4 n-\ensuremath{\mfk{g}}\xspace -1}.
\label{eq:zero_mean_matrix}
\end{talign}
\subsubsection{Establishing moment bound conditions~\cref{eq:matrix_bernstein_conditions}(B) for ${\mbf{M}_{k, j}}$ in terms of ${R_{k, j}}$ via MMD tail bounds for \textsc{Halve}\xspace}
To establish the moment bounds on $\mbf{M}_{k, j}$, note that \cref{lem:mmd_to_vec,lem:dilation_results} imply that
\begin{talign}
\label{eq:mjk_bound}
\mbf{M}_{k, j}^{q} = \mbf{M}_{w_{k, j} u_{k, j}}^{q} \stackrel{\cref{eq:power_of_M}}{\preceq} \twonorm{w_{k, j} u_{k, j}}^q \cdot \mat{I}_{n+1} \seq{\cref{eq:knorm_twonorm}} w_{k, j}^q \norm{u_{k, j}}_2^{q} \cdot \mat{I}_{n+1}
%
\end{talign}
where $w_{k, j}$ was defined in \cref{lem:mmd_to_vec}. Thus it suffices to establish the moment bounds on $\twonorm{u_{k, j}}^{q}$. To this end, we first state a lemma that converts tail bounds to moment bounds.
See \cref{sub:proof_of_tail_moments} for the proof inspired by \citet[Thm.~2.3]{boucheron2013concentration}.
%\newcommand{Tail bounds imply moment bounds}{Tail bounds imply moment bounds} % NOTE: invalid \newcommand left over from macro flattening; the name is already expanded in the lemma heading below
\begin{lemma}[Tail bounds imply moment bounds] \label{lem:tailboundstomoments}
For a non-negative random variable $ Z $,
%
\begin{talign}
\P[ Z \!>\! a \!+\! v \sqrt{ t} ] \!\leq\! e^{-t }
\ \stext{for all} t\geq0
\ \Longrightarrow \
\mathbb{E} [Z^{q}]
\leq (2a\!+\!2v)^q (\frac{q}{2})!
%
\ \stext{for all} q \in 2\natural.
\end{talign}
\end{lemma}%
To obtain a moment bound for $\twonorm{u_{k, j} }$, we first state some notation.
For each $n$, define the quantities
\begin{talign}
\label{eq:new_params}
a'_{n} \defeq n a_{n}, \quad
{v'_{n}} \defeq n v_{n}
%
%
\end{talign}
where $a_n$ and $v_n$ are the parameters such that $\textsc{Halve}\xspace \in \mc{G}_{\kernel}(v_n, a_n) $ on inputs of size $n$.
%
%
%
Using an argument similar to \cref{lem:mmd_to_vec}, we have
%
\begin{talign}
\label{eq:psi_k_j_norm}
\norm{ u_{k,j} }_2 = n_{k, j} \mmd_\mbf{k}(\mc{S}^{{\mrm{in}}}_{ k , j}, \mc{S}^{\mrm{out}}_{k, j})
\qtext{for}n_{k, j} = \sabss{\mc{S}^{{\mrm{in}}}_{ k, j}} = \sqrt{n} 2^{\ensuremath{\mfk{g}}\xspace+1-k}.
%
%
\end{talign}
Thereby, using the $\mc{G}_{\kernel}$ assumption on \textsc{Halve}\xspace implies that
\begin{talign}
\label{eq:psi_halve_bound}
\P [ \norm{u_{k,j} }_2 \geq a'_{\l_{k}'} + v'_{\l_{k}'} \sqrt{ t} \mid (u_{k',j'}: j' \in [4^{k'}], k' >k) ] \leq e^{-t} \stext{for all} t\geq 0,
%
%
%
\end{talign}
where
\begin{talign}
\label{eq:lkprime}
\l_{k}' \defeq n_{k, j} = \sqrt{n} 2^{\ensuremath{\mfk{g}}\xspace+1-k}
\end{talign}
and, notably, $\l_n = \l_0'$.
Combining the bound~\cref{eq:psi_halve_bound} with \cref{lem:tailboundstomoments}
yields that
\begin{talign}
\mathbb{E} [\norm{u_{k,j} }_2^{q} \mid (u_{k',j'}: j' \in [4^{k'}], k' >k)]
\leq (\frac{q}{2})! (2a'_{\l_{k}'}+2v'_{\l_{k}'})^q,
\label{eq:psi_norm_bound_exp}
\end{talign}
for all $q \in 2\natural$,
where $\l_{k}'$ is defined in \cref{eq:lkprime}. Now, putting together \cref{eq:psi_norm_bound_exp,eq:mjk_bound}, and using the conditional independence of $\mbf{M}_{k, j}$, we obtain the following control on the $q$-th moments of $\mbf{M}_{k, j}$ for $q \in 2\mbb N$:
\begin{talign}
\mathbb{E}\brackets{\mbf{M}_{k, j}^q \big \vert \parenth{\mbf{M}_{k', j'}, k' > k, j' \in [4^{k'}]}}
&\stackrel{\cref{eq:mjk_bound}}{\preceq}
w_{k, j}^q \!\cdot\! \mathbb{E}\brackets{\norm{u_{k,j} }_2^{q} \big\vert \braces{u_{k', j'}, k' > k, j' \in [4^{k'}]}}\! \cdot \! \mat{I}_{n+1}
\\
&\stackrel{\cref{eq:psi_norm_bound_exp}}{\preceq}
w_{k, j}^q \!\cdot\! \parenth{
(2a'_{\l_{k}'}+2v'_{\l_{k}'})^q (\frac{q}{2})!} \!\cdot \! \mat{I}_{n\!+\!1}\\
&=
(\frac{q}{2})! R_{k, j}^{q} \mat{I}_{n\!+\!1}
\stext{where}
R_{k, j} \defeq
2w_{k,j} (a'_{\l_{k}'}+v'_{\l_{k}'})
\label{eq:Rk_Ak}
\end{talign}
where $\l_k$ is defined in \cref{eq:lkprime}. In summary, the computation above establishes the condition (B) from the display~\cref{eq:matrix_bernstein_conditions} for the matrices ${\mbf{M}_{k, j}}$ in terms of the sequence ${R_{k, j}}$ defined in \cref{eq:Rk_Ak}.
\subsection{Putting the pieces together for proving \cref{thm:compress_mmd}}
\label{sub:thm2_put_together}
Define
\begin{talign}
\label{eq:sigma_r_mc}
%
%
\wtil{\sigma}
\defeq
\sqrt{ \log_4 n - \ensuremath{\mfk{g}}\xspace} \cdot
2 (a_{\sqrt{n} 2^{\ensuremath{\mfk{g}}\xspace+1}}+v_{\sqrt{n} 2^{\ensuremath{\mfk{g}}\xspace+1}})
%
\end{talign}
Now, putting \cref{eq:zero_mean_matrix,eq:Rk_Ak} together, we conclude that with a suitable ordering of the indices $(k, j)$, the assumptions of \cref{lem:matrix_exp_bernstein} are satisfied by the random matrices $\parenth{\mbf{M}_{k, j}, j \in [4^k], k \in \braces{0, 1, \ldots, \log_4 n - \ensuremath{\mfk{g}}\xspace-1}}$ with the sequence $\parenth{R_{k, j}}$.
%
%
%
%
%
%
Now, since $\l'_k = \sqrt{n} 2^{\ensuremath{\mfk{g}}\xspace+1-k}$~\cref{eq:lkprime} is decreasing in $k$, $w_{k,j}= \frac{\l'_k}{ 4^{\ensuremath{\mfk{g}}\xspace+1}}$ (as defined in \cref{lem:mmd_to_vec}), and $a_n'$ and $ v_n'$~\cref{eq:new_params} are assumed non-decreasing in $n$, we find that
%
\begin{talign}
n^2 \cdot \wtil{\sigma}^2
%
%
%
&\seq{\cref{eq:sigma_r_mc}}
n^2 ( \log_4 n - \ensuremath{\mfk{g}}\xspace)( 2 (a_{\sqrt{n} 2^{\ensuremath{\mfk{g}}\xspace+1}}+v_{\sqrt{n} 2^{\ensuremath{\mfk{g}}\xspace+1}}) )^2 \\
&\seq{\cref{eq:new_params}}
\left( \log_4 n - \ensuremath{\mfk{g}}\xspace \right)\frac{n}{4^{\ensuremath{\mfk{g}}\xspace+1}} ( 2 (a'_{\l_{0}'}+v'_{\l_{0}'}) )^2 \\
& \geq
\sum_{k = 0}^{\log_4 n -\ensuremath{\mfk{g}}\xspace - 1} \frac{n}{4^{\ensuremath{\mfk{g}}\xspace+1}}( 2 (a'_{\l_{k}'}+v'_{\l_{k}'}) )^2 \\
&=
\sum_{k = 0}^{\log_4 n -\ensuremath{\mfk{g}}\xspace - 1} \sum_{j =1 }^{4^k} \frac{n}{4^{\ensuremath{\mfk{g}}\xspace+1+k}}( 2 (a'_{\l_{k}'}+v'_{\l_{k}'}) )^2 \\
&=
\sum_{k = 0}^{\log_4 n -\ensuremath{\mfk{g}}\xspace - 1} \sum_{j =1 }^{4^k} ( 2w_{k,j} (a'_{\l_{k}'}+v'_{\l_{k}'}) )^2 \\
&\seq{\cref{eq:Rk_Ak}}
\sum_{k = 0}^{\log_4 n -\ensuremath{\mfk{g}}\xspace - 1} \sum_{j =1 }^{4^k} R_{k, j}^2.
%
%
\end{talign}
Finally, applying \cref{eq:mcp_op} and invoking \cref{lem:matrix_exp_bernstein} with $\sigma \gets n\wtil{\sigma}$ and $m \gets n+1$, we conclude that
\begin{talign}
&\P[\mmd\left( \cset_{\mrm{in}} , \mc{S}_{ \textsc{C}} \right) \geq \wtil{\sigma}\sqrt{8(\log(n+1)+t)}] \\
&\seq{\cref{eq:mcp_op}} \P[\lambda_{\max} (\sum_{k=0}^{\log_4 n \!-\!\ensuremath{\mfk{g}}\xspace\!-\!1} \sum_{j=1}^{4^{k}} \mbf{M}_{k, j}) \geq n\wtil{\sigma}\sqrt{8(\log(n+1)+t)}] \\
&\leq e^{-t}
\qtext{for all} t >0,
\end{talign}
which in turn implies
\begin{talign}
\P[\mmd\left( \cset_{\mrm{in}} , \mc{S}_{ \textsc{C}} \right) \geq \wtil{a}_n + \wtil{v}_n \sqrt{t}]
\leq e^{-t}
\stext{for} t\geq 0,
\end{talign}
since the parameters $ \wtil{v}_n, \wtil{a}_n$~\cref{eq:cp_params} satisfy
\begin{talign}
\wtil{v}_n \seq{\cref{eq:cp_params}} 4(a_{\l_n} \!+\! v_{\l_n})\sqrt{2(\log_4 n\!-\!\ensuremath{\mfk{g}}\xspace)} \seq{\cref{eq:sigma_r_mc}} \wtil{\sigma}\sqrt{8},
\qtext{and}
\wtil{a}_n \seq{\cref{eq:cp_params}} \wtil{v}_n \sqrt{\log(n\!+\!1)} = \wtil{\sigma}\sqrt{8\log(n+1)}.
\end{talign}
Comparing with \cref{def:mmd_subgamma_algo}, \cref{thm:compress_mmd} follows.
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\section{Streaming Version of \textsc{Compress}\xspace }
\label{sec:compress_streaming}
\textsc{Compress}\xspace can be efficiently implemented in a streaming fashion (\cref{algo:compress_streaming}) by viewing the recursive steps in \cref{algo:compress} as different levels of processing, with the bottom level denoting the input points and the top level denoting the output points.
The streaming variant of the algorithm efficiently maintains memory at several levels and processes inputs in batches of size $4^{\ensuremath{\mfk{g}}\xspace+1}$.
At any level $i$ (with $i=0$ denoting the level of the input points), whenever there are $2^i 4^{\ensuremath{\mfk{g}}\xspace+1}$ points, the algorithm runs \textsc{Halve}\xspace on the points in this level, appends the output of size $2^{i-1}4^{\ensuremath{\mfk{g}}\xspace+1}$ to the points at level $i+1$, and empties the memory at level $i$ (and thereby level $i$ never stores more than $2^i 4^{\ensuremath{\mfk{g}}\xspace+1}$ points). In this fashion, just after processing $n=4^{k+\ensuremath{\mfk{g}}\xspace+1}$ points, the highest level is $k+1$, which contains a compressed coreset of size $2^{k-1} 4^{\ensuremath{\mfk{g}}\xspace+1} = 2^{k+\ensuremath{\mfk{g}}\xspace+1} 2^{\ensuremath{\mfk{g}}\xspace} = \sqrt{n} 2^{\ensuremath{\mfk{g}}\xspace}$ (outputted by running \textsc{Halve}\xspace at level $k$ for the first time), which is the desired size for the output of \textsc{Compress}\xspace.
%
\begin{algorithm2e}[ht!]
\caption{\small\textsc{Compress}\xspace\ (Streaming) -- Outputs stream of coresets of size $2^\ensuremath{\mfk{g}}\xspace\sqrt{n}$ for $n = 4^{k+\ensuremath{\mfk{g}}\xspace+1}$ and $k\in\natural$}
\label{algo:compress_streaming}
\small{
\BlankLine
\KwIn{
%
halving algorithm \textsc{Halve}\xspace, oversampling parameter\xspace~$\ensuremath{\mfk{g}}\xspace$, stream of input points $x_1, x_2, \ldots$}
%
%
\BlankLine
$\mc{S}_0 \gets \braces{}$ \qquad\qquad\qquad\qquad\qquad\qquad// Initialize empty level $0$ coreset \\
\For{$t=1, 2, \ldots, $ }{
$\mc{S}_0 \gets \mc{S}_0 \cup
(x_j)_{j=1+(t-1)\cdot 4^{\ensuremath{\mfk{g}}\xspace+1}}^{t\cdot {4^{\ensuremath{\mfk{g}}\xspace+1}}}$ \quad// Process input in batches of size $ 4^{\ensuremath{\mfk{g}}\xspace+1}$ \\
\If{$t == 4^j$ \stext{\textup{for}} $j\in\natural$}{
$\mc{S}_{j+1} \gets \braces{}$ \qquad\qquad\qquad \quad\ \ // Initialize level $j+1$ coreset after processing $4^{j+\ensuremath{\mfk{g}}\xspace+1}$ input points \\
}
%
%
\For{ $i=0, \ldots, \lceil \log_4 t \rceil +1 $ }{
\If{ $ \abs{\mc{S}_i} == 2^{i}4^{\ensuremath{\mfk{g}}\xspace+1} $ }{
$\mc{S} \gets \textsc{Halve}\xspace({\mc{S}_i} ) $ \quad\quad\ \ \ \ \ // Halve level $i$ coreset to size $2^{i-1} 4^{\ensuremath{\mfk{g}}\xspace+1} $\\
%
%
%
%
%
%
$\mc{S}_{i+1} \gets \mc{S}_{i+1} \cup \mc{S} $ \ \ \qquad\ \,// Update level $i+1$ coreset: has size $\in\braces{1,2,3, 4}\cdot 2^{i-1} 4^{\ensuremath{\mfk{g}}\xspace+1}$ \\
$\mc{S}_i \gets \braces{}$ \qquad\qquad\qquad\ \ \ // Empty coreset at level $i$
}
}
%
%
\If{$t == 4^j$ \stext{\textup{for}} $j\in\natural$}{
\textbf{output}\ \ $\mc{S}_{ j + 1 } $ \qquad\qquad\qquad\,\,\ // Coreset of size $\sqrt{n}2^{\ensuremath{\mfk{g}}\xspace}$ with $n\defeq t 4^{\ensuremath{\mfk{g}}\xspace+1}$ and $t = 4^{j}$ for $j\in \natural$
}
}
}
%
%
\end{algorithm2e}
Our next result analyzes the space complexity of the streaming variant (\cref{algo:compress_streaming}) of \textsc{Compress}\xspace. The intuition for gains in memory requirements is very similar to that for running time, as we now maintain (and run \textsc{Halve}\xspace) on subsets of points with size much smaller than the input sequence.
We count the number of data points stored as our measure of memory.
%
%
\begin{proposition}[\textsc{Compress}\xspace Streaming Memory Bound] \label{thm:compress_streaming}
Let \textsc{Halve}\xspace store $ s_{\textsc{H}} (n) $ data points on inputs of size $n$.
Then, after completing iteration $t$, the streaming implementation of $\textsc{Compress}\xspace$ (\cref{algo:compress_streaming}) has used at most $s_{\textsc{C}} \left(t \right) = 4^{\ensuremath{\mfk{g}}\xspace+3} \sqrt{t} + s_{\textsc{H}}( 2^{\ensuremath{\mfk{g}}\xspace+1} \sqrt{t} )$
data points of memory.
%
\end{proposition}
\begin{proof}
%
At time $t$, we would like to estimate the space usage of the algorithm.
At the $i$th level of memory, we can have at most $ 2^{i+2} 4^\ensuremath{\mfk{g}}\xspace $ data points.
Since we are maintaining a data set of size at most $ \sqrt{t} 4^\ensuremath{\mfk{g}}\xspace $ at time $t$, there are at most $ \frac{\log t}{2} $ levels.
Thus, the maximum number of points stored at time $t$ is bounded by
\begin{talign}
\sum_{i=0}^{ 0.5 \log t } 2^{i+2} 4^\ensuremath{\mfk{g}}\xspace \leq 4^{\ensuremath{\mfk{g}}\xspace+3} \sqrt{t}.
\end{talign}
Furthermore, at any time up to time $t$, we have run \textsc{Halve}\xspace on a point sequence of size at most $ \sqrt{t} 2^{\ensuremath{\mfk{g}}\xspace+1} $ which requires storing at most $ s_{\textsc{H}}( \sqrt{t} 2^{\ensuremath{\mfk{g}}\xspace+1} ) $ additional points.
%
\end{proof}
%
\begin{example}[\textsc{KT}\xspace-\textsc{Compress}\xspace and \textsc{KT}\xspace-\textsc{Compress++}\xspace]
\normalfont
%
First consider the streaming variant of \textsc{Compress}\xspace with $\textsc{Halve}\xspace$ = symmetrized $\textsc{KT}\xspace(\frac{\l^2}{n 4^{\ensuremath{\mfk{g}}\xspace+1} (\beta_n+1)} \delta)$ for \textsc{Halve}\xspace inputs of size $\ell$ as in \cref{ex:ktcompress}.
Since $s_{\textsc{KT}\xspace} \left( n \right) \leq n $ \citep[Sec.~3]{dwivedi2021kernel}, \cref{thm:compress_streaming} implies that $ s_{\textsc{C}} (n) \leq 4^{\ensuremath{\mfk{g}}\xspace + 4} \sqrt{n} $.
Next consider \textsc{Compress++}\xspace with the streaming variant of \textsc{Compress}\xspace, with $\textsc{Halve}\xspace = \trm{symmetrized } \textsc{KT}\xspace(
\frac{\l^2}{4n2^{\ensuremath{\mfk{g}}\xspace}(\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1))}\delta)$ when applied to an input of size $\l$, and $\textsc{Thin}\xspace = \textsc{KT}\xspace(\frac{\ensuremath{\mfk{g}}\xspace}{\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1)} \delta)$ as in \cref{ex:ktcompresspp}.
The space complexity $s_{\textsc{C++}\xspace}(n)= s_{\textsc{C}} (n) \!+ \! s_{\textsc{KT}\xspace} (\l_n) \!\leq\! 4^{\ensuremath{\mfk{g}}\xspace + 4} \sqrt{n} + \l_n \leq 4^{\ensuremath{\mfk{g}}\xspace + 5} \sqrt{n} $.
Setting $\ensuremath{\mfk{g}}\xspace$ as in \cref{ex:ktcompresspp}, we get $s_{\textsc{C++}\xspace}(n) = \mc{O}( \sqrt{n}\log^2 n ) $. \hfill\small{\ensuremath{\blacksquare}}
%
%
\end{example}
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\section{Proof of \cref{thm:compress_sub_gamma}: Runtime and integration error of \textsc{Compress}\xspace}
\label{sec:compress_subgamma}
First, we bound the running time of \textsc{Compress}\xspace.
By definition, $ \textsc{Compress}\xspace $ makes four recursive calls to $\textsc{Compress}\xspace$ on inputs of size $ n/4$.
Then, $\textsc{Halve}\xspace$ is run on an input of size $ 2^{\ensuremath{\mfk{g}}\xspace+1} \sqrt{n} $.
Thus, $r_{\textsc{C}}$ satisfies the recursion
\begin{talign}
r_{\textsc{C}}(n) = 4r_{\textsc{C}}\left( \frac{n}{4} \right) + r_{\textsc{H}}( \sqrt{n}2^{\ensuremath{\mfk{g}}\xspace+1}).
\end{talign}
Since $r_{\textsc{C}} (4^\ensuremath{\mfk{g}}\xspace) = 0$, we may unroll the recursion to find that
\begin{talign}
r_{\textsc{C}}(n) = \sum_{i=0}^{\beta_n} 4^{i} r_{\textsc{H}}( 2^{\ensuremath{\mfk{g}}\xspace+1} \sqrt{n 4^{-i} } ),
\end{talign}
as claimed in \cref{run_time_cp}.
%
%
%
%
%
%
Next, we bound the sub-Gaussian error for a fixed function $f$.
In the measure-discrepancy notation~\cref{eq:psi_phi_reln} of \cref{sec:add_notation}, we have
\begin{talign}
\label{eq:psi_compress}
\psi_{\textsc{C}} \left( \cset_{\mrm{in}}\right) = \sum_{i=1}^4 \psi_{\textsc{C}} \left( \mc{S}_i \right) + \sqrt{n} 2^{-\ensuremath{\mfk{g}}\xspace-1} \psi_{\textsc{H}} (\wtil{\mc{S}})
\end{talign}
where $ \mc{S}_i $ and $\wtil{\mc{S}}$ are defined as in \cref{algo:compress}.
Unrolling this recursion, we find that running \textsc{Compress}\xspace on an input of size $n$ with oversampling parameter\xspace~$\ensuremath{\mfk{g}}\xspace$ leads to applying \textsc{Halve}\xspace on $ 4^i $ coresets of size $n_i = 2^{\ensuremath{\mfk{g}}\xspace+1-i}\sqrt{n}$ for $0\leq i\leq\beta_n$.
Denoting these \textsc{Halve}\xspace inputs by $(\mc{S}_{i,j}^{{\mrm{in}}})_{j\in [4^i]}$, we have
\begin{talign} \label{eq:psi_unroll}
\psi_{\textsc{C}}\left( \cset_{\mrm{in}} \right)
=
\sqrt{n} 2^{-\ensuremath{\mfk{g}}\xspace-1 } \sum_{i=0}^{ \beta_n } \sum_{j=1}^{4^i} 2^{-i} \psi_{\textsc{H}} ( \mc{S}^{{\mrm{in}}}_{ i , j}).
\end{talign}
Now define %
$ \sigma_{\textsc{H}}(n) = n\nu_{\textsc{H}}(n) $.
Since $ \psi_{\textsc{H}} ( \mc{S}^{{\mrm{in}}}_{ i , j})(f) $ are $\sigma_{\textsc{H}}(n_i)$ sub-Gaussian given $(\mc{S}^{{\mrm{in}}}_{ i' , j'})_{i'> i, j'\geq 1}$ and $(\mc{S}^{{\mrm{in}}}_{ i , j'})_{j'\leq j}$, \cref{subgsn_sum} implies that $\psi_{\textsc{C}}\left( \cset_{\mrm{in}} \right)(f)$ is $\sigma_{\textsc{C}}$ sub-Gaussian given $\cset_{\mrm{in}}$ for
%
%
%
%
%
%
%
%
%
%
\begin{talign}
\sigma_{\textsc{C}}^2 \left( n \right) &= n 4^{-\ensuremath{\mfk{g}}\xspace-1} \sum_{i=0}^{ \beta_n } \sigma_{\textsc{H}}^2 \left( n_i \right) . \label{eq:sigma_cp}
%
\end{talign}
Recalling the relation~\cref{eq:phi_psi_equivalence} between $\sigma$ and $\nu$ from \cref{sec:add_notation}, we conclude that
\begin{talign}
\label{eq:sigma_compress}
\nu^2_{\textsc{C}} \left( n \right) &= \sum_{i=0}^{ \beta_n } 4^{-i} \nu^2_{\textsc{H}} \left(n_i \right).
%
\end{talign}
as claimed in \cref{eq:nu_cp}.
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\section{Proof of \cref{thm:compressppmmd}: MMD guarantees for \textsc{Compress++}\xspace} \label{sec:compressppmmd}
Noting that MMD is a metric, and applying triangle inequality, we have
\begin{talign}
\label{eq:mmd_triangle}
\mmd_\mbf{k}(\cset_{\mrm{in}}, \mc{S}_{\textsc{C++}\xspace}) \leq
\mmd_\mbf{k}(\cset_{\mrm{in}}, \mc{S}_{\textsc{C}})
+ \mmd_\mbf{k}(\mc{S}_{\textsc{C}}, \mc{S}_{\textsc{C++}\xspace}).
\end{talign}
Since $\mc{S}_{\textsc{C++}\xspace}$ is the output of $\textsc{Thin}\xspace(2^{\ensuremath{\mfk{g}}\xspace})$ with $\mc{S}_{\textsc{C}}$ as the input, applying the MMD tail bound assumption~\cref{eq:mmd_thin} with $ |\mc{S}_{\textsc{C}} |= \sqrt{n} 2^{\ensuremath{\mfk{g}}\xspace}$ substituted in place of $n$, we find that
\begin{talign}
\P \left[ \mmd ( \mc{S}_{\textsc{C}}, \mc{S}_{\textsc{C++}\xspace}) \! \geq \! {a'}_{ 2^{\ensuremath{\mfk{g}}\xspace} \sqrt{n} }\! +\! {v'}_{ 2^{\ensuremath{\mfk{g}}\xspace} \sqrt{n} } \sqrt{ t} \! \right]\leq e^{-t} \qtext{for all } t\geq 0.
\label{eq:stage_2_bound}
\end{talign}
Recall that $\l_n / 2 = 2^{\ensuremath{\mfk{g}}\xspace} \sqrt{n}$.
Next, we apply \cref{thm:compress_mmd} with \textsc{Halve}\xspace to conclude that
\begin{talign}
\label{eq:stage_1_bound}
\P [
\mmd_\mbf{k}(\cset_{\mrm{in}}, \mc{S}_{\textsc{C}}) \geq \wtil{a}_n + \wtil{v}_{n}\cdot\sqrt{ t} ] &\leq e^{-t}
\qtext{for all} t \geq 0.
%
%
%
%
%
%
\end{talign}
Thus, we have
\begin{talign}
\P\left[ \mmd_\mbf{k}(\cset_{\mrm{in}}, \mc{S}_{\textsc{C++}\xspace}) \geq a'_{\l_n/2} + \wtil{a}_n + ( v'_{\l_n/2} + \wtil{v}_n ) \sqrt{t} \right] \leq 2\cdot e^{-t}
\qtext{for all} t\geq 0,
\end{talign}
which in turn implies that
\begin{talign}
\P\left[ \mmd_\mbf{k}(\cset_{\mrm{in}}, \mc{S}_{\textsc{C++}\xspace}) \geq a'_{\l_n/2} + \wtil{a}_n + ( v'_{\l_n/2} + \wtil{v}_n ) \sqrt{\log 2} + ( v'_{\l_n/2} + \wtil{v}_n ) \sqrt{t} \right] \leq e^{-t}
\qtext{for all} t\geq 0,
\end{talign}
thereby yielding the claimed result.
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\section{Proof of \cref{thm:compresspp_subgamma}: Runtime and integration error of \textsc{Compress++}\xspace} \label{sec:compresspp_subgamma}
First, the runtime bound~\cref{eq:runtime_cpp} follows directly by adding the runtime of $\textsc{Compress}\xspace\left( \textsc{Halve}\xspace , \ensuremath{\mfk{g}}\xspace \right)$ as given by \cref{run_time_cp} in \cref{thm:compress_sub_gamma} and the runtime of $\textsc{Thin}\xspace$.
Recalling the notation~\cref{eq:psi_alg,eq:psi_phi_reln} from \cref{sec:add_notation} and noting the definition of the point sequences $\mc{S}_{\textsc{C}}$ and $\mc{S}_{\textsc{C++}\xspace}$ in \cref{alg:compresspp}, we obtain the following relationship between the different discrepancy vectors:
%
\begin{talign}
%
\phi_{\textsc{C}}(\cset_{\mrm{in}}) &
= \frac{1}{n}\sum_{ x \in \cset_{\mrm{in}} } \delta_x - \frac{1}{2^{\ensuremath{\mfk{g}}\xspace}\sqrt{n}} \sum_{ x \in \mc{S}_{\textsc{C}} } \delta_x, \\
\label{eq:psi_thin_stage_2}
\phi_{\thintag} \left( \mc{S}_{\textsc{C}} \right) &= \frac{1}{2^{\ensuremath{\mfk{g}}\xspace}\sqrt{n}}\sum_{x \in \mc{S}_{\textsc{C}} } \delta_x - \frac{1}{\sqrt{n}} \sum_{x \in \mc{S}_{ \textsc{C++}\xspace } } \delta_x, \qtext{and}
\\
\label{eq:psi_compresspp}
\phi_{\textsc{C++}\xspace}(\cset_{\mrm{in}})
&=
\frac1n\sum_{ x \in \cset_{\mrm{in}} } \delta_x - \frac{1}{\sqrt n} \sum_{ x \in \mc{S}_{ \textsc{C++}\xspace} } \delta_x
\\
&= %
\phi_{\textsc{C}}(\cset_{\mrm{in}}) + \phi_{\thintag} \left( \mc{S}_{\textsc{C}} \right).
\label{eq:psi_cpp_cp_thin}
\end{talign}
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
Noting the $\mathcal{G}^{f} $ property of \textsc{Halve}\xspace and applying \cref{thm:compress_sub_gamma}, we find that $\phi_{\textsc{C}}(\cset_{\mrm{in}})(f)$ is sub-Gaussian with parameter $\nu_{\textsc{C}}(n)$ defined in~\cref{eq:nu_cp}. Furthermore, by assumption on \textsc{Thin}\xspace, given $\mc{S}_{\textsc{C}}$, the variable $\phi_{\thintag}(\mc{S}_{\textsc{C}})(f)$ is $\nu_{\thintag}(\frac{\l_n}{2})$ sub-Gaussian. The claim now follows directly from \cref{subgsn_sum}.
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\subsection{Proof of \cref{lem:mmd_to_vec}: MMD as a vector norm}
\label{sub:proof_of_lem_mmd_to_vec}
Let $v_{k,j}
\!\defeq\! \sum_{i=1}^n e_i \parenth{\mbf 1(x_i \in \mc{S}^{{\mrm{in}}}_{ k , j}) \!- \! 2\!\cdot\! \mbf 1(x_i \in \mc{S}^{\mrm{out}}_{k, j})}$.
By the reproducing property of $\mbf{k}$ we
%
%
%
%
%
%
%
%
%
%
%
have
\begin{talign}
\knorm{\spsi_{k,j} \left( \mbf{k} \right) }^2
&=
\norm{\sum_{x\in \mc{S}^{{\mrm{in}}}_{ k , j}}\mbf{k}(x, \cdot) \!- \! 2\sum_{x \in \mc{S}^{\mrm{out}}_{k, j}}\mbf{k}(x, \cdot)}^2_{\mbf{k}} \\
&= \sum_{x\in \mc{S}^{{\mrm{in}}}_{ k , j}, y\in \mc{S}^{{\mrm{in}}}_{ k , j}} \mbf{k}(x, y) - 4 \sum_{x\in \mc{S}^{{\mrm{out}}}_{ k , j}, y\in \mc{S}^{{\mrm{in}}}_{ k , j}} \mbf{k}(x, y)
+ 4 \sum_{x\in \mc{S}^{{\mrm{out}}}_{ k , j}, y\in \mc{S}^{{\mrm{out}}}_{ k , j}} \mbf{k}(x, y)\\
&=
v_{k,j}^\top \mbf{K} v_{k,j}
\seq{\cref{eq:def_u_and_v}}
\twonorm{u_{k,j}}^2.
\label{eq:knorm_matrix}
\end{talign}
Using \cref{eq:psi_unroll,eq:def_u_and_v,eq:def_M}, and mimicking the derivation above~\cref{eq:knorm_matrix}, we can also conclude that
\begin{talign}
\knorm{\psi_{\textsc{C}} \left( \mbf{k} \right) }^2
&= \twonorm{u_{\textsc{C}}}^2.
\end{talign}
Additionally, we note that
\begin{talign}
\mmd_\mbf{k} \left( \cset_{\mrm{in}} , \mc{S}_{ \textsc{C}} \right) = \sup_{ \knorm{f} =1 } \frac{1}{n} \doth{f, \psi_{\textsc{C}} \left( \mbf{k} \right) } = \frac{1}{n} \knorm{ \psi_{\textsc{C}} \left( \mbf{k} \right) } .
\end{talign}
Finally the conditional independence and zero mean property~\cref{eq:zero_mean_vector} follows from \cref{eq:psi_unroll} by noting that conditioned on $(\mc{S}^{{\mrm{in}}}_{ k' , j'})_{k'> k, j'\geq 1}$, the sets $(\mc{S}^{{\mrm{in}}}_{ k , j})_{ j\geq 1}$ are independent.
%
%
%
\subsection{Proof of \cref{lem:dilation_results}: Properties of the dilation operator}
\label{sub:proof_dilation_results}
For claim~(a) in the display~\cref{eq:power_of_M}, we have
\begin{talign}
\label{eq:op_M}
\mbf{M}_{u}^2 = \begin{pmatrix}
\twonorm{u}^2 & \mbf{0}_{n}^\top \\
\mbf{0}_{n} & uu^\top
\end{pmatrix}
\stackrel{(i)}{\preceq} \twonorm{u}^2 \mbf{I}_{n+1}
\quad\Longrightarrow\quad \opnorm{\mbf{M}_{u}} \seq{(ii)} \twonorm{u},
\end{talign}
where step~(i) follows from the standard fact that $uu^\top \preceq \twonorm{u}^2\mbf{I}_n$ and step~(ii) from the facts $\mbf{M}_u^2 \wtil{e}_1 = \twonorm{u}^2 \wtil{e}_1$ for $\wtil{e}_1$ the first canonical basis vector of $\ensuremath{\mathbb{R}}^{n+1}$ and $\opnorm{\mbf{M}_{u}}^2 = \opnorm{\mbf{M}_u^2}$. Claim~(b) follows directly by verifying that the vector $v = [1, \frac{u^\top}{\twonorm{u}}]^\top$ is an eigenvector of $\mbf{M}_{u}$ with eigenvalue $\twonorm{u}$. Finally,
%
%
%
%
%
%
claim~(c) follows directly from the claim~(a) and the fact that $\opnorm{\mbf{M}_{u}^q} = \opnorm{\mbf{M}_u}^q$ for all integers $q \geq 1$.
\subsection{Proof of \cref{lem:matrix_exp_bernstein}: Sub-Gaussian matrix Freedman inequality} \label{sec:additional_lemmas}
We first note the following two lemmas about the tail bounds and symmetrized moment generating functions (MGFs) for matrix valued random variables (see \cref{sub:proof_of_lem_matrix_master_tail,sub:proof_of_lem_matrx_exp_mgf} respectively for the proofs of \cref{lem:matrix_master_tail,lem:matrx_exp_mgf}).
\newcommand{Sub-Gaussian matrix tail bounds}{Sub-Gaussian matrix tail bounds}
\newcommand{Symmetrized sub-Gaussian matrix MGF}{Symmetrized sub-Gaussian matrix MGF}
\begin{lemma}[Sub-Gaussian matrix tail bounds] \label{lem:matrix_master_tail}
Let $\parenth{\mbf{X}_{k} \in \ensuremath{\mathbb{R}}^{m\times m}}_{k\geq1}$ be a sequence of self-adjoint matrices adapted to a filtration $\mathcal{F}_k$, and let $\parenth{\mbf{A}_{k} \in \ensuremath{\mathbb{R}}^{m\times m}}_{k\geq1}$ be a sequence of deterministic self-adjoint matrices.
Define the variance parameter $\sigma^2 \defeq \opnorm{\sum_{k} \mbf A_k}$.
If, for a Rademacher random variable $\varepsilon$ independent of $\parenth{\mbf X_k, \mathcal{F}_k}_{k\geq1}$, we have
\begin{talign}
\label{eq:matrix_tb_assum}
\log \mathbb{E} \left[ \exp( 2\varepsilon\theta \mbf{X}_k ) | \mathcal{F}_{k-1} \right] \preceq 2\theta^2 \mbf{A}_{k}
\qtext{for all} \theta \in \ensuremath{\mathbb{R}},
\end{talign}
%
then we also have
\begin{talign}
\P \left[\lambda_{\max} \left( \sum_{k} \mbf{X}_k \right) \geq t \right]
\leq
m e^{-{t^2}/{(8\sigma^2)}}
\qtext{for all} t\geq 0.
\end{talign}
\end{lemma}
\begin{lemma}[Symmetrized sub-Gaussian matrix MGF] \label{lem:matrx_exp_mgf}
For a fixed scalar $R$, let $\mbf{X}$ be a self-adjoint matrix satisfying
\begin{talign}
\mathbb{E} \mbf{X} = 0 \qtext{and}
\mathbb{E} \mbf{X}^{q} \preceq (\frac{q}{2})! R^{q} \mbf{I} & \qtext{ for } q \in 2\mathbb N.
\label{eq:subgauss_condition}
\end{talign}
If $\varepsilon$ is a Rademacher random variable independent of $\mbf X$, then
\begin{talign}
\label{eq:subgauss_mgf}
\mathbb{E} \exp \left( 2\varepsilon\theta \mbf{X} \right) \preceq \exp \left(2 \theta^2 R^2\mbf{I} \right) & \qtext{ for all } \theta\in \ensuremath{\mathbb{R}}.
\end{talign}
\end{lemma}
The assumed conditions~\cref{eq:matrix_bernstein_conditions} allow us to apply \cref{lem:matrx_exp_mgf} conditional on $ \left(\mbf{Y}_{i} \right)_{i<k} $ along with the operator monotonicity of $\log$ to find that
\begin{talign}
\log \mathbb{E} \left[ \exp \left(2\varepsilon \theta \mbf{Y}_{k} \right) \vert \left\{ \mbf{Y}_{i} \right\}_{i<k} \right] \preceq 2\theta^2R_k^2 \mbf I
\qtext{for all}
\theta \in \ensuremath{\mathbb{R}},
\end{talign}
for a Rademacher random variable $\varepsilon$ independent of $\left(\mbf{Y}_{k} \right)_{k\geq 1}$.
Moreover, $ \opnorm{\sum_{k} \mbf A_k}=\opnorm{\sum_{k} R_k^2 \mbf I} = \sum_{k} R_k^2 = \sigma^2$.
Thus, applying \cref{lem:matrix_master_tail}, we find that
\begin{align}
\P [ \lambda_{\max} ( \textsum_i \mbf{Y}_{i} ) \geq t]
\leq
m e^{-{t^2}/{(8\sigma^2)}}
\stext{for all} t \geq 0.
\end{align}
As an immediate consequence, we also find that
\begin{align}
\P [ \lambda_{\max} ( \textsum_i \mbf{Y}_{i} ) \geq \sqrt{8\sigma^2(t+\log m)}]
\leq e^{-t} \qtext{for all} t\geq 0,
\end{align}
as claimed.
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\subsection{Proof of \cref{lem:tailboundstomoments}: Tail bounds imply moment bounds}
\label{sub:proof_of_tail_moments}
%
%
%
%
%
%
%
%
%
%
We begin by bounding the moments of the shifted random variable $X = Z- a$. Note that $Z \geq 0$, so that $X \geq -a$. Next, note that $ X = X_{+} - X_{-} $ where $ X_{\pm} = \max \left( \pm X , 0 \right) $ and that $ |X|^{ q} = X_{+}^{q} + X_{-}^{q} $.
Furthermore, $X_{-}^{q} \leq a^{q}$ by the nonnegativity of $Z$, so that
$
|X|^{q} \leq a^{q} + X_{+}^{q}.
$
Since $\P \left[ X_{+} > u \right] = \P \left[ X > u \right] = \P[ Z > a + u]$ for any $u > 0 $, we apply the tail bounds on $Z$ to control the moments of $X_{+}$.
In particular, we have
\begin{talign}
\mathbb{E} \left[ X_{+}^{q} \right]
& \seq{(i)} q \int_{0}^{\infty} u^{q-1} \P \left[ X_+> u \right] du \\
&\seq{(ii)} q \int_0^{\infty} (v\sqrt{t})^{q-1} \P [ X_{+} > v\sqrt{t} ] \cdot \frac{v}{2\sqrt{t}} dt \\
& \stackrel{(iii)}{\leq} q v^q \int_{0}^{\infty} t^{q/2-1} e^{-t} dt
\seq{(iv)} qv^q \Gamma(\frac{q}{2}),
%
\end{talign}
where we have applied $(i)$
integration by parts,
$(ii)$ the substitution $ u= v \sqrt{t}$,
$ (iii) $ the assumed tail bound for $Z$, and
$(iv)$ the definition of the Gamma function.
%
Since $Z = X+ a$, the convexity of the function $t\mapsto t^q$ for $q\geq 1$, and Jensen's inequality imply that for each $q\in 2\mbb N$, we have
\begin{talign}
\mathbb{E} Z^q \leq 2^{q-1}( a^q + \mathbb{E} |X|^q) \leq 2^{q-1}(2 a^q + \mathbb{E} X_{+}^q)
&\leq (2a)^q + 2^{q-1} qv^q \Gamma(\frac{q}{2}) \\
&= (2a)^q + 2^{q-1} qv^q (\frac{q}{2}-1)!\\
&\leq (2a+2v)^q (\frac{q}{2})!
\end{talign}
where the last step follows since $x^q+y^q\leq (x+y)^q$ for all $q\in\naturals$ and $x,y\geq 0$. The proof is now complete.
\subsection{Proof of \cref{lem:matrix_master_tail}: Sub-Gaussian matrix tail bounds}
\label{sub:proof_of_lem_matrix_master_tail}
The proof of this result is identical to that of \citet[Proof of Thm.~7.1]{user_friendly_matrix} as the same steps are justified under our weaker assumption~\cref{eq:matrix_tb_assum}.
Specifically, applying the arguments from \citet[Proof of Thm.~7.1]{user_friendly_matrix}, we find that
\begin{talign}
\mathbb{E}\brackets{\tr \exp(\sum_{k=1}^n\theta \mbf X_k)}
%
%
&\leq
\mathbb{E}\brackets{\tr\exp\parenth{\sum_{k=1}^{n-1}\theta \mbf X_k + \log\mathbb{E}\brackets{\exp(2\varepsilon \theta \mbf X_n)\vert \mc F_{n-1}}}} \\
&\stackrel{\cref{eq:matrix_tb_assum}}{\leq}
\mathbb{E}\brackets{\tr\exp\parenth{\sum_{k=1}^{n-1}\theta \mbf X_k + 2\theta^2 \mathbf A_n}} \\
&\stackrel{(i)}{\leq}
\tr\exp\parenth{2\theta^2\sum_{k=1}^{n} \mathbf A_k}
\stackrel{(ii)}{\leq}
m\exp\parenth{2\theta^2\sigma^2},
\label{eq:mgf_bound}
\end{talign}
where step~(i) follows by iterating the arguments over $k=n-1, \ldots, 1$ and step~(ii) from the standard fact that $\tr(\exp(\mbf A)) \leq m\opnorm{\exp(\mbf A)} = m \exp\sparenth{\opnorm{\mbf A}}$ for an $m\times m$ self-adjoint matrix~$\mbf A$. Next, applying the matrix Laplace transform method \citet[Prop.~3.1]{user_friendly_matrix}, for all $t> 0$, we have
\begin{talign}
\P \left[\lambda_{\max} \left( \sum_{k} \mbf{X}_k \right) \geq t \right]
&\leq \inf_{\theta>0} \braces{e^{-\theta t} \cdot \mathbb{E}\brackets{\tr \exp(\sum_{k=1}^n\theta \mbf X_k)} }\\
&\sless{\cref{eq:mgf_bound}} m\inf_{\theta>0} \braces{e^{-\theta t} \cdot e^{2\theta^2\sigma^2} }
= me^{-{t^2}/{(8\sigma^2)}},
\end{talign}
where the last step follows from the choice $\theta = \frac{t}{4\sigma^2}$.
The proof is now complete.
\subsection{Proof of \cref{lem:matrx_exp_mgf}: Symmetrized sub-Gaussian matrix MGF}
\label{sub:proof_of_lem_matrx_exp_mgf}
We have
\begin{talign}
\mathbb{E}[\exp(2\varepsilon \theta\mbf X)] = \mathbf I + \sum_{q=1}^{\infty} \frac{2^q \theta^q}{q!} \mathbb{E}[\varepsilon^q \mbf X^q]
&\seq{(i)} \mathbf I + \sum_{k=1}^{\infty} \frac{2^{2k}\theta^{2k}}{(2k)!} \mathbb{E}[\mbf X^{2k}] \\
&\stackrel{(ii)}{\preceq} \mathbf I + \sum_{k=1}^{\infty} \frac{2^{2k} \theta^{2k} \, k!R^{2k}}{(2k)!} \mathbf I \\
&\stackrel{(iii)}{\preceq} \mathbf I + \sum_{k=1}^{\infty} \frac{(2\theta^2R^2)^{k}}{k!} \mathbf I
= \exp(2\theta^2R^2 \mbf I),
\end{talign}
where step~(i) uses the facts that (a) $\mathbb{E}[\varepsilon^q]=\mbf 1(q \in 2\mathbb N)$ and (b) $\mathbb{E}[\varepsilon^q \mbf X^q]= \mathbb{E}[\varepsilon^q]\mathbb{E}[ \mbf X^q]$ since $\varepsilon$ is independent of $\mbf X$, step~(ii) follows from the assumed condition~\cref{eq:subgauss_condition}, and step~(iii) from the fact that $\frac{2^k k!}{(2k)!}\leq \frac{1}{k!}$ \citep[Proof of Thm.~2.1]{boucheron2013concentration}.
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\section{{Proofs of \cref{ex:ktsplitcompress,,ex:ktcompress,,ex:ktsplitcompresspp,,ex:ktcompresspp}}}
\label{sec:kt_example}
We begin by defining the notions of sub-Gaussianity and $\mbf{k}$-sub-Gaussianity on an event.
\begin{definition}[Sub-Gaussian on an event]
We say that a random variable $G$ is \emph{sub-Gaussian on an event $\event$} with parameter $\sigma$ if
\begin{talign}
\mathbb{E} [ \mbf 1[ \event ] \cdot \exp ( \lambda \cdot G ) ] \leq \exp ( \frac{ \lambda^2 \sigma^2 }{2} )
\qtext{for all} \lambda \in \ensuremath{\mathbb{R}}.
\end{talign}
\end{definition}
\begin{definition}[\tbf{$\mbf{k}$-sub-Gaussian on an event}]
\label{def:mmd_subgamma_algo_event}
For a kernel $\mbf{k}$, we call a thinning algorithm $\textsc{Alg}\xspace$ \emph{$\mbf{k}$-sub-Gaussian on an event $\event$}
with parameter $v$ and shift $a$ if %
\begin{talign}
\label{eq:mmd_thin}
\P[ \event, \mmd_{\mbf{k}} ( \cset_{\mrm{in}} , \mc{S}_{\textsc{Alg}\xspace} ) \geq a_{n} + v_{n} \sqrt{t} \,\mid\, \cset_{\mrm{in}}] \leq e^{-t}
\qtext{for all} t \geq 0.
\end{talign}
%
%
%
%
%
%
%
%
%
\end{definition}
We will also make regular use of the unrolled representation \cref{eq:psi_compress} for the \textsc{Compress}\xspace measure discrepancy $\psi_{\textsc{C}}(\cset_{\mrm{in}})$ in terms of the \textsc{Halve}\xspace inputs $(\mc{S}^{{\mrm{in}}}_{ k, j})_{j\in[4^k]}$ of size
\begin{talign}
\label{eq:ni}
n_k = 2^{\ensuremath{\mfk{g}}\xspace+1-k}\sqrt{n}
\qtext{for}
0\leq k\leq \beta_n.
\end{talign}
For brevity, we will use the shorthand $\psi_{\textsc{C}} \defeq \psi_{\textsc{C}}( \cset_{\mrm{in}})$, $\spsi_{k,j} \defeq \psi_{\textsc{H}} ( \mc{S}^{{\mrm{in}}}_{ k, j})$, and $\psi_{\thintag} \defeq \psi_{\thintag}(\mc{S}_{\textsc{C}})$ hereafter.
\subsection{Proof of \cref{ex:ktsplitcompress}: \hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace-\textsc{Compress}\xspace}
\label{sec:proof_of_ktsplitcompress}
For $\textsc{Halve}\xspace = \hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace(\frac{\l^2}{n 4^{\ensuremath{\mfk{g}}\xspace+1} (\beta_n+1)} \delta)$ when applied to an input of size $\l$, the proof of Thm.~1 in \citet{dwivedi2022generalized} identifies a sequence of events $\event[k,j]$ and random signed measures $\tilde{\psi}_{k,j}$ such that, for each $0\leq k\leq \beta_n$, $j\in[4^k]$, and $f$ with $\knorm{f}=1$, %
\begin{enumerate}[label=(\alph*)]
\item $\P[\event[k,j]^c] \sless{(i)} \frac{n_k^2}{n 4^{\ensuremath{\mfk{g}}\xspace+1} (\beta_n+1)} \frac{\delta}{2} \seq{(ii)} \frac{1}{2}\frac{\delta}{4^{k}(\beta_n+1) } $,
\item $ \mbf 1 [ \event[k,j] ] \spsi_{k,j} = \mbf 1 [ \event[k,j] ] \tilde{\psi}_{k,j} $, and
\item $\tilde{\psi}_{k,j}(f)$
%
is $n_k\, \nu_{\textsc{H}}(n_k)$
%
sub-Gaussian \cref{eq:ktsplit-halve-subgsn} given $(\tilde{\psi}_{ k' , j'})_{k'> k, j'\geq 1}$ and $(\tilde{\psi}_{ k , j'})_{j'< j}$,
\end{enumerate}
where step~(ii) follows from substituting the definition $n_k=2^{\ensuremath{\mfk{g}}\xspace+1-k}\sqrt{n}$~\cref{eq:ni}.
To establish step~(ii) in property (a), we use the definition\cref{footnote:ktdelta} of $\hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace(\frac{n_k^2}{n 4^{\ensuremath{\mfk{g}}\xspace+1} (\beta_n+1)}\delta)$ for an input of size $n_k$, which implies that $\delta_i = \frac{n_k}{n 4^{\ensuremath{\mfk{g}}\xspace+1} (\beta_n+1)}\delta$ in the notation of \citet{dwivedi2022generalized}. The proof of Thm.~1 in \citet{dwivedi2022generalized} then implies that
\begin{talign}
\P[\event[k,j]^c] \leq \sum_{i=1}^{n_k/2}\delta_i = \frac{n_k}{2} \frac{n_k}{n 4^{\ensuremath{\mfk{g}}\xspace+1} (\beta_n+1)}\delta = \frac{n_k^2}{n 4^{\ensuremath{\mfk{g}}\xspace+1} (\beta_n+1)} \frac{\delta}{2}.
\end{talign}
Hence, on the event $ \event = \bigcap_{k,j} \event[k,j] $, these properties hold simultaneously for all \textsc{Halve}\xspace calls made by \textsc{Compress}\xspace, and, by the union bound,
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
%
\begin{talign}
\label{eq:compress_subgsn_prob}
\P [ \event^c ]
&\leq
\sum_{k=0}^{\beta_n} \sum_{j=1}^{4^k}\P [ \event[k,j]^c ]
\leq
\sum_{k=0}^{\beta_n} 4^k \frac{1}{2}\frac{\delta}{4^{k}(\beta_n+1)}
=
\frac{\delta}{2}.
\end{talign}
%
Now fix any $f$ with $\knorm{f}=1$.
We invoke the measure discrepancy representation \cref{eq:psi_compress}, the equivalence of $\spsi_{k,j}$ and $\tilde{\psi}_{ k, j}$ on $\event$, the nonnegativity of the exponential, and \cref{subgsn_sum} in turn to find
\begin{talign}
\mathbb{E} [ \mbf 1[\event] \cdot \exp( \lambda \cdot \phi_{\textsc{C}} (f))]
&=
\mathbb{E} [ \mbf 1[\event] \cdot \exp( \lambda \cdot \frac{1}{n}\psi_{\textsc{C}} (f))] \\
&=
\mathbb{E} [ \mbf 1[\event] \cdot \exp( \lambda \cdot
\frac{1}{n}\sqrt{n} 2^{-\ensuremath{\mfk{g}}\xspace-1 } \sum_{k=0}^{ \beta_n } \sum_{j=1}^{4^k} 2^{-k} \spsi_{k,j}(f) ) ] \\
&=
\mathbb{E} [ \mbf 1[\event] \cdot \exp( \lambda \cdot
\frac{1}{n}\sqrt{n} 2^{-\ensuremath{\mfk{g}}\xspace-1 } \sum_{k=0}^{ \beta_n } \sum_{j=1}^{4^k} 2^{-k} \tilde{\psi}_{ k, j}(f) ) ] \\
&\leq
\mathbb{E} [ \exp( \lambda \cdot
\frac{1}{n}\sqrt{n} 2^{-\ensuremath{\mfk{g}}\xspace-1 } \sum_{k=0}^{ \beta_n } \sum_{j=1}^{4^k} 2^{-k} \tilde{\psi}_{ k, j}(f) ) ] \\
&\leq
\exp(\frac{\lambda^2 \nu^2_{\textsc{C}}(n)}{2})
\qtext{for}
\nu^2_{\textsc{C}}(n)
=
\sum_{k=0}^{ \beta_n } 4^{-k} \nu^2_{\textsc{H}} (n_k )
\end{talign}
so that $\phi_{\textsc{C}} (f)$ is $\nu_{\textsc{C}}$ sub-Gaussian on $\event$.
\subsection{Proof of \cref{ex:ktcompress}: \textsc{KT}\xspace-\textsc{Compress}\xspace}
\label{sec:proof_of_ktcompress}
For $\textsc{Halve}\xspace =$ symmetrized $\textsc{KT}\xspace(\frac{\l^2}{n 4^{\ensuremath{\mfk{g}}\xspace+1} (\beta_n+1)} \delta)$ when applied to an input of size $\l$, the proofs of Thms.~1--4 in \citet{dwivedi2022generalized} identify a sequence of events $\event[k,j]$ and random signed measures $\tilde{\psi}_{k,j}$ such that, for each $0\leq k\leq \beta_n$ and $j\in[4^k]$,
\begin{enumerate}[label=(\alph*)]
\item $\P[\event[k,j]^c] \leq \frac{1}{2} \frac{\delta}{4^{k}(\beta_n+1) } $,
\item $ \mbf 1 [ \event[k,j] ] \spsi_{k,j} = \mbf 1 [ \event[k,j] ] \tilde{\psi}_{k,j}$,
\item $\P[ \frac{1}{n_k}\knorm{\tilde{\psi}_{k,j}(\mbf{k})} \geq a_{n_k} + v_{n_k} \sqrt{t} \,\mid\, (\tilde{\psi}_{ k' , j'})_{k'> k, j'\geq 1}, (\tilde{\psi}_{ k , j'})_{j'< j}] \leq e^{-t}$ for all $t\geq 0$, and
\item $\mathbb{E}[\tilde{\psi}_{k,j}(\mbf{k}) \mid (\tilde{\psi}_{ k' , j'})_{k'> k, j'\geq 1}, (\tilde{\psi}_{ k , j'})_{j'< j}] = 0$,
\end{enumerate}
where $n_k=2^{\ensuremath{\mfk{g}}\xspace+1-k}\sqrt{n}$ was defined in \cref{eq:ni}.
We derive property (a) exactly as in \cref{sec:proof_of_ktsplitcompress}.
Hence, on the event $ \event = \bigcap_{k,j} \event[k,j] $, these properties hold simultaneously for all \textsc{Halve}\xspace calls made by \textsc{Compress}\xspace, and, by the union bound \cref{eq:compress_subgsn_prob}, $\P [ \event^c ] \leq \frac{\delta}{2}$.
Furthermore, we may invoke the measure discrepancy representation \cref{eq:psi_compress}, the equivalence of $\spsi_{k,j}$ and $\tilde{\psi}_{ k, j}$ on $\event$, the nonnegativity of the exponential, and the proof of \cref{thm:compress_mmd} in turn to find
\begin{talign}
&\P[\event, \mmd(\cset_{\mrm{in}}, \mc{S}_{\textsc{C}})
\geq
\tilde{a}_{n} + \tilde{v}_{n} \sqrt{t}
\mid
\cset_{\mrm{in}}]
=
\P[\event, \frac{1}{n}\knorm{\psi_{\textsc{C}}(\mbf{k})}
\geq
\tilde{a}_{n} + \tilde{v}_{n} \sqrt{t}
\mid
\cset_{\mrm{in}}] \\
&=
\P[\event, \frac{1}{n}\knorm{\sqrt{n} 2^{-\ensuremath{\mfk{g}}\xspace-1 } \sum_{k=0}^{ \beta_n } \sum_{j=1}^{4^k} 2^{-k} \tilde{\psi}_{ k, j}(\mbf{k})}
\geq
\tilde{a}_{n} + \tilde{v}_{n} \sqrt{t}
\mid
\cset_{\mrm{in}}] \\
&\leq
\P[\frac{1}{n}\knorm{\sqrt{n} 2^{-\ensuremath{\mfk{g}}\xspace-1 } \sum_{k=0}^{ \beta_n } \sum_{j=1}^{4^k} 2^{-k} \tilde{\psi}_{ k, j}(\mbf{k})}
\geq
\tilde{a}_{n} + \tilde{v}_{n} \sqrt{t}
\mid
\cset_{\mrm{in}}]
\leq
e^{-t}
\qtext{for all}
t \geq 0,
\end{talign}
so that \textsc{Compress}\xspace is $\mbf{k}$-sub-Gaussian on $\event$ with parameters $(\tilde v, \tilde a)$.
\subsection{Proof of \cref{ex:ktsplitcompresspp}: \hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace-\textsc{Compress++}\xspace}\label{sec:proof_of_ktsplitcompresspp}
For $\textsc{Thin}\xspace = \hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace(
\frac{\ensuremath{\mfk{g}}\xspace}{\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1)} \delta
%
)$
and
$\textsc{Halve}\xspace = \hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace(
\frac{\l^2}{4n2^{\ensuremath{\mfk{g}}\xspace}(\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1))}\delta
%
)$ when applied to an input of size $\l$, the proof of Thm.~1 in \citet{dwivedi2022generalized} identifies a sequence of events $\event[k,j]$ and $\event[\thintag]$ and random signed measures $\tilde{\psi}_{k,j}$ and $\tilde{\psi}_{\thintag}$ such that, for each $0\leq k\leq \beta_n$, $j\in[4^k]$, and $f$ with $\knorm{f}=1$, %
\begin{enumerate}[label=(\alph*)]
\item
$\P[\event[k,j]^c] \sless{(i)}
\frac{n_k^2}{4n2^{\ensuremath{\mfk{g}}\xspace}(\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1))}\frac{\delta}{2}
\seq{(ii)} \frac{2^{\ensuremath{\mfk{g}}\xspace}}{4^{k}(\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1))}\frac{\delta}{2}
%
\qtext{and}
\P[\event[\thintag]^c] \sless{(iii)} \frac{\ensuremath{\mfk{g}}\xspace}{\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1)} \frac{\delta}{2}
%
$,
\item
$\mbf 1 [ \event[k,j] ] \spsi_{k,j} = \mbf 1 [ \event[k,j] ] \tilde{\psi}_{k,j}
\qtext{and}
\mbf 1[\event[\thintag] ] \psi_{\thintag} = \mbf 1 [ \event[\thintag] ] \tilde{\psi}_{\thintag}$, and
\item $\tilde{\psi}_{k,j}(f)$
%
is $n_k\, \nu_{\textsc{H}}(n_k)$
%
sub-Gaussian \cref{eq:ktsplit_zetas} given $(\tilde{\psi}_{ k' , j'})_{k'> k, j'\geq 1}$ and $(\tilde{\psi}_{ k , j'})_{j'< j}$ and
$\tilde{\psi}_{\thintag}$ is $\frac{\l_n}{2}\, \nu_{\thintag}(\frac{\l_n}{2})$
sub-Gaussian \cref{eq:ktsplit_zetas} given $\mc{S}_{\textsc{C}}$.
\end{enumerate}
Here, step~(i) and (ii) follow exactly as in steps~(i) and (ii) of property~(a) in \cref{sec:proof_of_ktsplitcompress}. For step~(iii), we use the definition\cref{footnote:ktdelta} of $\hyperref[algo:ktsplit]{\color{black}{\textsc{kt-split}}}\xspace(\frac{\ensuremath{\mfk{g}}\xspace}{\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1)} \delta)$ for an input of size $2^{\ensuremath{\mfk{g}}\xspace} \sqrt{n}$, which implies that $\delta_i =
\frac{\ensuremath{\mfk{g}}\xspace}{\sqrt{n} 2^{\ensuremath{\mfk{g}}\xspace}(\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1))}\delta$ in the notation of \citet{dwivedi2022generalized}. The proof of Thm.~1 in \citet{dwivedi2022generalized} then implies that
\begin{talign}
\P[\event[\thintag]^c] \leq \sum_{j=1}^{\ensuremath{\mfk{g}}\xspace}\frac{2^{j-1}}{\ensuremath{\mfk{g}}\xspace} \sum_{i=1}^{2^{\ensuremath{\mfk{g}}\xspace-j} \sqrt{n}} \delta_i = \sum_{j=1}^{\ensuremath{\mfk{g}}\xspace}\frac{2^{j-1}}{\ensuremath{\mfk{g}}\xspace} 2^{\ensuremath{\mfk{g}}\xspace-j} \sqrt{n} \frac{1}{\sqrt{n}2^{\ensuremath{\mfk{g}}\xspace}} \cdot \frac{\ensuremath{\mfk{g}}\xspace}{\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1)}\delta = \frac{\ensuremath{\mfk{g}}\xspace}{\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1)}\frac{\delta}{2},
\end{talign}
as claimed.
Hence, on the event $ \event = \bigcap_{k,j} \event[k,j] \cap \event[\thintag]$, these properties hold simultaneously for all \textsc{Halve}\xspace calls made by \textsc{Compress}\xspace, and, repeating an argument similar to the union bound \cref{eq:compress_subgsn_prob},
\begin{talign}
\P [ \event^c ]
\leq
\P [\event[\thintag]^c]
+
\sum_{k=0}^{\beta_n} \sum_{j=1}^{4^k}\P [ \event[k,j]^c ]
& \leq\frac{\ensuremath{\mfk{g}}\xspace}{\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1)}\frac{\delta}{2}
+ \sum_{k=0}^{\beta_n} 4^{k}\frac{2^{\ensuremath{\mfk{g}}\xspace}}{4^{k}(\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1))}\frac{\delta}{2}
%
%
%
%
%
=
\frac{\delta}{2}.
\label{eq:compresspp_subgsn_prob}
\end{talign}
Moreover, since $\phi_{\textsc{C++}\xspace} = \frac{1}{n}(\psi_{\textsc{C}} + \psi_{\thintag})$, \cref{subgsn_sum} and the argument of \cref{sec:proof_of_ktsplitcompress} together imply that $\phi_{\textsc{C++}\xspace} (f)$ is $\nu_{\textsc{C++}\xspace}$ sub-Gaussian on $\event$ for each $f$ with $\knorm{f}=1$.
\subsection{Proof of \cref{ex:ktcompresspp}: \textsc{KT}\xspace-\textsc{Compress++}\xspace}\label{sec:proof_of_ktcompresspp}
In the notation of \cref{ex:kt_subgamma}, define
\begin{talign}
\frac{\l_n}{2}a_{\l_n}
&=
\sqrt{n}a'_{\l_n/2}
=
C_{a}\sqrt{\staticinfnorm{\mbf{k}}},
\qtext{and} \\
\frac{\l_n}{2}v_{\l_n}
& =
\sqrt{n}v'_{\l_n/2}
=
C_{v}\sqrt{\staticinfnorm{\mbf{k}}\log(\frac{6(n-\sqrt{n}(2^{\ensuremath{\mfk{g}}\xspace}-\ensuremath{\mfk{g}}\xspace))}{\delta})}\ \mathfrak{M}_{\cset_{\mrm{in}},\mbf{k}}.
\end{talign}
Since $\textsc{Halve}\xspace = \trm{symmetrized } \textsc{KT}\xspace(\frac{\l^2}{4n2^{\ensuremath{\mfk{g}}\xspace}(\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1))}\delta)$ for inputs of size $\l$ and $\textsc{Thin}\xspace = \textsc{KT}\xspace( \frac{\ensuremath{\mfk{g}}\xspace}{\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1)}\delta)$, the proofs of Thms.~1--4 in \citet{dwivedi2022generalized} identify a sequence of events $\event[k,j]$ and $\event[\thintag]$ and random signed measures $\tilde{\psi}_{k,j}$ and $\tilde{\psi}_{\thintag}$ such that, for each $0\leq k\leq \beta_n$ and $j\in[4^k]$,
\begin{enumerate}[label=(\alph*)]
\item
$\P[\event[k,j]^c] \leq \frac{2^{\ensuremath{\mfk{g}}\xspace}}{4^{k}(\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1))}\frac{\delta}{2}
\qtext{and}
\P[\event[\thintag]^c] \leq \frac{\ensuremath{\mfk{g}}\xspace}{\ensuremath{\mfk{g}}\xspace+2^{\ensuremath{\mfk{g}}\xspace}(\beta_n+1)}\frac{\delta}{2}$,
\item
$\mbf 1 [ \event[k,j] ] \spsi_{k,j} = \mbf 1 [ \event[k,j] ] \tilde{\psi}_{k,j}
\qtext{and}
\mbf 1[\event[\thintag] ] \psi_{\thintag} = \mbf 1 [ \event[\thintag] ] \tilde{\psi}_{\thintag}$,
\item $\P[ \frac{1}{n_k}\knorm{\tilde{\psi}_{k,j}(\mbf{k})} \geq a_{n_k} + v_{n_k} \sqrt{t} \,\mid\, (\tilde{\psi}_{ k' , j'})_{k'> k, j'\geq 1}, (\tilde{\psi}_{ k , j'})_{j'< j}] \leq e^{-t}$
{and}
$\P[ \frac{2}{\l_n}\knorm{\tilde{\psi}_{\thintag}(\mbf{k})} \geq a_{\l_n/2}' + v_{\l_n/2}' \sqrt{t} \,\mid\, \mc{S}_{\textsc{C}}] \leq e^{-t}$
for all $t\geq 0$, and
\item $\mathbb{E}[\tilde{\psi}_{k,j}(\mbf{k}) \mid (\tilde{\psi}_{ k' , j'})_{k'> k, j'\geq 1}, (\tilde{\psi}_{ k , j'})_{j'< j}] = 0$.
\end{enumerate}
We derive property (a) exactly as in \cref{sec:proof_of_ktsplitcompresspp}.
Hence, on the event $ \event = \bigcap_{k,j} \event[k,j] \cap \event[\thintag]$, these properties hold simultaneously for all \textsc{Halve}\xspace calls made by \textsc{Compress}\xspace and
\begin{talign}
\wtil{\zeta}_{\textsc{H}}(\l_n) &= \wtil{\zeta}_{\thintag}(\frac{\l_n}{2}) = C_{v}\sqrt{\staticinfnorm{\mbf{k}}\log(\frac{6(n-\sqrt{n}(2^{\ensuremath{\mfk{g}}\xspace}-\ensuremath{\mfk{g}}\xspace))}{\delta})}\ \mathfrak{M}_{\cset_{\mrm{in}},\mbf{k}}.
\end{talign}
Moreover, by the union bound \cref{eq:compresspp_subgsn_prob},
$
\P [ \event^c ]
\leq
\frac{\delta}{2}.
$
Finally, since $\phi_{\textsc{C++}\xspace} = \frac{1}{n}(\psi_{\textsc{C}} + \psi_{\thintag})$ and the argument of \cref{sec:proof_of_ktcompress} implies that \textsc{Compress}\xspace is $\mbf{k}$-sub-Gaussian on $\event$ with parameters $(\tilde v, \tilde a)$,
the triangle inequality implies that \textsc{Compress++}\xspace is $\mbf{k}$-sub-Gaussian on $\event$ with parameters $(\hat v, \hat a)$ as in \cref{sec:compressppmmd}.
%
% --- extraction artifact removed: boundary between two concatenated documents ---
\subsection{Monotone systems}
We will be interested in Markov processes, both in discrete and continuous time, that take values in the space $\{0,1\}^{{\mathbb Z}^d}$ of configurations $x=(x(i))_{i\in{\mathbb Z}^d}$ of zeros and ones on the $d$-dimensional integer lattice ${\mathbb Z}^d$. By definition, a map $\phi:\{0,1\}^{{\mathbb Z}^d}\to\{0,1\}$ is \emph{local} if $\phi$ depends only on finitely many coordinates, i.e., there exists a finite set $\Delta\subset{\mathbb Z}^d$ and a function $\phi':\{0,1\}^\Delta\to\{0,1\}$ such that $\phi\big((x(i))_{i\in{\mathbb Z}^d}\big)=\phi'\big((x(i))_{i\in\Delta}\big)$ for each $x\in\{0,1\}^{{\mathbb Z}^d}$.
We say that $\phi$ is \emph{monotone} if $x\leq y$ (coordinatewise) implies $\phi(x)\leq\phi(y)$. We say that $\phi$ is \emph{monotonic} if it is both local and monotone.
The discrete time Markov chains $(X_n)_{n\geq 0}$ taking values in $\{0,1\}^{{\mathbb Z}^d}$ that we will be interested in are uniquely characterised by a finite collection $\phi_1,\ldots,\phi_m$ of monotonic maps and a probability distribution $p_1,\ldots,p_m$ on $\{1,\ldots,m\}$. They evolve in such a way that independently for each $n\geq 0$ and $i\in{\mathbb Z}^d$,
\begin{equation}
X_{n+1}(i)=\phi_k(\theta_iX_n)\quad\mbox{with probability }p_k\quad(1\leq k\leq m),
\end{equation}
where for each $i\in{\mathbb Z}^d$, we define a translation operator $\theta_i:\{0,1\}^{{\mathbb Z}^d}\to\{0,1\}^{{\mathbb Z}^d}$ by $(\theta_ix)(j):=x(i+j)$ $(i,j\in{\mathbb Z}^d)$. We call such a Markov chain $(X_n)_{n\geq 0}$ a \emph{monotone random cellular automaton}.
The continuous time Markov chains $(X_t)_{t\geq 0}$ taking values in $\{0,1\}^{{\mathbb Z}^d}$ that we will be interested in are similarly characterised by a finite collection $\phi_1,\ldots,\phi_m$ of monotonic maps and a collection of nonnegative rates $r_1,\ldots,r_m$. They evolve in such a way that independently for each $i\in{\mathbb Z}^d$,
\begin{equation}\label{traj}
X_t(i)\mbox{ is replaced by }\phi_k(\theta_iX_t)\mbox{ at the times of a Poisson process with rate }r_k
\end{equation}
$(1\leq k\leq m)$. We call such a Markov process a \emph{monotone interacting particle system}. Well-known results \cite[Thm~I.3.9]{Lig85} show that such processes are well-defined. They are usually constructed so that $t\mapsto X_t(i)$ is piecewise constant and right-continuous at its jump times.
Let $\P^x$ denote the law of the discrete time process started in $X_0=x$ and let $\underline 0$ and $\underline 1$ denote the configurations that are constantly zero or one, respectively. Well-known results imply that there exist invariant laws $\underline\nu$ and $\overline\nu$, called the \emph{lower} and \emph{upper invariant law}, such that
\begin{equation}\label{upconv}
\P^{\underline 0}[X_n\in\,\cdot\,]\Asto{n}\underline\nu
\quad\mbox{and}\quad
\P^{\underline 1}[X_n\in\,\cdot\,]\Asto{n}\overline\nu,
\end{equation}
where $\Rightarrow$ denotes weak convergence of probability laws on $\{0,1\}^{{\mathbb Z}^d}$ with respect to the product topology. Each invariant law $\nu$ of $(X_n)_{n\geq 0}$ satisfies $\underline\nu\leq\nu\leq\overline\nu$ in the stochastic order, and one has $\underline\nu=\overline\nu$ if and only if $\underline\rho=\overline\rho$, where
\begin{equation}
\underline\rho:=\lim_{n\to\infty}\P^{\underline 0}[X_n(i)=1]=\int\underline\nu(\mathrm{d} x)x(i)
\quad\mbox{and}\quad
\overline\rho:=\lim_{n\to\infty}\P^{\underline 1}[X_n(i)=1]=\int\overline\nu(\mathrm{d} x)x(i)
\end{equation}
denote the intensities of the lower and upper invariant laws. Completely analogous statements hold in the continuous-time setting \cite[Thm~III.2.3]{Lig85}. We will be interested in methods to derive lower bounds on $\overline\rho$.
It will be convenient to give names to some special monotonic functions. We start with the constant monotonic functions
\begin{equation}\label{phiconst}
\phi^0(x):=0\quad\mbox{and}\quad\phi^1(x):=1\qquad\big(x\in\{0,1\}^{{\mathbb Z}^d}\big).
\end{equation}
Apart from these constant functions, all other monotonic functions have the property that $\phi(\underline 0)=0$ and $\phi(\underline 1)=1$, and therefore monotone systems that do not use the function $\phi^0$ (resp.\ $\phi^1$) have the constant configuration $\underline 1$ (resp.\ $\underline 0$) as a fixed point of their evolution. We will discuss whether this fixed point is stable when the original system is perturbed by applying $\phi^0$ (resp.\ $\phi^1$) with a small probability or rate.
The next monotonic function of interest is the ``identity map''
\begin{equation}\label{phiid}
\phi^{\rm id}(x):=x(0)\qquad\big(x\in\{0,1\}^{{\mathbb Z}^d}\big).
\end{equation}
Monotone systems that only use $\phi^{\rm id}$ do not evolve at all, of course. We can think of the continuous-time interacting particle systems as limits of discrete-time cellular automata where time is measured in steps of some small size $\varepsilon$, the maps $\phi_1,\ldots,\phi_m$ are applied with probabilities $\varepsilon r_1,\ldots,\varepsilon r_m$, and with the remaining probability, the identity map $\phi^{\rm id}$ is applied.
For concreteness, to have some examples at hand, we consider three further, nontrivial examples of monotonic functions. For simplicity, we restrict ourselves to two dimensions. We will be interested in the functions
\be\begin{array}{r@{\,}c@{\,}l}\label{phiNEC}
\displaystyle\phi^{\rm NEC}(x)&:=&\displaystyle{\tt round}\big((x(0,0)+x(0,1)+x(1,0))/3\big),\\[5pt]
\displaystyle\phi^{\rm NN}(x)&:=&\displaystyle{\tt round}\big((x(0,0)+x(0,1)+x(1,0)+x(0,-1)+x(-1,0))/5\big),\\[5pt]
\displaystyle\phi^{\rm coop}(x)&:=&\displaystyle x(0,0)\vee\big(x(0,1)\wedge x(1,0)\big),
\end{array}\ee
where ${\tt round}$ denotes the function that rounds off a real number to the nearest integer. The function $\phi^{\rm NEC}$ is known as \emph{North-East-Center voting} or \emph{NEC voting}, for short, and also as \emph{Toom's rule}. In analogy to $\phi^{\rm NEC}$, we let $\phi^{\rm NWC},\phi^{\rm SWC},\phi^{\rm SEC}$ denote maps that describe North-West-Center voting, South-West-Center voting, and South-East-Center voting, respectively, defined in the obvious way. We will call the map $\phi^{\rm NN}$ from (\ref{phiNEC}) \emph{Nearest Neighbour voting} or \emph{NN voting}, for short. Another name found in the literature is the \emph{symmetric majority rule}. Figure~\ref{fig:updens} shows numerical data for random perturbations of the cellular automata defined by $\phi^{\rm NEC}$ and $\phi^{\rm NN}$. Both $\phi^{\rm NEC}$ and $\phi^{\rm NN}$ have obvious generalisations to higher dimensions, but we will not need these. We call $\phi^{\rm coop}$ the \emph{cooperative branching rule}. It is also known as the \emph{sexual reproduction rule} because of the interpretation that when $\phi^{\rm coop}$ is applied at a site $(i_1,i_2)$, two parents at $(i_1+1,i_2)$ and $(i_1,i_2+1)$ produce offspring at $(i_1,i_2)$, provided the parents' sites are both occupied and $(i_1,i_2)$ is vacant.
\subsection{Toom's stability theorem}\label{S:stabil}
Recall the definition of the constant monotonic map $\phi^0$ in (\ref{phiconst}). In what follows, we fix a monotonic map $\phi:\{0,1\}^{{\mathbb Z}^d}\to\{0,1\}$ that is not constantly zero or one. For each $p\in[0,1]$, we let $(X^p_k)_{k\geq 0}$ denote the monotone random cellular automaton defined by the monotonic functions $\phi^0$ and $\phi$ that are applied with probabilities $p$ and $1-p$, respectively. We let $\overline\rho(p)$ denote the density of the upper invariant law as a function of $p$. Since $\phi$ is not constant, $\underline 1$ is a fixed point of the deterministic system $(X^0_k)_{k\geq 0}$, and hence $\overline\rho(0)=1$.
We say that $(X_k)_{k\geq 0}=(X^0_k)_{k\geq 0}$ is stable if $\overline\rho(p)\to 1$ as $p\to 0$. Furthermore, we say that $\phi$ is an \emph{eroder} if for each initial state $X^0_0$ that contains only finitely many zeros, one has $X^0_n=\underline 1$ for some $n\in{\mathbb N}$. We quote the following result from \cite[Thm~5]{Too80}.
\begin{quote}
\textbf{Toom's stability theorem}
$(X_k)_{k\geq 0}$ is stable if and only if $\phi$ is an eroder.
\end{quote}
In words, this says that the all-one fixed point is stable under small random perturbations if and only if $\phi$ is an eroder.
For general local maps that need not be monotone, it is known that there exists no algorithm to decide whether a given map is an eroder, even in one dimension \cite{Pet87}. By contrast, for monotonic maps, there exists a simple criterion to check whether a given map is an eroder. Each monotonic map $\phi:\{0,1\}^{{\mathbb Z}^d}\to\{0,1\}$ can uniquely be written as
\begin{equation}\label{Aphi}
\phi(x)=\bigvee_{A\in{\cal A}(\phi)}\bigwedge_{i\in A}x(i),
\end{equation}
where ${\cal A}(\phi)$ is a finite collection of finite subsets of ${\mathbb Z}^d$ that have the interpretation that their indicator functions $1_A$ $(A\in{\cal A}(\phi))$ are the minimal configurations on which $\phi$ gives the outcome~1. In particular, ${\cal A}(\phi^0)=\emptyset$ and ${\cal A}(\phi^1)=\{\emptyset\}$, where in (\ref{Aphi}) we use the convention that the supremum (resp.\ infimum) over an empty set is 0 (resp.\ 1). We let ${\rm Conv}(A)$ denote the convex hull of a set $A$, viewed as a subset of ${\mathbb R}^d$. Then \cite[Thm~6]{Too80}, with simplifications due to \cite[Thm~1]{Pon13}, says that a monotonic map $\phi$ that is not constantly zero or one is an eroder if and only if
\begin{equation}\label{erosion}
\bigcap_{A\in{\cal A}(\phi)}{\rm Conv}(A)=\emptyset.
\end{equation}
We note that by Helly's theorem \cite[Corollary~21.3.2]{Roc70}, if (\ref{erosion}) holds, then there exists a subset ${\cal A}'\subset{\cal A}(\phi)$ of cardinality at most $d+1$ such that $\bigcap_{A\in{\cal A}'}{\rm Conv}(A)=\emptyset$. Using (\ref{erosion}), it is straightforward to check that the maps $\phi^{\rm NEC}$ and $\phi^{\rm coop}$, defined in (\ref{phiNEC}), are eroders. On the other hand, one can easily check that $\phi^{\rm NN}$ is not an eroder. Indeed, if $(X^0_n)_{n\geq 0}$ is started in an initial state with a zero on the sites $(0,0),(0,1),(1,0),(1,1)$ and ones everywhere else, then the deterministic system remains in this state forever.
\begin{figure}[htb]
\begin{center}
\inputtikz{updens}
\caption{Density $\overline\rho$ of the upper invariant law of two monotone cellular automata as a function of the parameters, shown on a scale from 0 (white) to 1 (black). On the left: a version of Toom's model that applies the maps $\phi^0$, $\phi^1$, and $\phi^{\rm NEC}$ with probabilities $p$, $r$, and $1-p-r$, respectively. On the right: the monotone random cellular automaton that applies the maps $\phi^0$, $\phi^1$, and $\phi^{\rm NN}$ with probabilities $p$, $r$, and $1-p-r$, respectively. Contrary to $\phi^{\rm NEC}$, the map $\phi^{\rm NN}$ is not an eroder. By the symmetry between the 0's and the 1's, in both models, the density $\underline\rho$ of the lower invariant law equals $1-\overline\rho$. Due to metastability effects, the area where the upper invariant law differs from the lower invariant law is shown too large in these numerical data. For Toom's model with $r=0$, the data shown above suggest a first order phase transition at $p_{\rm c}\approx 0.057$ but based on numerical data for edge speeds we believe the true value is $p_{\rm c}\approx 0.053$. We conjecture that the model on the right has a unique invariant law everywhere except on the diagonal $p=r$ for $p$ sufficiently small.}
\label{fig:updens}
\end{center}
\end{figure}
\subsection{Main results}
While Toom's stability theorem is an impressive result, it is important to realise its limitations. As Toom already remarked \cite[Section~V]{Too80}, his theorem does not apply to monotone cellular automata whose local state space is not $\{0,1\}$, but $\{0,1,2\}$, for example. Also, his theorem only applies in discrete time and only to random perturbations of cellular automata defined by a single non-constant monotonic map $\phi$.
The most difficult part in the proof of Toom's stability theorem is showing that if $\phi$ is an eroder, then $\overline\rho(p)\to 1$ as $p\to 0$. To give a lower bound on $\overline\rho(p)$ for small values of $p$, Toom uses a Peierls contour argument. The main result of our article is extending this Peierls argument to monotone cellular automata whose definition involves, apart from the constant monotonic map $\phi^0$, several non-constant monotonic maps $\phi_1,\ldots,\phi_m$. We are especially interested in the case when one of these maps is the identity map $\phi^{\rm id}$ and in the closely related problem of giving lower bounds on $\overline\rho(p)$ for monotone interacting particle systems, which evolve in continuous time. Another result of our work is obtaining explicit lower bounds for $\overline\rho(p)$ for concrete models, which has received little attention so far.
In particular, we extend Toom's definition of a contour to monotone cellular automata that apply several non-constant monotonic maps and to monotone interacting particle systems. We show that $X_n(i)=0$ for some $i\in{\mathbb Z}^d$ (or equivalently $X_t(i)=0$ in continuous time) implies the presence of a Toom contour ``rooted at'' $(n, i)$ (or $(t,i)$ respectively), which in turn can be used to obtain lower bounds for $\overline\rho(p)$ via a Peierls argument. Our main results are contained in Theorems~\ref{T:contour},~\ref{T:strongpres} and~\ref{T:contcontour}. At this point rather than formally stating these results, which would require delving into technical details, we state the explicit bounds we obtain as a result of our construction.
Our extension of Toom's result allows us to establish or improve explicit lower bounds for $\overline\rho(p)$ for concrete models. First we consider Toom's set-up, that is monotone random cellular automata that apply the maps $\phi^0$ and $\phi$ with probabilities $p$ and $1-p$, respectively, where $\phi$ is an eroder. An easy coupling argument shows that the intensity $\overline\rho(p)$ of the upper invariant law is a nonincreasing function of $p$, so we can define a \emph{critical parameter}
\begin{equation}
p_{\rm c}:=\sup\{p: \overline\rho(p)>0\}\in[0,1].
\end{equation}
Since $\phi$ is an eroder, Toom's stability theorem tells us that $p_{\rm c}>0$. We show how to derive explicit lower bounds on $p_{\rm c}$ for any choice of the eroder $\phi$, and do this for two concrete examples. We first take for $\phi$ the map $\phi^{\rm NEC}$ and obtain the bound $p_{\rm c}\geq 3^{-21}$, which does not compare well to the estimated value $p_{\rm c}\approx 0.053$ coming from numerical simulations. Nevertheless, this is probably the best rigorous bound currently available. Then we take for $\phi$ the map $\phi^{\rm coop}$ and, improving on Toom's method, we get the bound $p_{\rm c}\geq 1/64$. This is also some way off the estimated value $p_{\rm c}\approx 0.105$ coming from numerical simulations.
Then we consider the monotone random cellular automaton on ${\mathbb Z}^d$ that applies the maps $\phi^0,\phi^{\rm id}$, and $\phi^{\rm coop}$ with probabilities $p,q,r$, respectively with $q=1-p-r$. For each $p,r\geq 0$ such that $p+r\leq 1$, let $\overline\rho(p,r)$ denote the intensity of the upper invariant law of the process with parameters $p,1-p-r,r$. Arguing as before, it is easy to see that for each $0\leq r<1$ we can define a critical parameter
\begin{equation}
p_{\rm c}(r):=\sup\{p: \overline\rho(p, r)>0\}\in[0,1-r].
\end{equation}
By carefully examining the structure of Toom contours for this model, we prove the bound $p_{\rm c}(r)> 0.00624 r$.
Finally, we consider the interacting particle system on ${\mathbb Z}^2$ that applies the monotonic maps $\phi^0$ and $\phi^{\rm coop}$ with rates $1$ and $\lambda$, respectively. This model was introduced by Durrett~\cite{Dur86} as the \emph{sexual contact process}, and we can think of it as the limit of the previous discrete-time cellular automata. For each $\lambda>0$ we let $\overline\rho(\lambda)$ denote the intensity of the upper invariant law of the process with parameters $1, \lambda$. Again, we define a critical parameter
\begin{equation}
\lambda_{\rm c}:=\inf\{\lambda\geq 0:\overline\rho(\lambda)>0\}\in(0,\infty).
\end{equation}
Numerical simulations suggest the value $\lambda_{\rm c}\approx 12.4$, while we show the upper bound $\lambda_{\rm c}\leq 161.1985$. Durrett claimed a proof that $\lambda_{\rm c}\leq 110$, which he describes as ridiculous, but for which he challenges the reader to do better. We have not quite managed to beat his bound, though we are not far off. The proofs of all results in \cite{Dur86} are claimed to be contained in a forthcoming paper with Lawrence Gray \cite{DG85} that has never appeared. In \cite{Gra99}, Gray referred to these proofs as ``unpublished'' and in \cite{BD17}, Durrett cites the paper as an ``unpublished manuscript''.
Although for monotone cellular automata that apply several non-constant monotonic maps and for monotone interacting particle systems our methods do not seem to be enough to obtain bounds on the critical value in general, we believe that our examples are instructive of how one can try to do it for a concrete model.
\subsection{Discussion}
The cellular automaton defined by the NEC voting map $\phi^{\rm NEC}$ is nowadays known as \emph{Toom's model}.
In line with Stigler's law of eponymy, Toom's model was not invented by Toom, but by Vasilyev, Petrovskaya, and Pyatetski-Shapiro, who simulated random perturbations of this and other models on a computer \cite{VPP69}.
The function $p\mapsto\overline\rho(p)$ appears to be continuous except for a jump at $p_{\rm c}$ (see Figure~\ref{fig:updens}). Toom, having heard of \cite{VPP69} during a seminar, proved in \cite{Too74} that there exist random cellular automata on ${\mathbb Z}^d$ with at least $d$ different invariant laws. Although Toom's model is not explicitly mentioned in the paper, his proof method can be applied to prove that $p_{\rm c}>0$ for his model.
In \cite{Too80}, Toom improved his methods and proved his celebrated stability theorem. His paper is quite hard to read. One of the reasons is that Toom tries to be as general as possible. For example, he allows for cellular automata that look back more than one step in time, which severely complicates the statement of conditions like (\ref{erosion}). He also allows for noise that is not i.i.d.\ and cellular automata that are not monotone, even though all his results in the general case can easily be obtained by comparison with the i.i.d.\ monotone case. Toom's Peierls argument in the original paper is quite hard to understand. A more accessible account of Toom's original argument (with pictures!) in the special case of Toom's model can be found in the appendix of \cite{LMS90}.\footnote{Unfortunately, their Figure~6 contains a small mistake, in the form of an arrow that should not be there.} Although in principle, Toom's Peierls argument can be used to derive explicit bounds on $p_{\rm c}$, Toom did not attempt to do so, no doubt in the belief that more powerful methods would be developed in due time.
Bramson and Gray \cite{BG91} have given another alternative proof of Toom's stability theorem that relies on comparison with continuum models (which describe unions of convex sets in ${\mathbb R}^d$ evolving in continuous time) and renormalisation-style block arguments. They somewhat manage to relax Toom's conditions but the proof is very heavy and any explicit bounds derived using this method would presumably be very bad. Gray \cite{Gra99} proved a stability theorem for monotone interacting particle systems. The proofs use ideas from \cite{Too80} and \cite{BG91} and do not lend themselves well to the derivation of explicit bounds. Gray also derived necessary and sufficient conditions for a monotonic map to be an eroder \cite[Thm~18.2.1]{Gra99}, apparently overlooking the fact that Toom had already proved the much simpler condition (\ref{erosion}).
Motivated by abstract problems in computer science, a number of authors have given alternative proofs of Toom's stability theorem in a more restrictive setting \cite{GR88,BS88,Gac95,Gac21}. Their main interest is in a three-dimensional system which evolves in two steps: letting $e_1,e_2,e_3$ denote the basis vectors in ${\mathbb Z}^3$, they first replace $X_n(i)$ by
\[
X'_n(i):={\tt round}\big((X_n(i)+X_n(i+e_1)+X_n(i+e_2))/3\big),
\]
and then set
\[
X_{n+1}(i):={\tt round}\big((X'_n(i)+X'_n(i+e_3)+X'_n(i-e_3))/3\big).
\]
They prove explicit bounds for finite systems, although for values of $p$ that are extremely close to zero.\footnote{In particular, \cite{Gac95} needs $p<2^{-21}3^{-8}$.} The proofs of \cite{GR88} do not use Toom's Peierls argument but rely on different methods. Their bounds were improved in \cite{BS88}. Still better bounds can be found in the unpublished note \cite{Gac95}. The proofs in the latter manuscript are very similar to Toom's argument, with some crucial improvements at the end that are hard to follow due to missing definitions. This version of the argument seems to have inspired the incomplete note by John Preskill \cite{Pre07} who links it to the interesting idea of counting ``minimal explanations''. His definition of a ``minimal explanation'' is a bit stronger than the definition we will adopt in Subsection~\ref{S:finexpl} below, but sometimes, such as in the picture in Figure~\ref{fig:minexpl} on the right, the two definitions coincide. Figure~\ref{fig:minexpl} shows that the relation between Toom contours and minimal explanations is not so straightforward as suggested in \cite{Gac95,Pre07}. We have not found a good way to control the number of minimal explanations with a given number of defective sites and we do not know how to derive the lower bounds on the density of the upper invariant law stated in \cite{Gac95,Pre07}.
Hwa-Nien Chen \cite{Che92,Che94}, who was a PhD student of Lawrence Gray, studied the stability of various variations of Toom's model under perturbations of the initial state and the birth rate. The proofs of two of his four theorems depend on results that he cites from the as yet nonexistent paper \cite{DG85}. Ponselet \cite{Pon13} gave an excellent account of the existing literature and together with her supervisor proved exponential decay of correlations for the upper invariant law of a large class of randomly perturbed monotone cellular automata \cite{MP11}.
There exists duality theory for general monotone interacting particle systems \cite{Gra86,SS18}. The basic idea is that the state in the origin at time zero is a monotone function of the state at time $-t$, and this monotone function evolves in a Markovian way as a function of~$t$. Durrett \cite{Dur86} mentions this dual process as an important ingredient of the proofs of the forthcoming paper \cite{DG85} and it is also closely related to the minimal explanations of Preskill \cite{Pre07}. A good understanding of this dual process could potentially help solve many open problems in the area, but its behaviour is already quite complicated in the mean-field case \cite{MSS20}.
\subsection{Outline}
The paper is organized as follows. We define Toom contours and give an outline of the main idea of the Peierls argument in Subsection~\ref{S:Peierls}. In Subsection~\ref{S:erod} we prove Toom's stability theorem. In Subsection~\ref{S:twochar} we introduce a stronger notion of Toom contours, which allows us to improve bounds for certain models. We then present two explicit bounds in Toom's set-up in Subsection~\ref{S:explic}. In Subsection~\ref{S:intrins} we consider monotone random cellular automata that apply several non-constant monotonic maps and in Subsection~\ref{S:contfirst} we discuss continuous time results and bounds.
The rest of the paper is devoted to proofs and technical arguments. The results stated in Subsection~\ref{S:Peierls} are proved in Section~\ref{S:contour}. Section~\ref{S:bounds} contains all the proofs of the results stated in Subsections~\ref{S:erod},~\ref{S:twochar} and~\ref{S:explic}. The results of Subsection~\ref{S:intrins} are proved in Section~\ref{S:intbd}. Section~\ref{S:cont} gives the precise definitions and results together with their proofs in the continuous-time setting. Finally, the relation between Toom contours and minimal explanations in the sense of John Preskill \cite{Pre07} is discussed in Section~\ref{S:expla}, where we also discuss the open problem of counting minimal explanations.
\section{Setting and definitions}\label{S:Toomcontours}
\subsection{Toom's Peierls argument}\label{S:Peierls}
In this subsection, we derive a lower bound on the intensity of the upper invariant law for a class of monotone random cellular automata. We use a Peierls argument based on a special type of contours that we will call \emph{Toom contours}. In their essence, these are the contours used in \cite{Too80}, though on the face of it our definitions will look a bit different from those of \cite{Too80}. This pertains especially to the ``sources'' and ``sinks'' defined below that are absent from Toom's formulation and that we think help elucidate the argument. We start by defining a special sort of directed graphs, which we will call \emph{Toom graphs} (see Figure~\ref{fig:Toomgraph}). After that we first give an outline of the main idea of the Peierls argument and then provide the details.
\subsubsection*{Toom graphs}
Recall that a directed graph is a pair $(V,\vec E)$ where $V$ is a set whose elements are called vertices and $\vec E$ is a subset of $V\times V$ whose elements are called directed edges. For each directed edge $(v,w)\in\vec E$, we call $v$ the starting vertex and $w$ the endvertex of $(v,w)$. We let
\begin{equation}
\vec E_{\rm in}(v):=\big\{(u,v')\in\vec E:v'=v\big\}
\quad\mbox{and}\quad
\vec E_{\rm out}(v):=\big\{(v',w)\in\vec E:v'=v\big\}
\end{equation}
denote the sets of directed edges entering and leaving a given vertex $v\in V$, respectively.
We will need to generalise the concept of a directed graph by allowing directed edges to have a \emph{type} in some finite set $\{1,\ldots,\sigma\}$, with the possibility that several edges of different types connect the same two vertices. To that aim, we define a \emph{directed graph with $\sigma$ types of edges} to be a pair $(V,{\cal E})$, where ${\cal E}=(\vec E_1,\ldots,\vec E_\sigma)$ is a sequence of subsets of $V\times V$. We interpret $\vec E_s$ as the set of directed edges of type $s$.
\begin{figure}[t]
\begin{center}
\inputtikz{Toomgraph}
\caption{Example of a Toom graph with three charges. Sources and sinks are indicated with solid dots and internal vertices are indicated with open dots. Note the isolated vertex in the lower right corner, which is a source and a sink at the same time.}
\label{fig:Toomgraph}
\end{center}
\end{figure}
\begin{defi}\label{def:toomgraph}
A \emph{Toom graph} with $\sigma\geq 2$ \emph{charges} is a directed graph with $\sigma$ types of edges $(V,{\cal E})=(V,\vec E_1,\ldots,\vec E_\sigma)$ such that each vertex $v\in V$ satisfies one of the following four conditions:
\begin{enumerate}
\item $|\vec E_{s,{\rm in}}(v)|=0=|\vec E_{s,{\rm out}}(v)|$ for all $1\leq s\leq\sigma$,
\item $|\vec E_{s,{\rm in}}(v)|=0$ and $|\vec E_{s,{\rm out}}(v)|=1$ for all $1\leq s\leq\sigma$,
\item $|\vec E_{s,{\rm in}}(v)|=1$ and $|\vec E_{s,{\rm out}}(v)|=0$ for all $1\leq s\leq\sigma$,
\item there exists an $s\in\{1,\ldots,\sigma\}$ such that $|\vec E_{s,{\rm in}}(v)|=1=|\vec E_{s,{\rm out}}(v)|$\\ and $|\vec E_{l,{\rm in}}(v)|=0=|\vec E_{l,{\rm out}}(v)|$ for each $l\neq s$.
\end{enumerate}
\end{defi}
See Figure~\ref{fig:Toomgraph} for a picture of a Toom graph with three charges. We set
\be\begin{array}{r@{\,}c@{\,}l}\label{eq:sourcesinkint}
\displaystyle V_\circ&:=&\displaystyle\big\{v\in V:
|\vec E_{s,{\rm in}}(v)|=0\ \; \forall 1\leq s\leq\sigma\big\},\\[5pt]
\displaystyle V_\ast&:=&\displaystyle\big\{v\in V:
|\vec E_{s,{\rm out}}(v)|=0\ \; \forall 1\leq s\leq\sigma\big\},\\[5pt]
\displaystyle V_s&:=&\displaystyle\big\{v\in V:
|\vec E_{s,{\rm in}}(v)|=1=|\vec E_{s,{\rm out}}(v)|\big\}\qquad(1\leq s\leq\sigma).
\end{array}\ee
Vertices in $V_\circ,V_\ast$, and $V_s$ are called \emph{sources}, \emph{sinks}, and \emph{internal vertices} with \emph{charge} $s$, respectively. Vertices in $V_\circ\cap V_\ast$ are called \emph{isolated vertices}. Informally, we can imagine that at each source there emerge $\sigma$ charges, one of each type, that then travel via internal vertices of the corresponding charge through the graph until they arrive at a sink, in such a way that at each sink there converge precisely $\sigma$ charges, one of each type. It is clear from this description that $|V_\circ|=|V_\ast|$, i.e., the number of sources equals the number of sinks.
We let $\vec E:=\bigcup_{s=1}^\sigma\vec E_s$ denote the union of all directed edge sets and we let $E:=\big\{\{v,w\}:(v,w)\in\vec E\big\}$ denote the corresponding set of undirected edges. We say that a Toom graph $(V,{\cal E})$ is \emph{connected} if the associated undirected graph $(V,E)$ is connected.
\subsubsection*{Toom contours}
Our next aim is to define \emph{Toom contours}, which are connected Toom graphs that are embedded in space-time ${\mathbb Z}^{d+1}$ in a special way. Let $(V,{\cal E})=(V,\vec E_1,\ldots,\vec E_\sigma)$ be a Toom graph with $\sigma$ charges. Recall that $\vec E=\bigcup_{s=1}^\sigma\vec E_s$.
\begin{defi}\label{def:embedding}
An \emph{embedding} of $(V,{\cal E})$ is a map
\begin{equation}\label{psi}
V\ni v\mapsto\psi(v)=\big(\vec\psi(v),\psi_{d+1}(v)\big)\in{\mathbb Z}^d\times{\mathbb Z}
\end{equation}
that has the following properties:
\begin{enumerate}
\item $\displaystyle\psi_{d+1}(w)=\psi_{d+1}(v)-1$ for all $(v,w)\in\vec E$,
\item $\psi(v_1)\neq\psi(v_2)$ for each $v_1\in V_\ast$ and $v_2\in V$ with $v_1\neq v_2$,
\item $\psi(v_1)\neq\psi(v_2)$ for each $v_1,v_2\in V_s$ with $v_1\neq v_2$ $(1\leq s\leq\sigma)$.
\end{enumerate}
\end{defi}
We interpret~$\vec\psi(v)$ and~$\psi_{d+1}(v)$ as the space and time coordinates of~$\psi(v)$ respectively. Condition (i) says that directed edges $(v,w)$ of the Toom graph $(V,\vec E)$ point in the direction of decreasing time. Condition~(ii) says that sinks do not overlap with other vertices and condition~(iii) says that internal vertices do not overlap with other internal vertices of the same charge. See Figure~\ref{fig:minexpl} for an example of an embedding of a Toom graph. Not every Toom graph can be embedded. Indeed, it is easy to see that if $(V,{\cal E})$ has an embedding in the sense defined above, then
\begin{equation}
|\vec E_1|=\cdots=|\vec E_\sigma|,
\end{equation}
i.e., there is an equal number of charged edges of each charge. The Toom graph of Figure~\ref{fig:Toomgraph} can be embedded, but if we changed the number of internal vertices on one of the paths from a source to a sink, then the resulting graph would still be a Toom graph but it would not be possible to embed it.
\begin{figure}[htb]
\begin{center}
\inputtikz{minexpl}
\caption{On the left: a Toom graph with two charges. Middle: embedding of the Toom graph on the left, with time running downwards. The connected component containing the root $v_\circ$ forms a Toom contour rooted at the origin $(0,0,0)$. On the right: a minimal explanation for a monotone cellular automaton $\Phi$ that applies the maps $\phi^0$ and $\phi^{\rm coop}$ with probabilities $p$ and $1-p$, respectively. The origin has the value zero because the sites marked with a star are defective. This explanation is minimal in the sense that removing any of the defective sites results in the origin having the value one. The Toom contour in the middle picture is present in $\Phi$. In particular, the sinks of the Toom contour coincide with some, though not with all of the defective sites of the minimal explanation.}
\label{fig:minexpl}
\end{center}
\end{figure}
\begin{defi}\label{def:toomcontour}
A \emph{Toom contour} is a quadruple $(V,{\cal E},v_\circ,\psi)$, where $(V,{\cal E})$ is a connected Toom graph, $v_\circ\in V_\circ$ is a specially designated source, and $\psi$ is an embedding of $(V,{\cal E})$ that has the additional properties that:
\begin{enumerate}\addtocounter{enumi}{3}
\item $\psi_{d+1}(v_\circ)>t$ for all $(i,t)\in\psi(V)\backslash\{\psi(v_\circ)\}$,
\end{enumerate}
where $\psi(V):=\{\psi(v):v\in V\}$ denotes the image of $V$ under $\psi$.
\end{defi}
We call $v_\circ$ the \emph{root} of the Toom contour and we say that the Toom contour $(V,{\cal E},v_\circ,\psi)$ is \emph{rooted} at the space-time point $\psi(v_\circ)\in{\mathbb Z}^{d+1}$. See Figure~\ref{fig:minexpl} for an example of a Toom contour with two charges.
For any Toom contour $(V,{\cal E},v_\circ,\psi)$, we write
\begin{equation}\begin{array}{l}\label{Ecirc}
\vec E^\ast:=\bigcup_{s=1}^\sigma\vec E^\ast_s
\quad\mbox{with}\quad
\vec E^\ast_s:=\big\{(v,w)\in\vec E_s:v\in V_s\cup\{v_\circ\}\big\}
\quad(1\leq s\leq\sigma),\\[5pt]
\vec E^\circ:=\bigcup_{s=1}^\sigma\vec E^\circ_s
\quad\mbox{with}\quad
\vec E^\circ_s:=\big\{(v,w)\in\vec E_s:v\in V_\circ\backslash\{v_\circ\}\big\}
\quad(1\leq s\leq\sigma).
\end{array}\ee
i.e., $\vec E^\ast$ is the set of directed edges that have an internal vertex or the root as their starting vertex, and $\vec E^\circ$ are all the other directed edges, that start at a source that is not the root. The special role played by the root will become important in the next subsection, when we define what it means for a Toom contour to be present in a collection of i.i.d.\ monotonic maps.
If $(V,{\cal E},v_\circ,\psi)$ is a Toom contour, then we let
\begin{equation}\begin{array}{c}
\displaystyle\psi(V_\ast):=\big\{\psi(v):v\in V_\ast\big\},\quad
\psi(\vec E^\ast_s):=\big\{\big(\psi(v),\psi(w)\big):(v,w)\in \vec E^\ast_s\big\},\\[5pt]
\psi(\vec E^\circ_s):=\big\{\big(\psi(v),\psi(w)\big):(v,w)\in \vec E^\circ_s\big\}
\qquad(1\leq s\leq\sigma),
\end{array}\ee
denote the images under $\psi$ of the set of sinks $V_\ast$ and the sets of directed edges $\vec E^\ast_s$ and $\vec E^\circ_s$, respectively. We call two Toom contours $(V,{\cal E},v_\circ,\psi)$ and $(V',{\cal E}',v'_\circ,\psi')$ \emph{equivalent} if
\begin{equation}\label{equiv}
\psi(v_\circ)=\psi'(v_\circ),\quad
\psi(V_\ast)=\psi'(V'_\ast),\quad
\psi(\vec E^\ast_s)=\psi'(\vec{E'}^\ast_s),\quad
\psi(\vec E^\circ_s)=\psi'(\vec{E'}^\circ_s).
\end{equation}
\subsubsection*{The main idea of the construction}
We will be interested in monotone random cellular automata that are defined by a probability distribution $p_0,\ldots,p_m$ and monotonic maps $\phi_0,\ldots,\phi_m$, of which $\phi_0=\phi^0$ is the constant map that always gives the outcome zero and $\phi_1,\ldots,\phi_m$ are non-constant. This generalises the set-up of Toom, who only considered the case $m=1$. We fix an i.i.d.\ collection $\Phi=(\Phi_{(i,t)})_{(i,t)\in{\mathbb Z}^{d+1}}$ of monotonic maps such that $\P[\Phi_{(i,t)}=\phi_k]=p_k$ $(0\leq k\leq m)$. A space-time point $(i,t)$ with $\Phi_{(i,t)}=\phi^0$ is called a \emph{defective} site. In Lemmas \ref{L:maxtraj} and \ref{L:maxup} below, we show that $\Phi$ almost surely determines a stationary process $(\overline X_t)_{t\in{\mathbb Z}}$ that at each time $t$ is distributed according to the upper invariant law $\overline\nu$. Our aim is to give an upper bound on the probability that $\overline X_0(0)=0$, which then translates into a lower bound on the intensity $\overline\rho$ of the upper invariant law.
To achieve this, we first describe a special way to draw a Toom graph inside space-time ${\mathbb Z}^{d+1}$. Such an embedding of a Toom graph in space-time is then called a \emph{Toom contour}. Since our argument requires looking backwards in time, it will be convenient to adopt the convention that in all our pictures (such as Figure~\ref{fig:minexpl}), time runs downwards. Next, we define when a Toom contour is \emph{present} in the random collection of maps $\Phi$. Theorem~\ref{T:contour} then states that the event $\overline X_0(0)=0$ implies the presence of a Toom contour in $\Phi$. This allows us to bound the probability that $\overline X_0(0)=0$ from above by the expected number of Toom contours that are present in $\Phi$. In later subsections, we will then discuss conditions under which this expectation can be controlled and derive explicit bounds.
Before we state the remaining definitions, which are mildly complicated, we explain the main idea of the construction. We will define presence of Toom contours in such a way that the space-time point $(0,0)$ is a source and all the sinks correspond to defective sites where the map $\phi^0$ is applied. Let $M_n$ denote the number of Toom contours that have $(0,0)$ as a source and that have $n$ sinks. One would like to show that if the map $\phi^0$ is applied with a sufficiently small probability $p$, then the expression $\sum_{n=1}^\infty M_np^n$ is small. This will not be true, however, unless one imposes additional conditions on the contours. In fact, it is rather difficult to control the number of contours with a given number of sinks. It is much easier to count contours with a given number of edges. Letting $N_n$ denote the number of contours with $n$ edges (rather than sinks), it is not hard to show that $N_n$ grows at most exponentially as a function of $n$.
To complete the argument, therefore, it suffices to impose additional conditions on the contours that bound the number of edges in terms of the number of sinks. If at a certain space-time point $(i,t)$, the stationary process satisfies $\overline X_t(i)=0$, and the map $\Phi_{(i,t)}$ that is applied there is $\phi_k$, then for each set $A\in{\cal A}(\phi_k)$ (with ${\cal A}(\phi_k)$ defined in (\ref{Aphi})), at least one of the sites $j\in A$ must have the property that $\overline X_{t-1}(j)=0$. We will use this to steer edges in a certain direction, in such a way that different charges tend to move away from each other, \emph{except for edges that originate in a source}.
Since in the end, edges of all charges must convene in each sink, this will allow us to bound the total number of edges in terms of the ``bad'' edges that originate in a source. Equivalently, this allows us to bound the total number of edges in terms of the number of sources, which is the same as the number of sinks. This is the main idea of the argument. We now continue to give the precise definitions.
\subsubsection*{The contour argument}
Having defined the right sort of contours, we now come to the core of the argument: the fact that $\overline X_0(0)=0$ implies the existence of a Toom contour with certain properties. We first need a special construction of the stationary process $(\overline X_t)_{t\in{\mathbb Z}}$. We let $\{0,1\}^{{\mathbb Z}^{d+1}}$ denote the space of all space-time configurations $x=(x_t(i))_{(i,t)\in{\mathbb Z}^{d+1}}$. For $x\in\{0,1\}^{{\mathbb Z}^{d+1}}$ and $t\in{\mathbb Z}$, we define $x_t\in\{0,1\}^{{\mathbb Z}^d}$ by $x_t:=(x_t(i))_{i\in{\mathbb Z}^d}$. We will call a collection ${\bm{\phh}}=(\varphi_{(i,t)})_{(i,t)\in{\mathbb Z}^{d+1}}$ of monotonic maps from $\{0,1\}^{{\mathbb Z}^d}$ to $\{0,1\}$ a \emph{monotonic flow}. By definition, a \emph{trajectory} of ${\bm{\phh}}$ is a space-time configuration $x$ such that
\begin{equation}
x_t(i)=\varphi_{(i,t)}(\theta_ix_{t-1})\qquad\big((i,t)\in{\mathbb Z}^{d+1}\big).
\end{equation}
We need the following two simple lemmas.
\begin{lemma}[Minimal and maximal trajectories]
Let\label{L:maxtraj} ${\bm{\phh}}$ be a monotonic flow. Then there exist trajectories $\underline x$ and $\overline x$ that are uniquely characterised by the property that each trajectory $x$ of ${\bm{\phh}}$ satisfies $\underline x\leq x\leq\overline x$ (pointwise).
\end{lemma}
\begin{lemma}[The lower and upper invariant laws]
Let\label{L:maxup} $\phi_0,\ldots,\phi_m$ be monotonic functions, let $p_0,\ldots,p_m$ be a probability distribution, and let $\underline\nu$ and $\overline\nu$ denote the lower and upper invariant laws of the corresponding monotone random cellular automaton. Let $\Phi=\big(\Phi_{(i,t)}\big)_{(i,t)\in{\mathbb Z}^{d+1}}$ be an i.i.d.\ collection of monotonic maps such that $\P[\Phi_{(i,t)}=\phi_k]=p_k$ $(0\leq k\leq m)$, and let $\underline X$ and $\overline X$ be the minimal and maximal trajectories of $\Phi$. Then for each $t\in{\mathbb Z}$, the random variables $\underline X_t$ and $\overline X_t$ are distributed according to the laws $\underline\nu$ and $\overline\nu$, respectively.
\end{lemma}
From now on, we fix a monotonic flow ${\bm{\phh}}$ that takes values in $\{\phi_0,\ldots,\phi_m\}$, of which $\phi_0=\phi^0$ is the constant map that always gives the outcome zero and $\phi_1,\ldots,\phi_m$ are non-constant. Recall that ${\cal A}(\phi_k)$, defined in (\ref{Aphi}), corresponds to the set of minimal configurations on which $\phi_k$ gives the outcome~1. We fix an integer $\sigma\geq 2$ and for each $1\leq k\leq m$ and $1\leq s\leq\sigma$, we choose a set
\begin{equation}\label{As}
A_s(\phi_k)\in{\cal A}(\phi_k).
\end{equation}
Informally, the aim of these sets is to steer edges of different charges away from each other. In later subsections, when we derive bounds for concrete models, we will make an explicit choice for $\sigma$ and sets $A_s(\phi_k)$. For the moment, we allow these to be arbitrary. The integer $\sigma$ corresponds to the number of charges. The definition of what it means for a contour to be present will depend on the choice of the sets in (\ref{As}).
As a concrete example, consider the case $m=1$ and $\phi_1=\phi^{\rm coop}$, the cooperative branching map defined in (\ref{phiNEC}). The set ${\cal A}(\phi^{\rm coop})$ from (\ref{Aphi}) is given by ${\cal A}(\phi^{\rm coop})=\{A_1,A_2\}$ with $A_1:=\{(0,0)\}$ and $A_2:=\{(0,1),(1,0)\}$. Using (\ref{erosion}) we see that $\phi^{\rm coop}$ is an eroder. In this concrete example, we will set $\sigma:=2$ and for the sets $A_s(\phi_1)$ $(s=1,2)$ of (\ref{As}) we choose the sets $A_1,A_2$ we have just defined.
\begin{defi}\label{D:present}
A Toom contour $(V,{\cal E},v_\circ,\psi)$ with $\sigma$ charges is \emph{present} in the monotonic flow ${\bm{\phh}}$ if:
\begin{enumerate}
\item $\displaystyle\varphi_{\psi(v)}=\phi^0$ for all $\displaystyle v\in V_\ast$,
\item $\displaystyle\varphi_{\psi(v)}\in\{\phi_1,\ldots,\phi_m\}$ for all $\displaystyle v\in V\backslash V_\ast$,
\item $\displaystyle\vec\psi(w)-\vec\psi(v)\in A_s(\varphi_{\psi(v)})$ for all $(v,w)\in\vec E^\ast_s$ $(1\leq s\leq\sigma$),
\item $\displaystyle\vec\psi(w)-\vec\psi(v)\in\bigcup_{s=1}^\sigma A_s(\varphi_{\psi(v)})$ for all $(v,w)\in\vec E^\circ$,
\end{enumerate}
where $\vec E^\ast_s$ and $\vec E^\circ$ are defined in (\ref{Ecirc}) and $\vec\psi(v)$, defined in (\ref{psi}), denotes the spatial coordinates of the space-time point $\psi(v)$.
\end{defi}
Note that the definition of what it means for a contour to be present depends on the choice of the sets $A_s(\phi_k)$ in (\ref{As}). Conditions (i) and (ii) say that sinks of $(V,{\cal E})$ are mapped to defective space-time points, where the constant map $\phi^0$ is applied, and all other vertices are mapped to space-time points where one of the non-constant maps $\phi_1,\ldots,\phi_m$ is applied. Together with our earlier definition of an embedding, condition~(iii) says that if $(v,w)$ is an edge with charge $s$ that comes out of the root or an internal vertex, then $(v,w)$ is mapped to a pair of space-time points of the form $\big((i,t),(i+j,t-1)\big)$ with $j\in A_s(\varphi_{\psi(v)})$. Condition~(iv) is similar, except that if $v$ is a source different from the root, then we only require that $j\in\bigcup_{s=1}^\sigma A_s(\varphi_{\psi(v)})$. It is clear from this definition that if $(V,{\cal E},v_\circ,\psi)$ and $(V',{\cal E}',v'_\circ,\psi')$ are equivalent Toom contours, then $(V,{\cal E},v_\circ,\psi)$ is present in ${\bm{\phh}}$ if and only if the same is true for $(V',{\cal E}',v'_\circ,\psi')$.
For our example of the monotone cellular automaton with $\phi_1=\phi^{\rm coop}$, Definition~\ref{D:present} is demonstrated in Figure~\ref{fig:minexpl}. Directed edges of charge 1 and 2 are indicated in red and blue, respectively. Because of our choice $A_2(\phi_1):=\{(0,1),(1,0)\}$, blue edges that start at internal vertices or the root point in directions where one of the spatial coordinates increases by one. Likewise, since $A_1(\phi_1):=\{(0,0)\}$, red edges that start at internal vertices or the root point straight up, i.e., in the direction of decreasing time. Sinks of the Toom contour correspond to defective sites, as indicated in Figure~\ref{fig:minexpl} on the right.
In view of Lemma~\ref{L:maxup}, the following crucial theorem links the upper invariant law to Toom contours.
\begin{theorem}[Presence of a Toom contour]
Let\label{T:contour} ${\bm{\phh}}$ be a monotonic flow on $\{0,1\}^{{\mathbb Z}^d}$ that takes values in $\{\phi_0,\ldots,\phi_m\}$, where $\phi_0=\phi^0$ is the constant map that always gives the outcome zero and $\phi_1,\ldots,\phi_m$ are non-constant. Let $\overline x$ denote the maximal trajectory of ${\bm{\phh}}$. Let $\sigma\geq 2$ be an integer and for each $1\leq s\leq\sigma$ and $1\leq k\leq m$, let $A_s(\phi_k)\in{\cal A}(\phi_k)$ be fixed. If $\overline x_0(0)=0$, then, with respect to the given choice of $\sigma$ and the sets $A_s(\phi_k)$, a Toom contour $(V,{\cal E},v_\circ,\psi)$ rooted at $(0,0)$ is present in ${\bm{\phh}}$.
\end{theorem}
We note that the converse of Theorem~\ref{T:contour} does not hold, i.e., the presence in ${\bm{\phh}}$ of a Toom contour $(V,{\cal E},v_\circ,\psi)$ that is rooted at $(0,0)$ does not imply that $\overline x_0(0)=0$. This can be seen from Figure~\ref{fig:minexpl}. In this example, if there would be no other defective sites apart from the sinks of the Toom contour, then the origin would have the value one. This is a difference with the Peierls arguments used in percolation theory, where the presence of a contour is a necessary and sufficient condition for the absence of percolation.
Let ${\cal T}_0$ denote the set of Toom contours rooted at $(0,0)$ (up to equivalence). We formally denote a Toom contour by $T=(V,{\cal E},v_\circ,\psi)$. Let $\Phi=(\Phi_{(i,t)})_{(i,t)\in{\mathbb Z}^{d+1}}$ be an i.i.d.\ collection of monotonic maps taking values in $\{\phi_0,\ldots,\phi_m\}$. Then Theorem~\ref{T:contour} implies the Peierls bound:
\begin{equation}\label{Pei}
1-\overline\rho=\P[\overline X_0(0)=0]
\leq\sum_{T\in{\cal T}_0}\P\big[T\mbox{ is present in }\Phi\big].
\end{equation}
In Section~\ref{S:erod} below, we will show how (\ref{Pei}) can be used to prove the most difficult part of Toom's stability theorem, namely, that the upper invariant law of eroders is stable under small random perturbations.
\subsubsection*{Toom contours with two charges}
Although Theorem~\ref{T:contour} is sufficient to prove stability of eroders, when deriving explicit bounds, it is often useful to have stronger versions of Theorem~\ref{T:contour} at one's disposal that establish the presence of Toom contours with certain additional properties that restrict the sum on the right-hand side in (\ref{Pei}) and hence lead to improved bounds. Here we formulate one such result that holds specifically for Toom contours with two charges.
As before, we let ${\bm{\phh}}$ be a monotonic flow taking values in $\{\phi_0,\ldots,\phi_m\}$, of which $\phi_0=\phi^0$ is the constant map that always gives the outcome zero and $\phi_1,\ldots,\phi_m$ are non-constant. We set $\sigma=2$ and choose sets $A_s(\phi_k)\in{\cal A}(\phi_k)$ $(1\leq k\leq m,\ 1\leq s\leq 2)$ as in (\ref{As}).
\begin{defi}\label{D:strongpres}
A Toom contour $(V,{\cal E},v_\circ,\psi)$ with $2$ charges is \emph{strongly present} in the monotonic flow ${\bm{\phh}}$ if in addition to conditions (i)--(iv) of Definition~\ref{D:present}, for each $v\in V_\circ\backslash\{v_\circ\}$ and $w_1,w_2\in V$ with $(v,w_s)\in\vec E_{s,{\rm out}}(v)$ $(s=1,2)$, one has:
\begin{enumerate}\addtocounter{enumi}{4}
\item $\vec\psi(w_1)-\vec\psi(v)\in A_2(\varphi_{\psi(v)})$ and $\vec\psi(w_2)-\vec\psi(v)\in A_1(\varphi_{\psi(v)})$,
\item $\vec\psi(w_1)\neq\vec\psi(w_2)$.
\end{enumerate}
\end{defi}
Condition~(v) can informally be described by saying that charged edges pointing out of any source other than the root must always point in the ``wrong'' direction, compared to charged edges pointing out of an internal vertex or the root. Note that for the Toom contour in Figure~\ref{fig:minexpl}, this is indeed the case. With this definition, we can strengthen Theorem~\ref{T:contour} as follows.
\begin{theorem}[Strong presence of a Toom contour]
If\label{T:strongpres} $\sigma=2$, then the Toom contour $(V,{\cal E},v_\circ,\psi)$ from Theorem~\ref{T:contour} can be chosen such that it is strongly present in ${\bm{\phh}}$.
\end{theorem}
Our proof of Theorem~\ref{T:strongpres} follows quite a different strategy from the proof of Theorem~\ref{T:contour}. We do not know to what extent Theorem~\ref{T:strongpres} can be generalised to Toom contours with three or more charges.
In the following subsections, we will show how the results of the present subsection can be applied in concrete situations. In Subsection~\ref{S:erod}, we show how Theorem~\ref{T:contour} can be used to prove stability of eroders, which is the difficult implication in Toom's stability theorem. In Subsection~\ref{S:twochar}, building on the results of Subsection~\ref{S:erod}, we show how for Toom contours with two charges, the bounds can be improved by applying Theorem~\ref{T:strongpres} instead of Theorem~\ref{T:contour}. In Subsection~\ref{S:explic}, we derive explicit bounds for two concrete eroders. In Subsection~\ref{S:intrins}, we leave the setting of Toom's stability theorem and discuss monotone random cellular automata whose definition involves more than one non-constant monotonic map. In Subsection~\ref{S:contbounds} we derive bounds for monotone interacting particle systems in continuous time.
\subsection{Stability of eroders}\label{S:erod}
In this subsection, we restrict ourselves to the special set-up of Toom's stability theorem. We fix a non-constant monotonic map $\phi$ that is an eroder and let $\Phi^p=(\Phi^p_{(i,t)})_{(i,t)\in{\mathbb Z}^{d+1}}$ be an i.i.d.\ collection of monotonic maps that assume the values $\phi^0$ and $\phi$ with probabilities $p$ and $1-p$, respectively. We let $(\overline X^p_t)_{t\in{\mathbb Z}}$ denote the maximal trajectory of $\Phi^p$ and let $\overline\rho(p):=\P[\overline X^p_0(0)=1]$ denote the intensity of the upper invariant law. We will show how the Peierls bound (\ref{Pei}) can be used to prove that $\overline\rho(p)\to 1$ as $p\to 0$, which is the most difficult part of Toom's stability theorem.
To do this, first we will need another equivalent formulation of the eroder property~\eqref{erosion}. By definition, a \emph{polar function} is a linear function ${\mathbb R}^d\ni z\mapsto L(z)=(L_1(z),\ldots,L_\sigma(z))\in{\mathbb R}^\sigma$ such that
\begin{equation}\label{polar}
\sum_{s=1}^\sigma L_s(z)=0\qquad(z\in{\mathbb R}^d).
\end{equation}
We call $\sigma\geq 2$ the \emph{dimension} of $L$. The following lemma is adapted from \cite[Lemma~12]{Pon13}, with the basic idea going back to \cite{Too80}. Recall the definition of ${\cal A}(\phi)$ in (\ref{Aphi}).
\begin{lemma}[Erosion criterion]
A\label{L:erode} non-constant monotonic function $\phi:\{0,1\}^{{\mathbb Z}^d}\to\{0,1\}$ is an eroder if and only if there exists a polar function $L$ of dimension $\sigma\geq 2$ such that
\begin{equation}\label{erode}
\sum_{s=1}^\sigma\sup_{A\in{\cal A}(\phi)}\inf_{i\in A}L_s(i)>0.
\end{equation}
If $\phi$ is an eroder, then $L$ can moreover be chosen so that its dimension $\sigma$ is at most $d+1$.
\end{lemma}
To understand why the condition (\ref{erode}) implies that $\phi$ is an eroder, for $1\leq s\leq\sigma$, let
\begin{equation}
\delta_s:=\sup_{A\in{\cal A}(\phi)}\inf_{i\in A}L_s(i)\quad\mbox{and}\quad
r_s(x):=\sup\big\{L_s(i):i\in{\mathbb Z}^d,\ x(i)=0\big\}
\qquad\big(x\in\{0,1\}^{{\mathbb Z}^d}\big),
\end{equation}
with $r_s(\underline 1):=-\infty$, and let $(X^0_k)_{k\geq 0}$ denote the deterministic cellular automaton that applies the map $\phi$ in each space-time point, started in an arbitrary initial state. In the proof of Lemma~\ref{L:Lerod} below, we will show that
\begin{equation}\label{edgespeed}
r_s(X^0_n)\leq r_s(X^0_0)-\delta_sn\qquad(n\geq 0).
\end{equation}
This says that $\delta_s$ has the interpretation of an \emph{edge speed} in the direction defined by the linear function $L_s$. If $x$ is a configuration containing finitely many zeros, then we define the \emph{extent} of $x$ by
\begin{equation}
{\rm ext}(x):=\sum_{s=1}^\sigma r_s(x).
\end{equation}
Then ${\rm ext}(\underline 1)=-\infty$, while on the other hand, by the defining property (\ref{polar}) of a polar function, ${\rm ext}(x)\geq 0$ for each $x$ that contains at least one zero. Now (\ref{edgespeed}) implies that if $X^0_0$ contains finitely many zeros, then
\begin{equation}
{\rm ext}(X^0_n)\leq{\rm ext}(X^0_0)-n\delta
\quad\mbox{with}\quad
\delta:=\sum_{s=1}^\sigma\delta_s.
\end{equation}
It follows that $X^0_n=\underline 1$ for all $n$ such that ${\rm ext}(X^0_0)-n\delta<0$. Since $\delta>0$ by (\ref{erode}), we conclude that $\phi$ is an eroder.
We use Lemma~\ref{L:erode} and the polar functions to choose the number of charges $\sigma$ and to make a choice for the sets $A_s(\phi)\in{\cal A}(\phi)$ $(1\leq s\leq\sigma)$ as in (\ref{As}) when defining Toom contours. For a given choice of a polar function $L$ and sets $A_s(\phi)$, let us set
\begin{equation}\label{Bphi}
B(\phi):=\bigcup_{s=1}^\sigma A_s(\phi),
\end{equation}
and define
\begin{equation}\begin{array}{r@{\,}c@{\,}lcr@{\,}c@{\,}ll}\label{epsR}
\displaystyle\varepsilon&:=&\displaystyle\sum_{s=1}^\sigma\varepsilon_s&\quad\mbox{with}\quad&
\displaystyle\varepsilon_s&:=&\displaystyle\inf_{i\in A_s(\phi)}L_s(i)\qquad&\displaystyle(1\leq s\leq\sigma),\\[5pt]
\displaystyle R&:=&\displaystyle\sum_{s=1}^\sigma R_s&\quad\mbox{with}\quad&
\displaystyle R_s&:=&\displaystyle-\inf_{i\in B(\phi)}L_s(i)\qquad&\displaystyle(1\leq s\leq\sigma).
\end{array}\ee
Then Lemma~\ref{L:erode} tells us that since $\phi$ is an eroder, we can choose the polar function $L$ and sets $A_s(\phi)$ in such a way that $\varepsilon>0$, which we assume from now on.
Recall that in the example where $\phi=\phi^{\rm coop}$, we earlier made the choices $\sigma:=2$, $A_1(\phi):=\{(0,0)\}$, and $A_2(\phi):=\{(0,1),(1,0)\}$. We will now also choose a polar function by setting
\begin{equation}\label{Lcoop}
L_1(z_1,z_2):=-z_1-z_2\quad\mbox{and}\quad L_2:=-L_1\qquad\big((z_1,z_2)\in{\mathbb R}^2\big).
\end{equation}
One can check that for this choice of $L$ the constants from~\eqref{epsR} are given by
\begin{equation}\label{coopepsR}
\varepsilon=1\quad\mbox{and}\quad R=1.
\end{equation}
Returning to the setting where $\phi$ is a general eroder, we let ${\cal T}_0$ denote the set of Toom contours rooted at $(0,0)$ (up to equivalence). Since we apply only one non-constant monotonic map, conditions (iii) and (iv) of Definition~\ref{D:present} of what it means for a contour to be present in $\Phi^p$ do not involve any randomness, i.e., these conditions now simplify to the deterministic conditions:
\begin{itemize}
\item[{\rm(iii)'}] $\displaystyle\vec\psi(w)-\vec\psi(v)\in A_s(\phi)$ for all $(v,w)\in\vec E^\ast_s$ $(1\leq s\leq\sigma$),
\item[{\rm(iv)'}] $\displaystyle\vec\psi(w)-\vec\psi(v)\in B(\phi)$ for all $(v,w)\in\vec E^\circ$.
\end{itemize}
\begin{defi}
We\label{D:Tac} let ${\cal T}'_0$ denote the set of Toom contours rooted at $(0,0)$ (up to equivalence) that satisfy conditions (iii)' and (iv)'.
\end{defi}
For each $T=(V,{\cal E},v_\circ,\psi)\in{\cal T}'_0$, let
\begin{equation}\label{eq:nstartne}
n_\ast(T):=|V_\circ|=|V_\ast|\quad\mbox{and}\quad n_{\rm e}(T):=|\vec E_1|=\cdots=|\vec E_\sigma|
\end{equation}
denote its number of sinks and sources, each, and its number of directed edges of each charge. As already explained informally, the central idea of Toom contours is that differently charged edges move away from each other except for edges starting at a source, which allows us to bound the number $n_{\rm e}(T)$ of edges in terms of the number $n_\ast(T)$ of sources (or equivalently sinks). We now make this informal idea precise. It is at this point of the argument that the eroder property is used in the form of Lemma~\ref{L:erode} which allowed us to choose the sets $A_s(\phi)$ and the polar function $L$ such that the constant $\varepsilon$ from (\ref{epsR}) is positive. We also need the following simple lemma.\footnote{Lemmas \ref{L:zerosum} and \ref{L:edgebnd} are similar to \cite[Lemmas 1 and 2]{Too80}. The main difference is that in Toom's construction, the number of incoming edges of each charge equals the number of outgoing edges of that charge at all vertices of the contour, i.e., there are no sources and sinks.}
\begin{lemma}[Zero sum property]
Let\label{L:zerosum} $(V,{\cal E})$ be a Toom graph with $\sigma$ charges, let $\psi:V\to{\mathbb Z}^{d+1}$ be an embedding of $(V,{\cal E})$, and let $L:{\mathbb R}^d\to{\mathbb R}^\sigma$ be a polar function with dimension $\sigma$. Then
\begin{equation}\label{zerosum}
\sum_{s=1}^\sigma\sum_{(v,w)\in\vec E_s}\big(L_s(\vec\psi(w))-L_s(\vec\psi(v))\big)=0.
\end{equation}
\end{lemma}
\begin{Proof}
We can rewrite the sum in (\ref{zerosum}) as
\begin{equation}
\sum_{v\in V}\Big\{\sum_{s=1}^\sigma\sum_{(u,v)\in\vec E_{s,{\rm in}}(v)}L_s(\vec\psi(v))
-\sum_{s=1}^\sigma\sum_{(v,w)\in\vec E_{s,{\rm out}}(v)}L_s(\vec\psi(v))\Big\}.
\end{equation}
At internal vertices, the term inside the brackets is zero because the number of incoming edges of each charge equals the number of outgoing edges of that charge. At the sources and sinks, the term inside the brackets is zero by the defining property (\ref{polar}) of a polar function, since there is precisely one outgoing (resp.\ incoming) edge of each charge.
\end{Proof}
As a consequence of Lemma~\ref{L:zerosum}, we can estimate $n_{\rm e}(T)$ from above in terms of $n_\ast(T)$.
\begin{lemma}[Upper bound on the number of edges]
Let\label{L:edgebnd} $\varepsilon$ and $R$ be defined in~\eqref{epsR}. Then each $T\in{\cal T}'_0$ satisfies $n_{\rm e}(T)\leq(1+R/\varepsilon)\big(n_\ast(T)-1\big)$.
\end{lemma}
\begin{Proof}
Since $|\vec E^\circ_s|=n_\ast(T)-1$ and $|\vec E^\ast_s|=n_{\rm e}(T)-n_\ast(T)+1$ $(1\leq s\leq\sigma)$, Lemma~\ref{L:zerosum} and rules (iii)' and (iv)' imply that
\be\begin{array}{r@{\,}c@{\,}l}
\displaystyle 0
&=&\displaystyle\sum_{s=1}^\sigma\Big(\sum_{(v,w)\in \vec E^\ast_s}
\big(L_s(\vec\psi(w))-L_s(\vec\psi(v))\big)
+\sum_{(v,w)\in\vec E^\circ_s}
\big(L_s(\vec\psi(w))-L_s(\vec\psi(v))\big)\Big)\\[5pt]
&\geq&\displaystyle\sum_{s=1}^\sigma\big[\big(n_{\rm e}(T)-n_\ast(T)+1\big)\varepsilon_s-\big(n_\ast(T)-1\big)R_s\big]
=\varepsilon n_{\rm e}(T)-(\varepsilon+R)\big(n_\ast(T)-1\big),
\end{array}\ee
where we have used that $L_s(\vec\psi(w))-L_s(\vec\psi(v))=L_s\big(\vec\psi(w)-\vec\psi(v)\big)$ by the linearity of $L_s$.
\end{Proof}
By condition~(ii) of Definition~\ref{def:embedding} of an embedding, sinks of a Toom contour do not overlap. By condition~(i) of Definition~\ref{D:present} of what it means for a Toom contour to be present, each sink corresponds to a space-time point $(i,t)$ that is defective, meaning that $\Phi_{(i,t)}=\phi^0$, which happens with probability $p$, independently for all space-time points.
By Lemma~\ref{L:edgebnd}, we can then estimate the right-hand side of (\ref{Pei}) from above by
\begin{equation}\begin{array}{l}\label{Peierls}
\displaystyle\sum_{T\in{\cal T}_0}\P\big[T\mbox{ is present in }\Phi\big]
\leq\sum_{T\in{\cal T}'_0}p^{n_\ast(T)}=p\sum_{T\in{\cal T}'_0}p^{n_\ast(T)-1}\\[5pt]
\displaystyle\quad\leq p\sum_{T\in{\cal T}'_0}p^{n_{\rm e}(T)/(1+R/\varepsilon)}
=p\sum_{n=0}^\infty N_n p^{n/(1+R/\varepsilon)},
\end{array}\ee
where
\begin{equation}\label{eq:Nn}
N_n:=\big|\{T\in{\cal T}'_0:n_{\rm e}(T)=n\}\big|\qquad(n\geq 0)
\end{equation}
denotes the number of (nonequivalent) contours in ${\cal T}'_0$ that have $n$ edges of each charge. The following lemma gives a rough upper bound on $N_n$. Recall the definition of $B(\phi)$ in (\ref{Bphi}).
\begin{lemma}[Exponential bound]
Let\label{L:expbd} $M:=\big|B(\phi)\big|$ and let $\tau:=\lceil\ffrac{1}{2}\sigma\rceil$ denote $\ffrac{1}{2}\sigma$ rounded up to the next integer. Then
\begin{equation}
N_n\leq n^{\tau-1}(\tau+1)^{2\tau n}M^{\sigma n}\qquad(n\geq 1).
\end{equation}
\end{lemma}
Combining (\ref{Peierls}) and Lemma~\ref{L:expbd}, we see that the right-hand side of (\ref{Pei}) is finite for $p$ sufficiently small and hence (by dominated convergence) tends to zero as $p\to 0$. This proves that $\overline\rho(p)\to 1$ as $p\to 0$, which is the most difficult part of Toom's stability theorem.
\subsection{Contours with two charges}\label{S:twochar}
For Toom contours with two charges, the bounds derived in the previous subsection can be improved by using Theorem~\ref{T:strongpres} instead of Theorem~\ref{T:contour}. To make this precise, for Toom contours with two charges, we define a subset ${\cal T}''_0$ of the set of contours ${\cal T}'_0$ from Definition~\ref{D:Tac} as follows:
\begin{defi}
For\label{D:Tacc} Toom contours with $\sigma=2$ charges, we let ${\cal T}''_0$ denote the set of Toom contours rooted at $(0,0)$ (up to equivalence) that satisfy:
\begin{itemize}
\item[{\rm(iii)'}] $\displaystyle\vec\psi(w)-\vec\psi(v)\in A_s(\phi)$ for all $(v,w)\in\vec E^\ast_s$ $(1\leq s\leq 2$),
\item[{\rm(iv)''}] $\displaystyle\vec\psi(w)-\vec\psi(v)\in A_{3-s}(\phi)$ for all $(v,w)\in\vec E^\circ_s$ $(1\leq s\leq 2$),
\item[{\rm(v)''}] $\displaystyle\vec\psi(w_1)\neq\vec\psi(w_2)$ for all $v\in V_\circ\backslash\{v_\circ\}$ and $w_1,w_2\in V$ with $(v,w_s)\in\vec E_{s,{\rm out}}(v)$ $(s=1,2)$.
\end{itemize}
\end{defi}
Note that condition (iii)' above is the same condition as (iii)' of Definition~\ref{D:Tac}. Condition (iv)'' strengthens condition (iv)' of Definition~\ref{D:Tac}. Conditions (iv)'' and (v)'' correspond to conditions (v) and (vi) of Definition~\ref{D:strongpres}, which in our present set-up do not involve any randomness. We will need analogues of Lemmas~\ref{L:edgebnd} and~\ref{L:expbd} with ${\cal T}'_0$ replaced by ${\cal T}''_0$. We define
\begin{equation}\label{eq:Rprime}
R'':=\sum_{s=1}^\sigma R''_s\qquad\mbox{with}\quad R''_1:=-\inf_{i\in A_2(\phi)}L_1(i)\quad\mbox{and}\quad R''_2:=-\inf_{i\in A_1(\phi)}L_2(i).
\end{equation}
The following lemma is similar to Lemma~\ref{L:edgebnd}.
\begin{lemma}[Upper bound on the number of edges for $\sigma=2$]
Let\label{L:edgebndcycle} $\varepsilon$ and $R''$ be defined in~\eqref{epsR} and~\eqref{eq:Rprime}. Then each $T\in{\cal T}''_0$ satisfies $n_{\rm e}(T)\leq(1+R''/\varepsilon)\big(n_\ast(T)-1\big)$.
\end{lemma}
\begin{Proof}
The proof is the same as that of Lemma~\ref{L:edgebnd}, with the only difference that condition (iv)'' of Definition~\ref{D:Tacc} allows us to use $R''_s$ instead of $R_s$ $(s=1,2)$ as upper bounds.
\end{Proof}
Similarly to (\ref{eq:Nn}), we let
\begin{equation}
N''_n:=\big|\{T\in{\cal T}''_0:n_{\rm e}(T)=n\}\big|\qquad(n\geq 0)
\end{equation}
denote the number of (nonequivalent) contours in ${\cal T}''_0$ that have $n$ edges of each charge. Then Theorem~\ref{T:strongpres} implies the Peierls bound:
\begin{equation}\label{Peierlscycle}
1-\overline\rho(p)\leq
\sum_{T\in{\cal T}_0}\P\big[T\mbox{ is strongly present in }\Phi\big]
\leq \sum_{T\in{\cal T}''_0}p^{n_\ast(T)}
\leq p\sum_{n=0}^\infty N''_n p^{n/(1+R''/\varepsilon)}.
\end{equation}
The following lemma is similar to Lemma~\ref{L:expbd}.
\begin{lemma}[Exponential bound for $\sigma=2$]
Let\label{L:expbdcycle} $M_s:=\big|A_s(\phi)\big|$ $(s=1,2)$. Then
\begin{equation}
N''_n\leq\ffrac{1}{2}(4M_1M_2)^{n} \qquad(n\geq 1).
\end{equation}
\end{lemma}
\subsection{Some explicit bounds}\label{S:explic}
We continue to work in the set-up of the previous subsections, i.e., we consider monotone random cellular automata that apply the maps $\phi^0$ and $\phi$ with probabilities $p$ and $1-p$, respectively, where $\phi$ is an eroder. An easy coupling argument shows that the intensity $\overline\rho(p)$ of the upper invariant law is a nonincreasing function of $p$, so there exists a unique $p_{\rm c}\in[0,1]$ such that $\overline\rho(p)>0$ for $p<p_{\rm c}$ and $\overline\rho(p)=0$ for $p>p_{\rm c}$. Since $\phi$ is an eroder, Toom's stability theorem tells us that $p_{\rm c}>0$. In this subsection, we derive explicit lower bounds on $p_{\rm c}$ for two concrete choices of the eroder $\phi$.
If one wants to use (\ref{Pei}) to show that $\overline\rho>0$, then one must show that the right-hand side of (\ref{Pei}) is less than one. In practice, when deriving explicit bounds, it is often easier to show that a certain sum is finite than showing that it is less than one. We will prove a generalisation of Theorems \ref{T:contour} and \ref{T:strongpres} that can in many cases be used to show that if a certain sum is finite, then $\overline\rho>0$.
In the set-up of Theorem~\ref{T:contour}, we choose $j_s\in A_s(\phi_1)$ $(1\leq s\leq\sigma)$. We fix an integer $r\geq 0$ and we let ${\bm{\phh}}^{(r)}$ denote the modified monotonic flow defined by
\begin{equation}\label{eq:modifiedbooleanmaps}
\varphi^{(r)}_{(i,t)}:=\left\{\begin{array}{ll}
\phi_1\quad&\mbox{if }-r<t\leq 0,\\[5pt]
\varphi_{(i,t)}&\mbox{otherwise.}
\end{array}\right.
\end{equation}
Below, we let $\overline x^{(r)}$ denote the maximal trajectory of the modified monotonic flow ${\bm{\phh}}^{(r)}$. As before, we let ${\rm Conv}(A)$ denote the convex hull of a set $A$.
\begin{proposition}[Presence of a large contour]
In\label{P:Peifin} the set-up of Theorem~\ref{T:contour}, on the event that $\overline x^{(r)}_{-r}(i)=0$ for all $i\in{\rm Conv}(\{rj_1,\ldots,rj_\sigma\})$, there is a Toom contour $(V,{\cal E},v_\circ,\psi)$ rooted at $(0,0)$ present in ${\bm{\phh}}^{(r)}$ such that $\psi_{d+1}(v)\leq-r$ for all $v\in V_\ast$ and $\psi_{d+1}(v)\leq 1-r$ for all $v\in V_\circ\backslash\{v_\circ\}$. If $\sigma=2$, then such a Toom contour is strongly present in ${\bm{\phh}}^{(r)}$.
\end{proposition}
As a simple consequence of this proposition, we obtain the following lemma.
\begin{lemma}[Finiteness of the Peierls sum]
If\label{L:Peifin} $\displaystyle\sum_{T\in{\cal T}'_0}p^{n_\ast(T)}<\infty$, then $\overline\rho(p)>0$. If $\sigma=2$,\\[-15pt]
\noindent
then similarly $\displaystyle\sum_{T\in{\cal T}''_0}p^{n_\ast(T)}<\infty$ implies $\overline\rho(p)>0$.
\end{lemma}
We prove Proposition~\ref{P:Peifin} and Lemma~\ref{L:Peifin} in Section~\ref{S:finP}.
\vspace{0.2cm}
\noindent
\textbf{Cooperative branching} Generalizing the definition in (\ref{phiNEC}), for each dimension $d\geq 1$, we define a monotonic map $\phi^{{\rm coop},d}:\{0,1\}^{{\mathbb Z}^d}\to\{0,1\}$ by
\begin{equation}\label{phicoopddim}
\phi^{{\rm coop},d}(x):=\displaystyle x(0)\vee\big(x(e_1)\wedge\dots\wedge x(e_d)\big),
\end{equation}
where 0 is the origin and $e_i$ denotes the $i$th unit vector in ${\mathbb Z}^d$. In particular, in dimension $d=2$, this is the cooperative branching rule $\phi^{\rm coop}$ defined in (\ref{phiNEC}). We choose $\sigma:=2$, $A_1(\phi):=\{0\}$, and $A_2(\phi):=\{e_1,\dots,e_d\}$, and as our polar function $L$ we choose
\begin{equation}
L_1(z_1,\dots, z_d):=-\sum_{i=1}^dz_i\quad\mbox{and}\quad
L_2(z_1,\dots, z_d):=\sum_{i=1}^dz_i,
\end{equation}
which has the result that the constants from~\eqref{epsR} and~\eqref{eq:Rprime} are given by $\varepsilon=1$, $R=1$ and $R''=1$. Arguing as in (\ref{Peierls}), using Lemmas \ref{L:edgebnd} and \ref{L:expbd} with $M=d+1$, $\sigma=2$ and $\tau=1$, we obtain the Peierls bound:
\begin{equation}
\sum_{T\in{\cal T}_0}\P\big[T\mbox{ is present in }\Phi\big]
\leq\sum_{T\in{\cal T}'_0}p^{n_\ast(T)}
\leq p\sum_{n=0}^\infty 2^{2n}(d+1)^{2n}p^{n/2}.
\end{equation}
This is finite when $4(d+1)^2p^{1/2}<1$, so using Lemma~\ref{L:Peifin} we obtain the bound $p_{\rm c}(d)\geq 16^{-1}(d+1)^{-4}$. This bound can be improved by using Theorem~\ref{T:strongpres} and its consequences. Applying Lemmas \ref{L:edgebndcycle} and \ref{L:expbdcycle} with $M_1=1$, $M_2=d$, we obtain the Peierls bound:
\begin{equation}
\sum_{T\in{\cal T}_0}\P\big[T\mbox{ is strongly present in }\Phi\big]
\leq\sum_{T\in{\cal T}''_0}p^{n_\ast(T)}
\leq \frac p 2 \sum_{n=0}^\infty 4^n d^n p^{n/2}.
\end{equation}
This is finite when $4d p^{1/2}<1$, so using Lemma~\ref{L:Peifin} we obtain the bound
\begin{equation}
p_{\rm c}(d)\geq \frac 1 {16 d^2}.
\end{equation}
In particular, in two dimensions this yields $p_{\rm c}(2)\geq 1/64$. This is still some way off the estimated value $p_{\rm c}(2)\approx 0.105$ coming from numerical simulations but considerably better than the bound obtained from Lemmas \ref{L:edgebnd} and \ref{L:expbd}.\medskip
\noindent
\textbf{Toom's model} We take for $\phi$ the map $\phi^{\rm NEC}$. Then the set ${\cal A}(\phi)$ from (\ref{Aphi}) is given by ${\cal A}(\phi)=\{A_1,A_2,A_3\}$ with $A_1:=\{(0,0),(0,1)\}$, $A_2:=\{(0,0),(1,0)\}$, and $A_3:=\{(0,1),(1,0)\}$. Using (\ref{erosion}) we see that $\phi^{\rm NEC}$ is an eroder. We set $\sigma:=3$ and for the sets $A_s(\phi^{\rm NEC})$ $(s=1,2,3)$ of (\ref{As}) we choose the sets $A_1,A_2,A_3$ we have just defined. We define a polar function $L$ with dimension $\sigma=3$ by
\begin{equation}\label{eq:Toompolar}
L_1(z_1,z_2):=-z_1,\quad
L_2(z_1,z_2):=-z_2,\quad
L_3(z_1,z_2):=z_1+z_2,
\end{equation}
$\big((z_1,z_2)\in{\mathbb R}^2\big)$. One can check that for this choice of $L$ and the sets $A_s(\phi^{\rm NEC})$ $(1\leq s\leq 3)$, the constants from~\eqref{epsR} are given by
\begin{equation}
\varepsilon=1\quad\mbox{and}\quad R=2.
\end{equation}
Using Lemma~\ref{L:expbd} with $M=3$, $\sigma=3$, and $\tau=2$, we can estimate the Peierls sum in (\ref{Peierls}) from above by
\begin{equation}
p\sum_{n=0}^\infty n3^{4n}3^{3n}p^{n/3}.
\end{equation}
This is finite when $3^7p^{1/3}<1$, so using Lemma~\ref{L:Peifin} we obtain the bound
\begin{equation}
p_{\rm c}\geq 3^{-21},
\end{equation}
which does not compare well to the estimated value $p_{\rm c}\approx 0.053$ coming from numerical simulations. Nevertheless, this is probably the best rigorous bound currently available.
\subsection{Cellular automata with intrinsic randomness}\label{S:intrins}
In this subsection we will be interested in monotone random cellular automata whose definition involves more than one non-constant monotonic map. We fix a dimension $d\geq 1$, a collection $\phi_1,\ldots,\phi_m$ of non-constant monotonic maps $\phi_k:\{0,1\}^{{\mathbb Z}^d}\to\{0,1\}$, and a probability distribution $p_1,\ldots,p_m$. Let $(X_k)_{k\geq 0}$ denote the monotone random cellular automaton that applies the maps $\phi_1,\ldots,\phi_m$ with probabilities $p_1,\ldots,p_m$
and let $\phi_0:=\phi^0$ be the constant map that always gives the outcome zero. By definition, a \emph{$\delta$-perturbation} of $(X_k)_{k\geq 0}$ is a monotone random cellular automaton $(X'_k)_{k\geq 0}$ that applies the maps $\phi_0,\ldots,\phi_m$ with probabilities $p'_0,\ldots,p'_m$ that satisfy $p'_0\leq\delta$ and $p'_k\leq p_k$ for all $k=1,\ldots,m$. We say that $(X_k)_{k\geq 0}$ is \emph{stable} if for each $\varepsilon>0$, there exists a $\delta>0$ such that the density $\overline\rho'$ of the upper invariant law of any $\delta$-perturbation of $(X_k)_{k\geq 0}$ satisfies $\overline\rho'\geq 1-\varepsilon$. Note that in the special case that $m=1$, which corresponds to the set-up of Toom's stability theorem, these definitions coincide with our earlier definition.
For deterministic monotone cellular automata, which in our set-up corresponds to the case $m=1$, we have seen in Lemma~\ref{L:erode} and formula (\ref{edgespeed}) that the eroder property can equivalently be formulated in terms of edge speeds. For a random monotone cellular automaton $(X_k)_{k\geq 0}$, the intuition is similar, but it is not entirely clear how to define edge speeds in the random setting and it can be more difficult to determine whether $(X_k)_{k\geq 0}$ is an eroder. Fix a polar function $L$ of dimension $\sigma\geq 2$ and let
\begin{equation}\label{eq:speeds}
\varepsilon^k_s:=\sup_{A\in{\cal A}(\phi_k)}\inf_{i\in A}L_s(i)\qquad(1\leq k\leq m,\ 1\leq s\leq\sigma)
\end{equation}
denote the edge speed in the direction defined by the linear function $L_s$ of the deterministic automaton that only applies the map $\phi_k$. If
\begin{equation}\label{unispeed}
\sum_{s=1}^\sigma\varepsilon_s>0\quad\mbox{with}\quad\varepsilon_s:=\inf_{1\leq k\leq m}\varepsilon^k_s,
\end{equation}
then (\ref{edgespeed}) remains valid almost surely.
In such a situation, it is not very hard to adapt the arguments of Section~\ref{S:erod} to see that $(X_k)_{k\geq 0}$ is stable.
The condition (\ref{unispeed}) is, however, very restrictive and excludes many interesting cases. In particular, it excludes the case when one of the maps $\phi_1,\ldots,\phi_m$ is the identity map $\phi^{\rm id}$, which, as explained below (\ref{phiid}) is relevant in view of treating continuous-time interacting particle systems. Indeed, observe that, if $\phi_k=\phi^{\rm id}$, then $\varepsilon_s^k=0$ for each polar function $L$ of dimension $\sigma$ and each $1\leq s\leq \sigma$, implying $\sum_{s=1}^\sigma \varepsilon_s\leq 0$. The following example, which is an adaptation of \cite[Example~18.3.5]{Gra99}, shows that in such situations it can be much more subtle whether a random monotone cellular automaton is stable.
Fix an integer $n\geq 1$ and let $\phi_1:\{0,1\}^{{\mathbb Z}^2}\to\{0,1\}$ be the monotonic map defined as in (\ref{Aphi}) by the set of minimal configurations
\begin{equation}
{\cal A}(\phi_1):=\big\{\{(-1,0),(0,0)\},\{(-2,0),(0,0)\},\{(m,k):-3\leq m\leq-2,\ |k|\leq n\}\big\}.
\end{equation}
Using (\ref{erosion}), it is straightforward to check that $\phi_1$ is an eroder. Now consider the random monotone cellular automaton $(X_k)_{k\geq 0}$ that applies the maps $\phi_1$ and $\phi^{\rm id}$ with probabilities $p$ and $1-p$, respectively, for some $0\leq p\leq 1$. We claim that if $p<1$, then for $n$ sufficiently large, $(X_k)_{k\geq 0}$ is not stable. To see this, fix $l\geq 2$ and consider an initial state such that $X_0(i)=0$ for $i\in\{0,\ldots,l\}\times\{0,\ldots,n\}$ and $X_0(i)=1$ otherwise. Set
\begin{equation}
\alpha_k:=\inf_{0\leq i_2\leq n}\inf\{i_1:X_k(i_1,i_2)=0\}\quad\mbox{and}\quad
\beta^j_k:=\sup\{i_1:X_k(i_1,j)=0\}\quad(0\leq j\leq n).
\end{equation}
As long as at each height $0\leq j\leq n$, there are at least two sites of type 0, the right edge processes $(\beta^j_k)_{k\geq 0}$ with $0\leq j\leq n$ behave as independent random walks that make one step to the right with probability $p$. Therefore, the right edge of the zeros moves with speed $p$ to the right. In each time step, all sites in $\{\alpha_k,\alpha_k+1\}\times\{0,\ldots,n\}$ that are of type $0$ switch to type 1 with probability $p$. When $p=1$, the effect of this is that the left edge of the zeros moves with speed two to the right and eventually catches up with the right edge, which explains why $\phi_1$ is an eroder. However, when $p<1$, the left edge can move to the right only once all sites in $\{\alpha_k\}\times\{0,\ldots,n\}$ have switched to type 1. For $n$ large enough, this slows down the speed of the left edge with the result that in $(X_k)_{k\geq 0}$ the initial set of zeros will never disappear. It is not difficult to prove that this implies that $(X_k)_{k\geq 0}$ is not stable.
To see a second example that demonstrates the complications that can arise when we replace deterministic monotone cellular automata by random ones, recall the maps $\phi^{\rm NEC}$, $\phi^{\rm NWC}$, $\phi^{\rm SWC}$, and $\phi^{\rm SEC}$ defined in and below (\ref{phiNEC}). For the map $\phi^{\rm NEC}$, the edge speeds in the directions defined by the linear functions $L_1$ and $L_2$ from (\ref{eq:Toompolar}) are zero but the edge speed corresponding to $L_3$ is not, which we used in Subsection~\ref{S:explic} to prove that the deterministic monotone cellular automaton that always applies the map $\phi^{\rm NEC}$ is stable. By contrast, for the cellular automaton that applies the maps $\phi^{\rm NEC}$, $\phi^{\rm NWC}$, $\phi^{\rm SWC}$, and $\phi^{\rm SEC}$ with equal probabilities, by symmetry in space and since these maps treat the types 0 and 1 symmetrically, the edge speed in each direction is zero. As a result, we conjecture that, although each map applied by this random monotone cellular automaton is an eroder, it is not stable.
In spite of these complications, Toom contours can sometimes be used to prove stability of random monotone cellular automata, even in situations where the simplifying assumption (\ref{unispeed}) does not hold. In these cases we cannot rely on the use of polar functions, instead we have to carefully examine the structure of the contour to be able to bound the number of contours in terms of the number of defective sites. Furthermore, one can generally take $\sigma:=\bigvee_{k=1}^m|{\cal A}(\phi_k)|$. We will demonstrate this on a cellular automaton that combines the cooperative branching map defined in (\ref{phicoopddim}) with the identity map.\medskip
\noindent
\textbf{Cooperative branching with identity map} We consider the monotone random cellular automaton on ${\mathbb Z}^d$ that applies the maps $\phi^0,\phi^{\rm id}$, and $\phi^{{\rm coop},d}$ with probabilities $p,q,r$, respectively with $q=1-p-r$. For each $p,r\geq 0$ such that $p+r\leq 1$, let $\overline\rho(p,r)$ denote the intensity of the upper invariant law of the process with parameters $p,1-p-r,r$. A simple coupling argument shows that for fixed $0\leq r<1$, the function $p\mapsto\overline\rho(p,r)$ is nonincreasing on $[0,1-r]$, so for each $0\leq r<1$, there exists a $p_{\rm c}(r)\in[0,1-r]$ such that $\overline\rho(p,r)>0$ for $0\leq p<p_{\rm c}(r)$ and $\overline\rho(p,r)=0$ for $p_{\rm c}(r)<p\leq 1-r$. We will derive a lower bound on $p_{\rm c}(r)$. Recall that setting $p:=\varepsilon$ and $r:=\lambda\varepsilon$, rescaling time by a factor $\varepsilon$, and sending $\varepsilon\to 0$ corresponds to taking the continuous-time limit, where in the limiting interacting particle system the maps $\phi^0$ and $\phi^{{\rm coop},d}$ are applied with rates 1 and $\lambda$, respectively. For this reason, we are especially interested in the asymptotics of $p_{\rm c}(r)$ when $r$ is small.
In line with notation introduced in Subsection~\ref{S:explic}, we define $A_1:=\{0\}$ and $A_2:=\{e_1,\dots, e_d\}$. We have
\begin{equation}
{\cal A}(\phi^{\rm id})=\big\{A_1\big\}\quad\mbox{and}\quad
{\cal A}(\phi^{\rm coop, d})=\big\{A_1,A_2\big\},
\end{equation}
thus we set $\sigma:=|{\cal A}(\phi^{\rm id})|\vee|{\cal A}(\phi^{\rm coop, d})|=2$, and for the sets $A_s(\phi_k)$ in (\ref{As}) we make the choices
\begin{equation}\begin{array}{ll}\label{coopA12}
\displaystyle A_1(\phi^{\rm id}):=A_1,\quad& A_2(\phi^{\rm id}):=A_1,\\[5pt]
\displaystyle A_1(\phi^{\rm coop,d}):=A_1,\quad& A_2(\phi^{\rm coop, d}):=A_2.
\end{array}\ee
Let $\Phi=(\Phi_{(i,t)})_{(i,t)\in{\mathbb Z}^{d+1}}$ be an i.i.d.\ collection of monotonic maps so that $\P[\Phi_{(i,t)}=\phi^0]=p$, $\P[\Phi_{(i,t)}=\phi^{\rm id}]=q$, and $\P[\Phi_{(i,t)}=\phi^{\rm coop, d}]=r$. We let ${\cal T}_0$ denote the set of Toom contours $(V, \mathcal E, 0, \psi)$ rooted at the origin with respect to the given choice of $\sigma$ and the sets $A_s(\phi_k)$ in~\eqref{coopA12}. Theorem~\ref{T:contour} then implies the Peierls bound
\begin{equation}\label{strPei}
1-\overline\rho \leq \sum_{T\in{\cal T}_0}\P\big[T\mbox{ is strongly present in }\Phi\big].
\end{equation}
In Section~\ref{S:intbd}, we give an upper bound on this expression by carefully examining the structure of Toom contours for this model. We will prove the following lower bound on $p_{\rm c}(r)$ for each $r\in[0,1)$:
\[p_{\rm c}(r)>\big(\sqrt{(d+0.5)^2+1/(16d)}-d-0.5\big)r.\]
In particular for $d=2$ we obtain the bound $p_{\rm c}(r)> 0.00624 r$.
\subsection{Continuous time}\label{S:contfirst}
In this subsection, we consider monotone interacting particle systems of the type described in (\ref{traj}). We briefly recall the set-up described there. We are given a finite collection $\phi_1,\ldots,\phi_m$ of non-constant monotonic maps $\phi_k:\{0,1\}^{{\mathbb Z}^d}\to\{0,1\}$ and a collection of nonnegative rates $r_1,\ldots,r_m$, and we are interested in interacting particle systems $(X_t)_{t\geq 0}$ taking values in $\{0,1\}^{{\mathbb Z}^d}$ that evolve in such a way that independently for each $i\in{\mathbb Z}^d$,
\begin{equation}
X_t(i)\mbox{ is replaced by }\phi_k(\theta_iX_t)\mbox{ at the times of a Poisson process with rate }r_k
\end{equation}
$(1\leq k\leq m)$. Without loss of generality we can assume that $\phi_k\neq\phi^{\rm id}$ for all $1\leq k\leq m$.
For each $r\geq 0$, let $(X^r_t)_{t\geq 0}$ denote the perturbed monotone interacting particle system that apart from the non-constant monotonic maps $\phi_1,\ldots,\phi_m$, that are applied with rates $r_1,\ldots,r_m$, also applies the constant monotonic map $\phi_0:=\phi^0$ with rate $r_0:=r$. We let $\overline\rho(r)$ denote the density of its upper invariant law. We say that the unperturbed interacting particle system $(X_t)_{t\geq 0}$ is \emph{stable} if $\overline\rho(r)\to 1$ as $r\to 0$.
Gray \cite[Theorem~18.3.1]{Gra99} has given (mutually non-exclusive) sufficient conditions on the edge speeds for a monotone interacting particle system to be either stable or unstable. Furthermore, \cite[Examples~18.3.5 and 6]{Gra99} he has shown that $(X_t)_{t\geq 0}$ may fail to be stable even when $m=1$ and the map $\phi_1$ is an eroder in the sense of (\ref{erosion}), and conversely, in such a situation, $(X_t)_{t\geq 0}$ may be stable even when $\phi_1$ is not an eroder. The reason for this is that we can think of interacting particle systems as continuous-time limits of cellular automata that apply the identity map $\phi^{\rm id}$ most of the time, and, as we have seen in the previous subsection, combining an eroder $\phi_1$ with the identity map $\phi^{\rm id}$ can change the stability of a cellular automaton in subtle ways. However, for a certain type of interacting particle system called generalized contact process Gray's conditions on the edge speed turn out to be sufficient and necessary for the stability of $(X_t)_{t\geq 0}$. We now briefly describe this argument, as it is not present in~\cite{Gra99}.
Recall that $\mathcal A(\phi_k)$ defined in~\eqref{Aphi} denotes the set of minimal configurations on which $\phi_k$ gives the outcome 1. We say that a monotone interacting particle system that applies the non-constant monotonic maps $\phi_1,\ldots,\phi_m$ is a \emph{generalized contact process}, if $\{0\}\in \mathcal A(\phi_k)$ for each $1\leq k\leq m$. The perturbed system $(X^r_t)_{t\geq 0}$ then can be seen as a model for the spread of epidemics: vertices represent individuals that can be healthy (state 0) or infected (state 1). Each healthy vertex can get infected, if a certain set of vertices in its neighbourhood is entirely infected, and each infected vertex can recover at rate $r$ independently of the state of the other vertices.
For a monotone interacting particle system that applies the non-constant monotonic maps $\phi_1,\ldots,\phi_m$ Gray defines the \emph{Toom operator} $\phi:\{0,1\}^{{\mathbb Z}^d}\to\{0,1\}$ as the map
\begin{equation}\label{eq:Toomoperator}
\phi(x):=\big(1-x(0)\big)\bigwedge_{k=1}^m \phi_k(x) + x(0)\bigvee_{k=1}^m \phi_k(x) \qquad \big(x\in \{0,1\}^{{\mathbb Z}^d}\big).
\end{equation}
That is, $\phi$ flips the state of the origin if at least one of the maps $\phi_1, \dots, \phi_m$ would flip its state in configuration $x$. As each $\phi_k$ is monotonic, it is easy to see that $\phi$ is monotonic as well.
Recall from~\eqref{epsR} that for each fixed polar function $L$ of dimension $\sigma$ we defined
\begin{equation}
\varepsilon:=\sum_{s=1}^\sigma \varepsilon_s, \qquad \varepsilon_s:=\inf_{i\in A_s(\phi)}L_s(i) \quad(1\leq s\leq \sigma).
\end{equation}
For a Toom operator $\phi$ with $\{0\}\in \mathcal A(\phi)$ we have $\varepsilon_s\geq 0$ for each $s$. In this case, Gray's condition for stability simplifies as follows. A monotone interacting particle system with Toom operator $\phi$ satisfying $\{0\}\in \mathcal A(\phi)$ is stable if and only if there exists a polar function $L$ for which $\varepsilon>0$. It is easy to see that finding such a polar function is equivalent to finding a set $A\in{\cal A}(\phi)$ which is entirely contained in an open halfspace in ${\mathbb Z}^d$. As $\{0\}\in \mathcal A(\phi)$, this is further equivalent to $\bigcap_{A\in{\cal A}(\phi)}{\rm Conv}(A)=\emptyset$, which is the eroder condition in~\eqref{erosion}.
Let $(X_t)_{t\geq 0}$ be a generalized contact process. As $\{0\}\in \mathcal A(\phi_k)$ for each $1\leq k\leq m$, we clearly have $\{0\}\in \mathcal A(\phi)$ for the corresponding Toom operator $\phi$ in~\eqref{eq:Toomoperator}.
Thus in this case we can formulate Gray's theorem \cite[Theorem~18.3.1]{Gra99} as follows.
\begin{quote}
The generalized contact process $(X_t)_{t\geq 0}$ is stable if and only if the corresponding Toom operator $\phi$ is an eroder.
\end{quote}
While Gray's results can be used to show stability of certain models, his ideas do not lend themselves well to the derivation of explicit bounds. It is with this goal in mind that we have extended Toom's framework to continuous time. Toom contours in continuous time are defined similarly as in the discrete time setting and can be thought of as the limit of the latter. Since this is very similar to what we have already seen in Subsection~\ref{S:Peierls}, we do not give the precise definitions in the continuous-time setting here but refer to Section~\ref{S:cont} instead. We will demonstrate how Toom contours can be used to give bounds on the critical parameters of some monotone interacting particle systems. As mentioned in the previous subsection, in our methods we cannot rely on the use of polar functions. Again, one can generally take $\sigma:=\bigvee_{k=1}^m|{\cal A}(\phi_k)|$. \medskip
\noindent
\textbf{Sexual contact process on $\mathbb Z^d \; (d\geq 1)$} We consider the interacting particle system on ${\mathbb Z}^d$ that applies the monotonic maps $\phi^0$ and $\phi^{{\rm coop},d}$ defined in (\ref{phiconst}) and (\ref{phicoopddim}) with rates $1$ and $\lambda$, respectively. We let $\overline\rho(\lambda)$ denote the intensity of the upper invariant law as a function of $\lambda$ and we define the critical parameter as $\lambda_{\rm c}:=\inf\{\lambda\geq 0:\overline\rho(\lambda)>0\}$.
In line with notation introduced in Subsection~\ref{S:explic}, we define $A_1:=\{0\}$ and $A_2:=\{e_1,\dots, e_d\}$. We have
\begin{equation}
{\cal A}(\phi^{\rm coop, d})=\big\{A_1,A_2\big\},
\end{equation}
thus we set $\sigma:=|{\cal A}(\phi^{\rm coop, d})|=2$, and for the sets $A_s(\phi_k)$ in (\ref{As}) we make the choices
\begin{equation}
A_1(\phi^{\rm coop,d}):=A_1,\quad A_2(\phi^{\rm coop, d}):=A_2.
\end{equation}
In Section~\ref{S:cont} we will show that $X_t(i)=0$ implies the presence of a continuous Toom contour rooted at $(i, t)$ with respect to the given choice of $\sigma$ and sets $A_s(\phi^{\rm coop,d})$, and use these contours to carry out a similar Peierls argument as in the discrete time case.
In one dimension, this process is called the one-sided contact process, and our computation yields the bound
\begin{equation}
\lambda_{\rm c}(1)\leq 49.3242\dots .
\end{equation}
There are already better estimates in the literature: in \cite{TIK97} the authors prove the bound $\lambda_{\rm c}(1)\leq3.882$ and give the numerical estimate $\lambda_{\rm c}(1)\approx3.306$.
In two dimensions this is the sexual contact process defined in~\cite{Dur86}, and we prove the bound
\begin{equation}\label{coopbd}
\lambda_{\rm c}(2)\leq 161.1985\dots .
\end{equation}
In \cite{Dur86} Durrett claimed a proof that $\lambda_{\rm c}(2)\leq 110$, while numerical simulations suggest the value $\lambda_{\rm c}(2)\approx 12.4$.\medskip
\section{Toom contours}\label{S:contour}
\subsubsection*{Outline}
In this section, we develop the basic abstract theory of Toom contours. In particular, we prove all results stated in Subsection~\ref{S:Peierls}. In Subsection~\ref{S:max}, we prove the preparatory Lemmas \ref{L:maxtraj} and \ref{L:maxup}. Theorems \ref{T:contour} and \ref{T:strongpres} about the (strong) presence of Toom contours are proved in Subsections \ref{S:constr} and \ref{S:Tcycles}, respectively.
In Section~\ref{S:fork}, we briefly discuss ``forks'' which played a prominent role in Toom's \cite{Too80} original formulation of Toom contours and which can be used to prove a somewhat stronger version of Theorem~\ref{T:contour}.
\subsection{The maximal trajectory}\label{S:max}
In this subsection we prove Lemmas \ref{L:maxtraj} and \ref{L:maxup}.\medskip
\begin{Proof}[of Lemma~\ref{L:maxtraj}]
By symmetry, it suffices to show that there exists a trajectory $\overline x$ that is uniquely characterised by the property that each trajectory $x$ of ${\bm{\phh}}$ satisfies $x\leq\overline x$. For each $s\in{\mathbb Z}$, we inductively define a function $x^s:{\mathbb Z}^d\times\{s,s+1,\ldots\}\to\{0,1\}$ by
\begin{equation}\label{maxs}
x^s_s(i):=1\quad(i\in{\mathbb Z}^d)\quad\mbox{and}\quad
x^s_t(i)=\varphi_{(i,t)}(\theta_ix^s_{t-1})\qquad\big(i\in{\mathbb Z}^d,\ s<t\big).
\end{equation}
Then $x^{s-1}_s(i)\leq 1=x^s_s(i)$ and hence by induction $x^{s-1}_t(i)\leq x^s_t(i)$ for all $s\leq t$, which implies that the pointwise limit
\begin{equation}\label{maxconv}
\overline x_t(i):=\lim_{s\to-\infty}x^s_t(i)\qquad\big((i,t)\in{\mathbb Z}^{d+1}\big)
\end{equation}
exists. It is easy to see that $\overline x$ is a trajectory. If $x$ is any other trajectory, then $x_s(i)\leq 1=x^s_s(i)$ and hence by induction $x_t(i)\leq x^s_t(i)$ for all $s\leq t$, which implies that $x\leq\overline x$. Thus, $\overline x$ is the maximal trajectory, and such a trajectory is obviously unique.
\end{Proof}
\begin{Proof}[of Lemma~\ref{L:maxup}]
By symmetry, it suffices to prove the claim for the upper invariant law. We recall that two probability measures $\nu_1,\nu_2$ on $\{0,1\}^{{\mathbb Z}^d}$ are stochastically ordered, which we denoted as $\nu_1\leq\nu_2$, if and only if random variables $X_1,X_2$ with laws $\nu_1,\nu_2$ can be coupled such that $X_1\leq X_2$. The law $\mu$ of $\overline X_t$ clearly does not depend on $t$ and hence is an invariant law. The proof of Lemma~\ref{L:maxtraj} shows that $\P^{\overline 1}[X_t\in\,\cdot\,]\Rightarrow\overline\mu$ as $t\to\infty$ as claimed in (\ref{upconv}). Alternatively, $\mu$ is uniquely characterised by the fact that it is maximal with respect to the stochastic order, i.e., if $\nu$ is an arbitrary invariant law, then $\nu\leq\mu$. Indeed, if $\nu$ is an invariant law, then for each $s\in{\mathbb Z}$, we can inductively define a stationary process $(X^s_t)_{t\geq s}$ by
\begin{equation}
X^s_t(i)=\varphi_{(i,t)}(\theta_iX^s_{t-1})\qquad\big(i\in{\mathbb Z}^d,\ s<t\big),
\end{equation}
where $X^s_s$ has the law $\nu$ and is independent of $\Phi$. Since $\nu$ is an invariant law, the laws of the processes $X^s$ are consistent in the sense of Kolmogorov's extension theorem and therefore we can almost surely construct a trajectory $X$ of $\Phi$ such that $X_t$ has the law $\nu$ and is independent of $(\Phi_{(i,s)})_{i\in{\mathbb Z}^d,\ t<s}$ for each $t\in{\mathbb Z}$. By Lemma~\ref{L:maxtraj}, $X\leq\overline X$ a.s.\ and hence $\nu\leq\mu$ in the stochastic order. We conclude that as claimed, $\mu=\overline\nu$, the upper invariant law.
\end{Proof}
\subsection{Explanation graphs}
In this subsection we start preparing for the proof of Theorem~\ref{T:contour}. We fix a monotonic flow ${\bm{\phh}}$ on $\{0,1\}^{{\mathbb Z}^d}$ that takes values in $\{\phi_0,\ldots,\phi_m\}$, where $\phi_0=\phi^0$ is the constant map that always gives the outcome zero and $\phi_1,\ldots,\phi_m$ are non-constant. We also fix an integer $\sigma\geq 2$ and for each $1\leq s\leq\sigma$ and $1\leq k\leq m$, we fix $A_s(\phi_k)\in{\cal A}(\phi_k)$. Letting $\overline x$ denote the maximal trajectory of ${\bm{\phh}}$, our aim is to prove that almost surely on the event that $\overline x_0(0)=0$, there is a Toom contour $(V,{\cal E},v_\circ,\psi)$ rooted at $(0,0)$ present in ${\bm{\phh}}$. As a first step towards this aim, in the present subsection, we will show that the event that $\overline x_0(0)=0$ almost surely implies the presence of a simpler structure, which we will call an \emph{explanation graph}.
Recall from Subsection~\ref{S:Peierls} that a directed graph with $\sigma$ types of edges is a pair $(U,{\cal H})$, where ${\cal H}=(\vec H_1,\ldots,\vec H_\sigma)$ is a sequence of subsets of $U\times U$. We interpret $\vec H_s$ as the set of directed edges of type $s$. For such a directed graph with $\sigma$ types of edges, we let $\vec H_{s,{\rm in}}(u)$ and $\vec H_{s,{\rm out}}(u)$ denote the sets of directed edges of type $s$ that end and start in a vertex $u\in U$, respectively. We also use the notation $\vec H:=\bigcup_{s=1}^\sigma\vec H_s$. Then $(U,\vec H)$ is a directed graph in the usual sense of the word.
The following two definitions introduce the concepts we will be interested in. Although they look a bit complicated at first sight, in the proof of Lemma~\ref{L:explan} we will see that they arise naturally in the problem we are interested in. Further motivation for these definitions is provided in Section~\ref{S:expla} below, where it is shown that explanation graphs naturally arise from an even more elementary concept, which we will call a \emph{minimal explanation}.
\begin{defi}\label{def:finiteexpl}
An \emph{explanation graph} for $(0,0)$ is a directed graph with $\sigma$ types of edges $(U,{\cal H})$ with $U\subset{\mathbb Z}^{d+1}$ for which there exists a subset $U_\ast\subset U$ such that the following properties hold:
\begin{enumerate}
\item each element of $\vec H$ is of the form $\big((j,t),(i,t-1)\big)$ for some $i,j\in{\mathbb Z}^d$ and $t\in{\mathbb Z}$,
\item $(0,0)\in U\subset{\mathbb Z}^{d+1}$ and $t<0$ for all $(i,t)\in U\backslash\{(0,0)\}$,
\item for each $(i,t)\in U\backslash\{(0,0)\}$, there exists a $(j,t+1)\in U$ such that $\big((j,t+1),(i,t)\big)\in\vec H$,
\item if $u\in U_\ast$, then $\vec H_{s,{\rm out}}(u)=\emptyset$ for all $1\leq s\leq\sigma$,
\item if $u\in U\backslash U_\ast$, then $\big|\vec H_{s,{\rm out}}(u)\big|=1$ for all $1\leq s\leq\sigma$.
\end{enumerate}
\end{defi}
Note that $U_\ast$ is uniquely determined by $(U,{\cal H})$. We call $U_\ast$ the set of \emph{sinks} of the explanation graph $(U,{\cal H})$.
\begin{defi}\label{def:finexpres}
An explanation graph $(U,{\cal H})$ is \emph{present} in ${\bm{\phh}}$ if:
\begin{enumerate}
\item $\overline x_t(i)=0$ for all $(i,t)\in U$,
\item $U_\ast=\big\{u\in U:\varphi_u=\phi^0\big\}$,
\item $j-i\in A_s(\varphi_{(i,t)})$ for all $\big((i,t),(j,t-1)\big)\in\vec H_s$ $(1\leq s\leq\sigma)$.
\end{enumerate}
\end{defi}
\begin{lemma}[Presence of an explanation graph]
The\label{L:explan} maximal trajectory $\overline x$ of a monotonic flow ${\bm{\phh}}$ satisfies $\overline x_0(0)=0$ if and only if there is an explanation graph $(U,{\cal H})$ for $(0,0)$ present in ${\bm{\phh}}$.
\end{lemma}
\begin{Proof}
By condition~(i) of Definition~\ref{def:finexpres}, the presence of an explanation graph clearly implies $\overline x_0(0)=0$. To prove the converse implication, let $x^r:{\mathbb Z}^d\times\{r,r+1,\ldots\}\to\{0,1\}$ be defined as in (\ref{maxs}). We have seen in the proof of Lemma~\ref{L:maxtraj} that $x^r_t(i)$ decreases to $\overline x_t(i)$ as $r\to-\infty$. Therefore, since $\overline x_0(0)=0$, there must be an $r<0$ such that $x^r_0(0)=0$. We fix such an $r$ from now on.
We will inductively construct a finite explanation graph for $(0,0)$ with the desired properties. At each point in our construction, $(U,{\cal H})$ will be a finite explanation graph for $(0,0)$ such that:
\begin{itemize}
\item[{\rm(i)}] $x^r_t(i)=0$ for all $(i,t)\in U$,
\item[{\rm(ii)}'] $\varphi_{(i,t)}\neq\phi^0$ for all $(i,t)\in U\backslash U_\ast$,
\item[{\rm(iii)}] $j-i\in A_s(\varphi_{(i,t)})$ for all $\big((i,t),(j,t-1)\big)\in\vec H_s$ $(1\leq s\leq\sigma)$.
\end{itemize}
The induction stops as soon as:
\begin{itemize}
\item[{\rm(ii)}] $U_\ast=\big\{u\in U:\varphi_u=\phi^0\big\}$.
\end{itemize}
We start with $U=\{(0,0)\}$ and $\vec H_s=\emptyset$ for all $1\leq s\leq\sigma$. In each step of the construction, we select a vertex $(i,t)\in U_\ast$ such that $\varphi_{(i,t)}\neq\phi^0$. Since $x^r_t(i)=0$ and $A_s(\varphi_{(i,t)})\in{\cal A}(\varphi_{(i,t)})$ as defined in (\ref{Aphi}), for each $1\leq s\leq\sigma$ we can choose $j_s\in A_s(\varphi_{(i,t)})$ such that $x^r_{t-1}(j_s)=0$. We now replace $U$ by $U\cup\{(j_s,t-1):1\leq s\leq\sigma\}$ and we replace $\vec H_s$ by $\vec H_s\cup\big\{\big((i,t),(j_s,t-1)\big)\big\}$ $(1\leq s\leq\sigma)$, and the induction step is complete.
At each step in our construction, $r<t\leq 0$ for all $(i,t)\in U$, since at time $r$ one has $x^r_r(i)=1$ for all $i\in{\mathbb Z}^d$. Since $U$ can contain at most $\sigma^{-t}$ elements with time coordinate $t$, we see that the inductive construction ends after a finite number of steps. It is straightforward to check that the resulting graph is an explanation graph in the sense of Definition~\ref{def:finiteexpl}.
\end{Proof}
\subsection{Toom matchings}\label{S:match}
In this subsection, we continue our preparations for the proof of Theorem~\ref{T:contour}. Most of the proof of Theorem~\ref{T:contour} will consist, informally speaking, of showing that to each explanation graph, it is possible to add a suitable set of sources, such that the sources and sinks together define a Toom contour.
It follows from the definition of an explanation graph that for each $w\in U$ and $1\leq s\leq\sigma$, there exist a unique $n\geq 0$ and $w_0,\ldots,w_n$ such that
\begin{enumerate}
\item $w_0=w$ and $(w_{i-1},w_i)\in\vec H_s$ for all $0<i\leq n$,
\item $w_n\in U_\ast$ and $w_i\in U\backslash U_\ast$ for all $0\leq i<n$.
\end{enumerate}
In other words, this says that starting at each $w\in U$, there is a unique directed path that uses only directed edges from $\vec H_s$ and that ends at some vertex $w_n\in U_\ast$.
We will use the following notation:
\begin{equation}\left.\begin{array}{r@{\,}c@{\,}l}\label{Ww}
\displaystyle P_s(w)&:=&\displaystyle\big\{w_0,\ldots,w_n\big\},\\[5pt]
\displaystyle\pi_s(w)&:=&\displaystyle w_n,
\end{array}\quad\right\}\quad(w\in U,\ 1\leq s\leq\sigma).
\end{equation}
Then $P_s(w)$ is the path we have just described and $\pi_s(w)\in U_\ast$ is its endpoint.
By definition, we will use the word \emph{polar} to describe any sequence $(a_1,\ldots,a_\sigma)$ such that $a_s\in U$ for all $1\leq s\leq\sigma$ and the points $a_1=(i_1,t),\ldots,a_\sigma=(i_\sigma,t)$ all have the same time coordinate. We call $t$ the \emph{time} of the polar.
\begin{defi}\label{def:toommatching}
A \emph{Toom matching} for an explanation graph $(U,{\cal H})$ with $N:=|U_\ast|$ sinks is an $N\times\sigma$ matrix
\begin{equation}
\big(a_{i,s}\big)_{1\leq i\leq N,\ 1\leq s\leq\sigma}
\end{equation}
such that
\begin{enumerate}
\item $(a_{i,1},\ldots,a_{i,\sigma})$ is a polar for each $1\leq i\leq N$,
\item $\pi_s:\{a_{1,s},\ldots,a_{N,s}\}\to U_\ast$ is a bijection for each $1\leq s\leq\sigma$.
\end{enumerate}
\end{defi}
We will be interested in polars that have the additional property that all their elements lie ``close together'' in a certain sense. By definition, a \emph{point polar} is a polar $(a_1,\ldots,a_\sigma)$ such that $a_1=\cdots=a_\sigma$. We say that a polar $(a_1,\ldots,a_\sigma)$ is \emph{tight} if it is either a point polar, or there exists a $v\in U$ such that $(v,a_s)\in\vec H$ for all $1\leq s\leq\sigma$, where we recall that $\vec H:=\bigcup_{s=1}^\sigma\vec H_s$. The following proposition is the main result of this subsection.
\begin{proposition}[Toom matchings]
Let\label{P:match} $(U,{\cal H})$ be an explanation graph for $(0,0)$ with $N:=|U_\ast|$ sinks. Then there exists a Toom matching for $(U,{\cal H})$ such that in addition to the properties (i) and (ii) above,
\begin{enumerate}\addtocounter{enumi}{2}
\item $a_{1,1}=\cdots=a_{1,\sigma}=(0,0)$,
\item $(a_{i,1},\ldots,a_{i,\sigma})$ is a tight polar for each $1\leq i\leq N$.
\end{enumerate}
\end{proposition}
In the next subsection, we will derive Theorem~\ref{T:contour} from Proposition~\ref{P:match}. It is instructive to jump a bit ahead and already explain the main idea of the construction. Let $(a_{i,s})_{1\leq i\leq N,\ 1\leq s\leq\sigma}$ be the Toom matching from Proposition~\ref{P:match}. For each $i$ and $s$, we connect the vertices of the path $P_s(a_{i,s})$ defined in (\ref{Ww}) with directed edges of type $s$. By property~(ii) of a Toom matching, this has the consequence that each sink $u\in U_\ast$ of the explanation graph is the endvertex of precisely $\sigma$ edges, one of each type. Each point polar gives rise to a source where $\sigma$ charges emerge, one of each type, that then travel through the explanation graph until they arrive at a sink. For each polar $(a_{i,1},\ldots,a_{i,\sigma})$ that is not a point polar, we choose $v_i\in U$ such that $(v_i,a_{i,s})\in\vec H$ for all $1\leq s\leq\sigma$, and for each $1\leq s\leq\sigma$ we connect $v_i$ and $a_{i,s}$ with a directed edge of type $s$. These extra points $v_i$ then act as additional sources and, as will be proved in detail in the next subsection, our collection of directed edges now forms a Toom graph that is embedded in ${\mathbb Z}^{d+1}$, and the connected component of this Toom graph containing the origin forms a Toom contour that is present in ${\bm{\phh}}$. This is illustrated in Figure~\ref{fig:minexpl}. The picture on the right shows an explanation graph $(U,{\cal H})$, or rather the associated directed graph $(U,\vec H)$, with sinks indicated with a star. The embedded Toom graph in the middle picture of Figure~\ref{fig:minexpl} originates from a Toom matching of this explanation graph.
The proof of Proposition~\ref{P:match} takes up the remainder of this subsection. The proof is quite complicated and will be split over several lemmas. We fix an explanation graph $(U,{\cal H})$ for $(0,0)$ with $N:=|U_\ast|$ sinks. Because of our habit of drawing time downwards in pictures, it will be convenient to define a function $h:U\to{\mathbb N}$ by
\begin{equation}\label{eq:height}
h(i,t):=-t\qquad\big((i,t)\in U\big).
\end{equation}
We call $h(w)$ the \emph{height} of a vertex $w\in U$. For $u,v\in U$, we write $u\leadsto_{\vec H}v$ when there exist $u_0,\ldots,u_n\in U$ with $n\geq 0$, $u_0=u$, $u_n=v$, and $(u_{k-1},u_k)\in\vec H$ for all $0<k\leq n$. By definition, for $w_1,w_2\in U$, we write $w_1\approx w_2$ if $h(w_1)=h(w_2)$ and there exists a $w_3\in U$ such that $w_i\leadsto_{\vec H}w_3$ for $i=1,2$. Moreover, for $v,w\in U$, we write $v\sim w$ if there exist $m\geq 0$ and $v=v_0,\ldots,v_m=w$ such that $v_{i-1}\approx v_i$ for $1\leq i\leq m$. Then $\sim$ is an equivalence relation. In fact, if we view $U$ as a graph in which two vertices $v,w$ are adjacent if $v\approx w$, then the equivalence classes of $\sim$ are just the connected components of this graph. We let ${\cal C}$ denote the set of all (nonempty) equivalence classes.
It is easy to see that the origin $(0,0)$ and the sinks form equivalence classes of their own. With this in mind, we set ${\cal C}_\ast:=\big\{\{w\}:w\in U_\ast\big\}$. Each $C\in{\cal C}$ has a height $h(C)$ such that $h(v)=h(C)$ for all $v\in C$. For $C_1,C_2\in{\cal C}$, we write $C_1\to C_2$ if there exists a $(v_1,v_2)\in\vec H$ such that $v_i\in C_i$ $(i=1,2)$. Note that this implies that $h(C_2)=h(C_1)+1$. The following lemma says that ${\cal C}$ has the structure of a directed tree with the sinks as its leaves.
\begin{lemma}[Tree of equivalence classes]
For\label{L:Ctree} each $C\in{\cal C}$ with $C\neq\{(0,0)\}$, there exists a unique $C'\in{\cal C}$ such that $C'\to C$. Moreover, for each $C\in{\cal C}\backslash{\cal C}_\ast$, there exists at least one $C''\in{\cal C}$ such that $C\to C''$. Also, $C\in{\cal C}\backslash{\cal C}_\ast$ implies $C\cap U_\ast=\emptyset$.
\end{lemma}
\begin{Proof}
Since the sinks form equivalence classes of their own, $C\in{\cal C}\backslash{\cal C}_\ast$ implies $C\cap U_\ast=\emptyset$. If $C\in{\cal C}\backslash{\cal C}_\ast$, then condition~(v) in Definition~\ref{def:finiteexpl} of an explanation graph implies the existence of a $C''\in{\cal C}$ such that $C\to C''$. Similarly, if $C\in{\cal C}$ and $C\neq\{(0,0)\}$, then the existence of a $C'\in{\cal C}$ such that $C'\to C$ follows from condition~(iii) in Definition~\ref{def:finiteexpl}. It remains to show that $C'$ is unique.
Assume that, to the contrary, there exist $w,w'\in C$ and $(v,w),(v',w')\in\vec H$ so that $v$ and $v'$ do not belong to the same equivalence class. Since $w$ and $w'$ lie in the same equivalence class $C$, there exist $w_0,\ldots,w_m\in C$ with $w=w_0$, $w_m=w'$, and $w_{i-1}\approx w_i$ for all $0<i\leq m$. Using condition~(iii) in Definition~\ref{def:finiteexpl}, we can find $v_0,\ldots,v_m\in U$ such that $(v_i,w_i)\in\vec H$ $(0\leq i\leq m)$. In particular we can choose $v_0=v$ and $v_m=v'$. Since $v$ and $v'$ do not belong to the same equivalence class, there must exist an $0<i\leq m$ such that $v_{i-1}$ and $v_i$ do not belong to the same equivalence class. Since $w_{i-1}\approx w_i$, there exists a $u\in U$ such that $w_{i-1}\leadsto_{\vec H}u$ and $w_i\leadsto_{\vec H}u$. But then also $v_{i-1}\leadsto_{\vec H}u$ and $v_i\leadsto_{\vec H}u$, which contradicts the fact that $v_{i-1}$ and $v_i$ do not belong to the same equivalence class.
\end{Proof}
For $C,C'\in{\cal C}$, we describe the relation $C\to C'$ in words by saying that $C'$ is a direct descendant of $C$. We let ${\cal D}_C:=\{C'\in{\cal C}:C\to C'\}$ denote the set of all direct descendants of $C$. We will view ${\cal D}_C$ as an undirected graph with set of edges
\begin{equation}
{\cal E}_C:=\big\{\{C_1,C_2\}:\exists v\in C,\ w_1\in C_1 ,\ w_2\in C_2\mbox{ s.t.\ }
(v,w_i)\in\vec H\ \forall i=1,2\big\}.
\end{equation}
The fact that this definition is reminiscent of the definition of a tight polar is no coincidence and will become important in Lemma~\ref{L:tiso} below. We first prove the following lemma.
\begin{lemma}[Structure of the set of direct descendants]
For\label{L:DiC} each $C\in{\cal C}\backslash{\cal C}_\ast$, the graph $({\cal D}_C,{\cal E}_C)$ is connected.
\end{lemma}
\begin{Proof}
Let ${\cal D}_1,{\cal D}_2$ be nonempty disjoint subsets of ${\cal D}_C$ such that ${\cal D}_1\cup{\cal D}_2={\cal D}_C$ and let
\begin{equation}
D_i:=\big\{v\in C:\exists C'\in{\cal D}_i\mbox{ and }w\in C'\mbox{ s.t.\ }(v,w)\in\vec H\big\}
\qquad(i=1,2).
\end{equation}
To show that $({\cal D}_C,{\cal E}_C)$ is connected, we need to show that $D_1\cap D_2\neq\emptyset$ for all choices of ${\cal D}_1,{\cal D}_2$. By Lemma~\ref{L:Ctree}, $C\cap U_\ast=\emptyset$ and hence for each $v\in C$ there exists a $w\in U$ such that $(v,w)\in\vec H$. Therefore, since ${\cal D}_C$ contains all direct descendants of $C$, we have $D_1\cup D_2=C$. Since ${\cal D}_1$ and ${\cal D}_2$ are nonempty, so are $D_1$ and $D_2$. Assume that $D_1\cap D_2=\emptyset$. Then, since $C$ is an equivalence class, there must exist $v_i\in D_i$ $(i=1,2)$ such that $v_1\approx v_2$, i.e.,
\begin{equation}\label{comdesc}
\{w\in U:v_1\leadsto_{\vec H}w\}\cap\{w\in U:v_2\leadsto_{\vec H}w\}\neq\emptyset.
\end{equation}
However, for $i=1,2$, the set $\{w\in U:v_i\leadsto_{\vec H}w\}$ is entirely contained in the equivalence classes in ${\cal D}_i$ and their descendants. Since by Lemma~\ref{L:Ctree}, ${\cal C}$ has the structure of a tree, this contradicts (\ref{comdesc}).
\end{Proof}
We can now make the connection to the definition of tight polars. We say that a polar $(a_1,\ldots,a_\sigma)$ lies inside a set $D\subset U$ if $a_s\in D$ for all $1\leq s\leq\sigma$.
\begin{lemma}[Tight polars]
Let\label{L:tiso} $C\in{\cal C}\backslash{\cal C}_\ast$, let $M:=|{\cal D}_C|$ be the number of its direct descendants, and let $D_C:=\bigcup_{C'\in{\cal D}_C}C'$ be the union of all $C'\in{\cal D}_C$. Let $(a_{1,1},\ldots,a_{1,\sigma})$ be a polar inside $D_C$. Then, given that~$M\geq 2$, it is possible to choose tight polars $(a_{i,1},\ldots,a_{i,\sigma})$ $(2\leq i\leq M)$ inside $D_C$ such that:
\begin{equation}\label{tiso}
\mbox{For each $C'\in{\cal D}_C$ and $1\leq s\leq\sigma$, there is a unique $1\leq i\leq M$ such that $a_{i,s}\in C'$.}
\end{equation}
\end{lemma}
\begin{Proof}
By Lemma~\ref{L:DiC}, the graph ${\cal D}_C$ is connected in the sense defined there. To prove the claim of Lemma~\ref{L:tiso} we will prove a slightly more general claim. Let ${\cal D}'_C$ be a connected subgraph of ${\cal D}_C$ with $M'$ elements, let $D'_C:=\bigcup_{C'\in{\cal D}'_C}C'$, and let $(a_{1,1},\ldots,a_{1,\sigma})$ be a polar inside $D'_C$. Then we claim that it is possible to choose tight polars $(a_{i,1},\ldots,a_{i,\sigma})$ $(2\leq i\leq M')$ inside $D'_C$ such that (\ref{tiso}) holds with ${\cal D}_C$ and $M$ replaced by ${\cal D}'_C$ and $M'$ respectively.
We will prove the claim by induction on $M'$. The claim is trivial for $M'=1$. We will now prove the claim for general $M'\geq 2$ assuming it proved for $M'-1$. Since ${\cal D}'_C$ is connected, we can find some $C'\in{\cal D}'_C$ so that ${\cal D}'_C\backslash\{C'\}$ is still connected. If none of the vertices $a_{1,1},\ldots,a_{1,\sigma}$ lies inside $C'$, then we can add a point polar inside $C'$, use the induction hypothesis, and we are done. Likewise, if all of the vertices $a_{1,1},\ldots,a_{1,\sigma}$ lie inside $C'$, then we can add a point polar inside $D'_C\backslash C'$, use the induction hypothesis, and we are done.
We are left with the case that some, but not all of the vertices $a_{1,1},\ldots,a_{1,\sigma}$ lie inside $C'$. Without loss of generality, we assume that $a_{1,1},\ldots,a_{1,m}\in C'$ and $a_{1,m+1},\ldots,a_{1,\sigma}\in D'_C\backslash C'$. Since ${\cal D}'_C$ is connected in the sense of Lemma~\ref{L:DiC}, we can find a $v\in C$ and $w_1\in C'$, $w_2\in D'_C\backslash C'$ such that $(v,w_i)\in\vec H$ $(i=1,2)$. Setting $a_{2,1}=\cdots=a_{2,m}:=w_2$ and $a_{2,m+1}=\cdots=a_{2,\sigma}:=w_1$ then defines a tight polar such that:
\begin{itemize}
\item For each $1\leq s\leq\sigma$, there is a unique $i\in\{1,2\}$ such that $a_{i,s}\in C'$.
\item For each $1\leq s\leq\sigma$, there is a unique $i\in\{1,2\}$ such that $a_{i,s}\in D'_C\backslash C'$.
\end{itemize}
In particular, the elements of $(a_{i,s})_{i\in\{1,2\},\ 1\leq s\leq\sigma}$ with $a_{i,s}\in D'_C\backslash C'$ form a polar in $D'_C\backslash C'$, so we can again use the induction hypothesis to complete the argument.
\end{Proof}
\begin{Proof}[of Proposition~\ref{P:match}]
We will use an inductive construction. Let $L:=\max\{h(w):w\in U\}$. For each $0\leq l\leq L$, we set $U_{\leq l}:=\{w\in U:h(w)\leq l\}$ and ${\cal C}_l:=\{C\in{\cal C}:h(C)=l\}$. We will inductively construct an increasing sequence of integers $1=N_0\leq N_1\leq\cdots\leq N_L$ and for each $0\leq l\leq L$, we will construct an $N_l\times \sigma$ matrix $\big(a_{i,s}(l)\big)_{1\leq i\leq N_l,\ 1\leq s\leq\sigma}$ such that $a_{i, s}(l)\in U_{\leq l}$ for all $1\leq i\leq N_l$ and $1\leq s\leq\sigma$. Our construction will be consistent in the sense that
\begin{equation}
a_{i,s}(l+1)=a_{i,s}(l)\quad\forall 1\leq i\leq N_l,\ 1\leq s\leq\sigma,\ 0\leq l<L,
\end{equation}
that is at each step of the induction we add rows to the matrix we have constructed so far. In view of this, we can unambiguously drop the dependence on $l$ from our notation. We will choose the matrices
\begin{equation}\label{indmat}
\big(a_{i,s}\big)_{1\leq i\leq N_l,\ 1\leq s\leq\sigma}
\end{equation}
in such a way that for each $0\leq l\leq L$:
\begin{enumerate}
\item $a_{1,1}=\cdots=a_{1,\sigma}=(0,0)$,
\item $(a_{i,1},\ldots,a_{i,\sigma})$ is a tight polar for each $2\leq i\leq N_l$,
\item For all $C\in{\cal C}_l$ and $1\leq s\leq\sigma$, there is a unique $1\leq i\leq N_l$ such that $P_s(a_{i,s})\cap C\neq\emptyset$,
\end{enumerate}
where $P_s(a_{i,s})$ is defined as in (\ref{Ww}). We claim that setting $N:=N_L$ then yields a Toom matching with the additional properties described in the proposition. Property~(i) of Definition~\ref{def:toommatching} of a Toom matching and the additional properties (iii) and (iv) from Proposition~\ref{P:match} follow trivially from conditions (i) and (ii) of our inductive construction, so it remains to check property~(ii) of Definition~\ref{def:toommatching}, which can be reformulated by saying that for each $w\in U_\ast$ and $1\leq s\leq\sigma$, there exists a unique $1\leq i\leq N$ such that $w\in P_s(a_{i,s})$. Since $\{w\}\in{\cal C}$ for each $w\in U_\ast$ (vertices in $U_\ast$ form an equivalence class of their own), this follows from condition~(iii) of our inductive construction.
We start the induction with $N_0=1$ and $a_{1,1}=\cdots=a_{1,\sigma}=(0,0)$. Since $(0,0)$ is the only vertex in $U$ with height zero, this obviously satisfies the induction hypotheses (i)--(iii). Now assume that (i)--(iii) are satisfied for some $0\leq l<L$. We need to define $N_{l+1}$ and choose polars $(a_{i,1},\ldots,a_{i,\sigma})$ with $N_l<i\leq N_{l+1}$ so that (i)--(iii) are satisfied for $l+1$. We note that by Lemma~\ref{L:Ctree}, each $C'\in{\cal C}_{l+1}$ is the direct descendant of a unique $C\in{\cal C}_l\backslash{\cal C}_\ast$.
By the induction hypothesis~(iii), for each $C\in{\cal C}_l\backslash{\cal C}_\ast$ and $1\leq s\leq\sigma$, there exists a unique $1\leq i_s\leq N_l$ such that $P_s(a_{i_s,s})\cap C\neq\emptyset$. Let ${\cal D}_C:=\{C'\in{\cal C}:C\to C'\}$ denote the set of all direct descendants of $C$ and let $D_C:=\bigcup{\cal D}_C$ denote the union of its elements. Then setting $\{b_s\}:=P_s(a_{i_s,s})\cap D_C$ $(1\leq s\leq\sigma)$ defines a polar $(b_1,\ldots,b_\sigma)$ inside $D_C$. Applying Lemma~\ref{L:tiso} to this polar, we can add tight polars to our matrix in (\ref{indmat}) so that condition (iii) becomes satisfied for all $C'\in{\cal D}_C$. Doing this for all $C\in{\cal C}_l\backslash{\cal C}_\ast$, using the tree structure of ${\cal C}$ (Lemma~\ref{L:Ctree}), we see that we can satisfy the induction hypotheses (i)--(iii) for $l+1$.
\end{Proof}
\subsection{Construction of Toom contours}\label{S:constr}
In this subsection, we prove Theorem~\ref{T:contour}. With Proposition~\ref{P:match} proved, most of the work is already done. We will prove a slightly more precise statement. Below $\psi(V)$ and $\psi(V_\ast)$ denote the images of $V$ and $V_\ast$ under $\psi$ and $\psi(\vec E_s):=\big\{\big(\psi(v),\psi(w)\big):(v,w)\in \vec E_s\big\}$. Theorem~\ref{T:contour} is an immediate consequence of Lemma~\ref{L:explan} and the following theorem.
\begin{theorem}[Presence of a Toom contour]
Under\label{T:contex} the assumptions of Theorem~\ref{T:contour}, whenever there is an explanation graph $(U,{\cal H})$ for $(0,0)$ present in ${\bm{\phh}}$, there is a Toom contour $(V,{\cal E},v_\circ,\psi)$ rooted at $(0,0)$ present in ${\bm{\phh}}$ with the additional properties that $\psi(V)\subset U$, $\psi(V_\ast)\subset U_\ast$, and $\psi(\vec E_s)\subset\vec H_s$ for all $1\leq s\leq\sigma$.
\end{theorem}
\begin{Proof}
The main idea of the proof has already been explained below Proposition~\ref{P:match}. We now fill in the details. Let $(U,{\cal H})$ be an explanation graph for $(0,0)$ that is present in ${\bm{\phh}}$. Let $N:=|U_\ast|$ be the number of sinks. By Proposition~\ref{P:match} there exists a Toom matching $\big(a_{i,s}\big)_{1\leq i\leq N,\ 1\leq s\leq\sigma}$ for $(U,{\cal H})$ such that $a_{1,1}=\cdots=a_{1,\sigma}=(0,0)$, and $(a_{i,1},\ldots,a_{i,\sigma})$ is a tight polar for each $1\leq i\leq N$.
Recall from (\ref{Ww}) that $P_s(w)$ denotes the unique directed path starting at $w$ that uses only directed edges from $\vec H_s$ and that ends at some vertex in $U_\ast$. For each $1\leq i\leq N$ such that $(a_{i,1},\ldots,a_{i,\sigma})$ is a point polar, and for each $1\leq s\leq\sigma$, we will use the notation
\begin{equation}\label{point}
P_s(a_{i,s})=\big\{a^0_{i,s},\ldots,a^{m(i,s)}_{i,s}\big\},
\end{equation}
with $(a^{l-1}_{i,s},a^l_{i,s})\in\vec H_s$ for all $0<l\leq m(i,s)$. For each $1\leq i\leq N$ such that $(a_{i,1},\ldots,a_{i,\sigma})$ is not a point polar, by the definition of a tight polar, we can choose $v_i\in U$ such that $(v_i,a_{i,s})\in\vec H$ for all $1\leq s\leq\sigma$. In this case, we will use the notation
\begin{equation}\label{npoint}
\{v_i\}\cup P_s(a_{i,s})=\big\{a^0_{i,s},\ldots,a^{m(i,s)}_{i,s}\big\},
\end{equation}
where $(a^0_{i,s},a^1_{i,s})\in\vec H$ and $(a^{l-1}_{i,s},a^l_{i,s})\in\vec H_s$ for all $1<l\leq m(i,s)$.
We can now construct a Toom graph $(V,{\cal E})$ with a specially designated source $v_\circ$ as follows. We set
\begin{equation}
w(i,s,l):=\left\{\begin{array}{ll}
i\quad&\mbox{if }l=0<m(i,s),\\[5pt]
(i,s,l)\quad&\mbox{if }0<l<m(i,s),\\[5pt]
a^{m(i,s)}_{i,s}\quad&\mbox{if }l=m(i,s).
\end{array}\right.\qquad(1\leq i\leq N,\ 1\leq s\leq\sigma),
\end{equation}
and
\be\begin{array}{r@{\,}c@{\,}l}\label{WF}
\displaystyle V&:=&\big\{w(i,s,l):1\leq i\leq N,\ 1\leq s\leq\sigma,\ 0\leq l\leq m(i,s)\big\},\\[5pt]
\displaystyle\vec E_s&:=&\displaystyle\big\{\big(w(i,s,l-1),w(i,s,l)\big):
1\leq i\leq N,\ 0<l\leq m(i,s)\big\}\quad(1\leq s\leq\sigma),\\[5pt]
\displaystyle v_\circ&:=&\displaystyle w(1,1,0)=\cdots=w(1,\sigma,0).
\end{array}\ee
It is straightforward to check that $(V,{\cal E})$ is a Toom graph with sets of sources, internal vertices, and sinks given by
\be\begin{array}{r@{\,}c@{\,}l}
\displaystyle V_\circ&=&\displaystyle\big\{i:1\leq i\leq N,\ m(i,s)>0\big\}
\cup\{a^0_{i,s}:m(i,s)=0\big\},\\[5pt]
\displaystyle V_s&=&\displaystyle\big\{(i,s,l):
1\leq i\leq N,\ 0<l<m(i,s)\big\}\qquad(1\leq s\leq\sigma),\\[5pt]
\displaystyle V_\ast&=&\displaystyle\big\{a^{m(i,s)}_{i,s}:1\leq i\leq N,\ 1\leq s\leq\sigma\big\}=U_\ast.
\end{array}\ee
Note that the vertices of the form $a^0_{i,s}$ with $m(i,s)=0$ are the isolated vertices, that are both a source and a sink. We now claim that setting
\begin{equation}
\psi\big(w(i,s,l)\big):=a^l_{i,s}\qquad(1\leq i\leq N,\ 1\leq s\leq\sigma,\ 0\leq l\leq m(i,s))
\end{equation}
defines an embedding of $(V,{\cal E})$. We first need to check that this is a good definition in the sense that the right-hand side is really a function of $w(i,s,l)$ only. Indeed, when $l=0<m(i,s)$, we have $w(i,s,l)=i$ and $a^0_{i,1}=\cdots=a^0_{i,\sigma}$ by the way $a^0_{i,s}$ has been defined in (\ref{point}) and (\ref{npoint}). For $0<l<m(i,s)$, we have $w(i,s,l)=(i,s,l)$, and finally, for $l=m(i,s)$, we have $w(i,s,l)=a^l_{i,s}$.
We next check that $\psi$ is an embedding, i.e.,
\begin{enumerate}
\item $\displaystyle\psi_{d+1}(w)=\psi_{d+1}(v)-1$ for all $(v,w)\in\vec E$,
\item $\psi(v_1)\neq\psi(v_2)$ for each $v_1\in V_\ast$ and $v_2\in V$ with $v_1\neq v_2$,
\item $\psi(v_1)\neq\psi(v_2)$ for each $v_1,v_2\in V_s$ with $v_1\neq v_2$ $(1\leq s\leq\sigma)$.
\end{enumerate}
Property~(i) is clear from the fact that $\vec E\subset\vec H$ and Definition~\ref{def:finiteexpl} of an explanation graph. Property~(ii) follows from the fact that $\psi(V_\ast)=U_\ast$ and $\psi(V\backslash V_\ast)\subset U\backslash U_\ast$. Property~(iii), finally, follows from the observation that
\begin{equation}
P_s(a_{i,s})\cap P_s(a_{j,s})=\emptyset
\quad\forall 1\leq s\leq\sigma,\ 1\leq i,j\leq N,\ i\neq j.
\end{equation}
Indeed, $P_s(a_{i,s})\cap P_s(a_{j,s})\neq\emptyset$ would imply that $\pi_s(a_{i,s})=\pi_s(a_{j,s})$, as in the explanation graph there is a unique directed path of each type from every vertex that ends at some~$w\in U_\ast$, which contradicts the definition of a Toom matching.
Since moreover $\psi(v_\circ)=(0,0)$ and property~(ii) of Definition~\ref{def:finiteexpl} implies that $t<0$ for all $(i,t)\in\psi(V)\backslash\{(0,0)\}$, we see that the quadruple $(V,{\cal E},v_\circ,\psi)$ satisfies all the defining properties of a Toom contour (see Definition~\ref{def:toomcontour}), except that the Toom graph $(V,{\cal E})$ may fail to be connected. To fix this, we restrict ourselves to the connected component of $(V,{\cal E})$ that contains the root $v_\circ$.
To complete the proof, we must show that $(V,{\cal E},v_\circ,\psi)$ is present in ${\bm{\phh}}$, i.e.,
\begin{enumerate}
\item $\displaystyle\varphi_{\psi(v)}=\phi^0$ for all $\displaystyle v\in V_\ast$,
\item $\displaystyle\varphi_{\psi(v)}\in\{\phi_1,\ldots,\phi_m\}$ for all $\displaystyle v\in V\backslash V_\ast$,
\item $\displaystyle\vec\psi(w)-\vec\psi(v)\in A_s(\varphi_{\psi(v)})$ for all $(v,w)\in\vec E^\ast_s$ $(1\leq s\leq\sigma$),
\item $\displaystyle\vec\psi(w)-\vec\psi(v)\in\bigcup_{s=1}^\sigma A_s(\varphi_{\psi(v)})$ for all $(v,w)\in\vec E^\circ$.
\end{enumerate}
We will show that these properties already hold for the original quadruple $(V,{\cal E},v_\circ,\psi)$, without the need to restrict to the connected component of $(V,{\cal E})$ that contains the root. Since the explanation graph $(U,{\cal H})$ is present in ${\bm{\phh}}$, we have $U_\ast=\{u\in U:\varphi_u=\phi^0\}$. Since $\psi(V_\ast)=U_\ast$, this implies properties (i) and (ii). The fact that the explanation graph $(U,{\cal H})$ is present in ${\bm{\phh}}$ moreover means that $j-i\in A_s(\varphi_{(i,t)})$ for all $\big((i,t),(j,t-1)\big)\in\vec H_s$ $(1\leq s\leq\sigma)$. Since $(a^0_{i,s},a^1_{i,s})\in\vec H$ and $(a^{l-1}_{i,s},a^l_{i,s})\in\vec H_s$ for all $1<l\leq m(i,s)$ $(1\leq i\leq N,\ 1\leq s\leq\sigma)$, this implies properties (iii) and (iv).
\end{Proof}
\subsection{Construction of Toom contours with two charges}\label{S:Tcycles}
In this subsection we prove Theorem~\ref{T:strongpres}. As in the previous subsection, we will construct the Toom contour ``inside'' an explanation graph. Theorem~\ref{T:strongpres} is an immediate consequence of Lemma~\ref{L:explan} and the following theorem.
\begin{theorem}[Strong presence of a Toom contour]
If\label{T:strex} $\sigma=2$, then Theorem~\ref{T:contex} can be strengthened in the sense that the Toom contour $(V,{\cal E},v_\circ,\psi)$ is strongly present in ${\bm{\phh}}$.
\end{theorem}
Although it is a strengthening of Theorem~\ref{T:contex}, our proof of Theorem~\ref{T:strex} will be completely different. In particular, we will not make use of the Toom matchings of Subsection~\ref{S:match}. Instead, we will exploit the fact that if we reverse the direction of edges of one of the charges, then a Toom contour with two charges becomes a directed cycle. This allows us to give a proof of Theorem~\ref{T:strex} based on the method of ``loop erasion'' (as explained below) that seems difficult to generalise to Toom contours with three or more charges.
Let $n\geq 0$ be an even integer and let $V:=\{0,\ldots,n-1\}$, equipped with addition modulo $n$. Let $\psi:V\to{\mathbb Z}^{d+1}$ be a function such that
\begin{equation}\label{updo}
\big|\psi_{d+1}(k)-\psi_{d+1}(k-1)\big|=1\qquad(1\leq k\leq n).
\end{equation}
We write $\psi(k)=\big(\vec\psi(k),\psi_{d+1}(k)\big)$ $(k\in V)$ and for $n\geq 2$ we define:
\be\begin{array}{r@{\,}c@{\,}l}\label{eq:v}
\displaystyle V_1&:=&\displaystyle\big\{k\in V:
\psi_{d+1}(k-1)>\psi_{d+1}(k)>\psi_{d+1}(k+1)\big\},\\[5pt]
\displaystyle V_2&:=&\displaystyle\big\{k\in V:
\psi_{d+1}(k-1)<\psi_{d+1}(k)<\psi_{d+1}(k+1)\big\},\\[5pt]
\displaystyle V_\ast&:=&\displaystyle\big\{k\in V:
\psi_{d+1}(k-1)>\psi_{d+1}(k)<\psi_{d+1}(k+1)\big\},\\[5pt]
\displaystyle V_\circ&:=&\displaystyle\big\{k\in V:
\psi_{d+1}(k-1)<\psi_{d+1}(k)>\psi_{d+1}(k+1)\big\}.
\end{array}\ee
In the trivial case that $n=0$, we set $V_1=V_2:=\emptyset$ and $V_\circ=V_\ast:=\{0\}$.
\begin{defi}\label{def:toomcycle}
Let $V$ be as above. A \emph{Toom cycle} is a function $\psi:V\to{\mathbb Z}^{d+1}$ such that:
\begin{enumerate}
\item $\psi$ satisfies (\ref{updo}),
\item $\psi(k_1)\neq\psi(k_2)$ for each $k_1\in V_\ast$ and $k_2\in V$ with $k_1\neq k_2$,
\item $\psi(k_1)\neq\psi(k_2)$ for each $k_1,k_2\in V_s$ with $k_1\neq k_2$ $(1\leq s\leq\sigma)$,
\item $t<\psi_{d+1}(0)$ for all $(i,t)\in\psi(V)\backslash\{\psi(0)\}$,
\end{enumerate}
where $V_1,V_2,V_\ast$, and $V_\circ$ are defined as in (\ref{eq:v}).
\end{defi}
If $\psi:V\to{\mathbb Z}^{d+1}$ is a Toom cycle of length $n\geq 2$, then we set:
\be\begin{array}{r@{\,}c@{\,}l}
\displaystyle\vec E_1&:=&\displaystyle\big\{(k,k+1):
\psi_{d+1}(k)>\psi_{d+1}(k+1),\ k\in V\big\},\\[5pt]
\displaystyle\lvec E_2&:=&\displaystyle\big\{(k,k+1):
\psi_{d+1}(k)<\psi_{d+1}(k+1),\ k\in V\big\},\\[5pt]
\displaystyle\vec E_2&:=&\displaystyle\big\{(k,l):(l,k)\in\lvec E_2\big\},
\end{array}\ee
where as before we calculate modulo $n$. If $n=0$, then $\vec E_1=\vec E_2:=\emptyset$. We let $(V,{\cal E}):=(V,\vec E_1,\vec E_2)$ denote the corresponding directed graph with two types of directed edges. The following simple observation makes precise our earlier claim that if we reverse the direction of edges of one of the charges, then a Toom contour with two charges becomes a directed cycle.
\begin{lemma}[Toom cycles]
If\label{L:cyccon} $\psi:V\to{\mathbb Z}^{d+1}$ is a Toom cycle, then $(V,{\cal E},0,\psi)$ is a Toom contour with root $0$, set of sources $V_\circ$, set of sinks $V_\ast$, and sets of internal vertices of charge $s$ given by $V_s$ $(s=1,2)$. Moreover, every Toom contour with two charges is equivalent to a Toom contour of this form.
\end{lemma}
\begin{Proof}
Immediate from the definitions.
\end{Proof}
\begin{Proof}[of Theorem~\ref{T:strex}]
We will first show that Theorem~\ref{T:contex} can be strengthened in the sense that the Toom contour $(V,{\cal E},v_\circ,\psi)$ also satisfies condition~(v) of Definition~\ref{D:strongpres}. As in Theorem~\ref{T:contex}, let $(U,{\cal H})$ be an explanation graph for $(0,0)$ that is present in ${\bm{\phh}}$. We let $\lvec H_s:=\{(k,l):(l,k)\in\vec H_s\}$ denote the directed edges we get by reversing the direction of all edges in $\vec H_s$ $(s=1,2)$.
We will use an inductive construction. At each point in our construction, $(V,{\cal E},0,\psi)$ will be a Toom contour rooted at $(0,0)$ that is obtained from a Toom cycle $\psi:V\to{\mathbb Z}^{d+1}$ as in Lemma~\ref{L:cyccon}, and $T:=\inf\{\psi_{d+1}(k):k\in V\}$ is the earliest time coordinate visited by the contour. At each point in our construction, it will be true that:
\begin{itemize}
\item[{\rm(i)'}] $\displaystyle\varphi_{\psi(k)}=\phi^0$ for all $k\in V_\ast$ with $T+1<\psi_{d+1}(k)$,
\item[{\rm(ii)}] $\displaystyle\varphi_{\psi(v)}\in\{\phi_1,\ldots,\phi_m\}$ for all $v\in V\backslash V_\ast$,
\item[{\rm(iiia)}] $\displaystyle\big(\psi(k),\psi(k+1)\big)\in\vec H_1$ for each $(k,k+1)\in\vec E_1$ with $k\in V_1\cup\{0\}$,
\item[{\rm(iiib)}] $\displaystyle\big(\psi(k-1),\psi(k)\big)\in\lvec H_2$ for each $(k-1,k)\in\lvec E_2$ with $k\in V_2\cup\{0\}$,
\item[{\rm(iva)}] $\displaystyle\big(\psi(k),\psi(k+1)\big)\in\vec H_2$ for each $(k,k+1)\in\vec E_1$ with $k\in V_\circ\backslash\{0\}$,
\item[{\rm(ivb)}] $\displaystyle\big(\psi(k-1),\psi(k)\big)\in\lvec H_1$ for each $(k-1,k)\in\lvec E_2$ with $k\in V_\circ\backslash\{0\}$,
\item[{\rm(vi)}] $\psi(k-1)\neq\psi(k+1)$ for each $k\in V_\circ\backslash\{0\}$.
\end{itemize}
We observe that condition~(i)' is a weaker version of condition~(i) of Definition~\ref{D:present}. Conditions (ii), (iiia), and (iiib) correspond to conditions (ii) and (iii) of Definition~\ref{D:present}. Conditions (iva) and (ivb) are a stronger version of condition~(iv) of Definition~\ref{D:present}, which also implies condition~(v) of Definition~\ref{D:strongpres}. Finally, condition (vi) corresponds to condition~(vi) of Definition~\ref{D:strongpres}. Our inductive construction will end as soon as condition~(i) of Definition~\ref{D:present} is fully satisfied, i.e., when:
\begin{itemize}
\item[{\rm(i)}] $\displaystyle\varphi_{\psi(k)}=\phi^0$ for all $k\in V_\ast$.
\end{itemize}
\begin{figure}[htb]
\begin{center}
\inputtikz{looperas}
\caption{The process of exploration and loop erasion.}
\label{fig:looperas}
\end{center}
\end{figure}
We start the induction with the trivial Toom cycle defined by $V:=\{0\}$ and $\psi(0)=(0,0)$. We identify a Toom cycle $\psi:\{0,\ldots,n-1\}\to{\mathbb Z}^{d+1}$ with the word $\psi(0)\cdots\psi(n-1)$. In each step of the induction, as long as (i) is not yet satisfied, we modify our Toom cycle according to the following two steps, which are illustrated in Figure~\ref{fig:looperas}.
\begin{itemize}
\item[\rm I.] \emph{Exploration.} We pick $k\in V_\ast$ such that $\displaystyle\varphi_{\psi(k)}\neq\phi^0$ and $\psi_{d+1}(k)=T+1$, or if such a $k$ does not exist, with $\psi_{d+1}(k)=T$. We define $w_s$ by $\vec H_{s,{\rm out}}(\psi(k)):=(\psi(k),w_s)$ $(s=1,2)$. In the word $\psi(0)\cdots\psi(n-1)$, on the place of $\psi(k)$, we insert the word $\psi(k)w_1\psi(k)w_2\psi(k)$.
\item[\rm II.] \emph{Loop erasion.} If as a result of the exploration, there are $k_1,k_2\in V_\ast$ with $k_1<k_2$ such that $\psi(k_1)=\psi(k_2)$, then we remove the subword $\psi(k_1)\cdots\psi(k_2)$ from the word $\psi(0)\cdots\psi(n-1)$ and on its place insert $\psi(k_1)$. We repeat this step until $\psi(k_1)\neq\psi(k_2)$ for all $k_1,k_2\in V_\ast$ with $k_1\neq k_2$.
\end{itemize}
The effect of the exploration step is that one sink is replaced by a source and two internal vertices, one of each charge, and then two new sinks are created (see Figure~\ref{fig:looperas}). These new sinks are created at height $-T$ or $-T+1$ and hence can overlap with each other or with other preexisting sinks, but not with sources or internal vertices. If the exploration step has created overlapping sinks or the two new internal vertices overlap, then these are removed in the loop erasion step. After the removal of a loop, all remaining vertices are of the same type (sink, source, or internal vertex of a given charge) as before. Using these observations, it is easy to check that:
\begin{itemize}
\item[(C)] After exploration and loop erasion, the modified word $\psi$ is again a Toom cycle rooted at $(0,0)$ (see Definition~\ref{def:toomcycle}) and the induction hypotheses (i)', (ii), (iiia), (iiib), (iva), (ivb) and (vi) remain true.
\end{itemize}
Let $\Delta:=\{\psi(k):k\in V_\ast,\ \varphi_{\psi(k)}\neq\phi^0\}$. In each step of the induction, we remove one element from $\Delta$ with a given time coordinate, say $t$, and possibly add one or two new elements to $\Delta$ with time coordinates $t-1$. Since the explanation graph is finite, this cannot go on forever so the induction terminates after a finite number of steps. This completes the proof that Theorem~\ref{T:contex} can be strengthened in the sense that the Toom contour $(V,{\cal E},v_\circ,\psi)$ also satisfies condition~(v) of Definition~\ref{D:strongpres}.
\end{Proof}
\subsection{Forks}\label{S:fork}
We recall that for Toom contours with two charges, Theorem~\ref{T:strongpres} strengthened Theorem~\ref{T:contour} by showing the presence of a Toom contour with certain additional properties. As we have seen in Subsection~\ref{S:explic}, such additional properties reduce the number of Toom contours one has to consider and hence lead to sharper Peierls bounds. In the present subsection, we will prove a similar (but weaker) strengthened version of Theorem~\ref{T:contour} that holds for an arbitrary number of charges.
Let $(V,{\cal E},v_\circ,\psi)$ be a Toom contour. By definition, a \emph{fork} is a source $v\in V_\circ$ such that:
\begin{equation}
\big|\{\psi(w):(v,w)\in\vec E\}\big|=2.
\end{equation}
As we will show in a moment, the proof of Theorem~\ref{T:contex} actually yields the following somewhat stronger statement. In the original formulation of Toom \cite{Too80}, his contours contain no sources but they contain objects that Toom calls forks and that effectively coincide with our usage of this term. For Toom, the fact that the number of sinks equals the number of forks plus one was part of his definition of a contour. In our formulation, this is a consequence of the fact that the number of sources equals the number of sinks.
\begin{theorem}[Toom contour with forks only]
Theorem~\ref{T:contex}\label{T:fork} can be strengthened in the sense that all sources $v\in V\backslash\{v_\circ\}$ are forks.
\end{theorem}
\begin{Proof}
Let us say that $v\in V_\circ$ is a \emph{point source} if $|\{\psi(w):(v,w)\in\vec E\}|=1$. We first show that Theorem~\ref{T:contex} can be strengthened in the sense that all sources $v\in V\backslash\{v_\circ\}$ are forks or point sources. Indeed, this is a direct consequence of the fact that the tight polars $(a_{i,1},\ldots,a_{i,\sigma})$ $(2\leq i\leq M)$ constructed in the proof of Lemma~\ref{L:tiso} are either point polars or have the property that the set $\{a_{i,s}:1\leq s\leq\sigma\}$ has precisely two elements. The latter give rise to forks while the former give rise to point sources or isolated vertices. Since a Toom contour is connected, sources other than the root can never be isolated vertices. This shows that Theorem~\ref{T:contex} can be strengthened in the sense that all sources $v\in V\backslash\{v_\circ\}$ are forks or point sources.
Now if some $v\in V_\circ\backslash\{v_\circ\}$ is a point source, then we can simplify the Toom contour by removing this source from the contour and joining all elements of $\{w:(v,w)\in\vec E\}$ into a new source, that is embedded at the space-time point $z\in{\mathbb Z}^{d+1}$ defined by $\{z\}:=\{\psi(w):(v,w)\in\vec E\}$. Repeating this process until it is no longer possible to do so we arrive at a Toom contour $(V,{\cal E},v_\circ,\psi)$ with the additional property that all sources $v\in V\backslash\{v_\circ\}$ are forks.
\end{Proof}
\section{Bounds for eroders}\label{S:bounds}
\subsubsection*{Outline}
In this section, we apply the abstract theory developed in the previous section to concrete models. In Subsection~\ref{S:erosion}, we discuss the erosion criteria (\ref{erosion}) and (\ref{erode}). In particular, we prove Lemma~\ref{L:erode} and show that (\ref{erode}) implies that $\phi$ is an eroder. In Subsection~\ref{S:expbd}, we prove Lemmas \ref{L:expbd} and \ref{L:expbdcycle} which give an exponential upper bound on the number of Toom contours and Toom cycles with a given number of edges. In Subsection~\ref{S:finP}, we prove Lemma~\ref{L:Peifin} which shows that for eroders, finiteness of the Peierls sum is sufficient to conclude that $\overline\rho(p)>0$. At this point, we have proved all ingredients needed for the proof of Toom's stability theorem described in Subsection~\ref{S:erod} and also for the explicit bounds for concrete eroders stated in Subsection~\ref{S:explic}.
\subsection{Eroders}\label{S:erosion}
In this subsection we prove Lemma~\ref{L:erode}. Our proof depends on the equivalence of (\ref{erosion}) and the eroder property, which is proved in \cite[Thm~1]{Pon13}. In Lemma~\ref{L:Lerod}, we give an alternative direct proof that (\ref{erode}) implies that $\phi$ is an eroder. Although we do not really need this alternative proof, we have included it since it is short and instructive. In particular, it links the eroder property to edge speeds, which we otherwise do not discuss but which are an important motivating idea behind the definition of Toom contours.\medskip
\begin{Proof}[of Lemma~\ref{L:erode}]
In \cite[Lemma~12]{Pon13} it is shown\footnote{Since Ponselet discusses stability of the all-zero fixed point while we discuss stability of the all-one fixed point, in \cite{Pon13}, the roles of zeros and ones are reversed compared to our conventions.} that (\ref{erosion}) is equivalent to the existence of a polar function $L$ of dimension $2\leq\sigma\leq d+1$ and constants $\varepsilon_1,\ldots,\varepsilon_\sigma$ such that $\sum_{s=1}^\sigma \varepsilon_s>0$ and for each $1\leq s\leq\sigma$, there exists an $A_s\in{\cal A}(\phi)$ such that $\varepsilon_s-L_s(i)\leq 0$ for all $i\in A_s$. It follows that
\begin{equation}
\sum_{s=1}^\sigma\sup_{A\in{\cal A}(\phi)}\inf_{i\in A}L_s(i)
\geq\sum_{s=1}^\sigma\inf_{i\in A_s}L_s(i)\geq\sum_{s=1}^\sigma \varepsilon_s>0,
\end{equation}
which shows that (\ref{erode}) holds. Assume, conversely, that (\ref{erode}) holds. Since ${\cal A}(\phi)$ is finite, for each $1\leq s\leq\sigma$ we can choose $A_s(\phi)\in{\cal A}(\phi)$ such that
\begin{equation}\label{epss}
\varepsilon_s:=\inf_{i\in A_s(\phi)}L_s(i)=\sup_{A\in{\cal A}(\phi)}\inf_{i\in A}L_s(i).
\end{equation}
Then (\ref{erode}) says that $\sum_{s=1}^\sigma\varepsilon_s>0$. Let $H_s:=\{z\in{\mathbb R}^d:L_s(z)\geq\varepsilon_s\}$. By the definition of a polar function, $\sum_{s=1}^\sigma L_s(z)=0$ for each $z\in{\mathbb R}^d$, and hence the condition $\sum_{s=1}^\sigma\varepsilon_s>0$ implies that for each $z\in{\mathbb R}^d$, there exists an $1\leq s\leq\sigma$ such that $L_s(z)<\varepsilon_s$. In other words, this says that $\bigcap_{s=1}^\sigma H_s=\emptyset$. For each $1\leq s\leq\sigma$, the set $A_s(\phi)$ is contained in the half-space $H_s$ and hence the same is true for ${\rm Conv}(A_s(\phi))$, so we conclude that
\begin{equation}
\bigcap_{s=1}^\sigma{\rm Conv}\big(A_s(\phi)\big)=\emptyset,
\end{equation}
from which (\ref{erosion}) follows.
\end{Proof}
\begin{lemma}[The eroder property]
If\label{L:Lerod} a non-constant monotonic function $\phi:\{0,1\}^{{\mathbb Z}^d}\to\{0,1\}$ satisfies (\ref{erode}), then $\phi$ is an eroder.
\end{lemma}
\begin{Proof}
Most of the argument has already been given below Lemma~\ref{L:erode}. It only remains to prove (\ref{edgespeed}). It suffices to prove the claim for $n=1$; the general claim then follows by induction. Assume that $i\in{\mathbb Z}^d$ satisfies $L_s(i)>r_s(X^0_0)-\delta_s$. We need to show that $X^0_1(i)=1$ for all such $i$. By the definition of $\delta_s$, we can choose $A\in{\cal A}(\phi)$ such that $\inf_{j\in A}L_s(j)=\delta_s$. It follows that $L_s(i+j)>r_s(X^0_0)$ for all $j\in A$ and hence $X^0_0(i+j)=1$ for all $j\in A$, which implies $X^0_1(i)=1$ by (\ref{Aphi}).
\end{Proof}
\subsection{Exponential bounds on the number of contours}\label{S:expbd}
In this subsection, we prove Lemmas~\ref{L:expbd} and~\ref{L:expbdcycle}.\medskip
\begin{Proof}[of Lemma~\ref{L:expbd}]
We first consider the case that the number of charges $\sigma$ is even. Let $T=(V,{\cal E},v_\circ,\psi)\in{\cal T}'_0$. Recall that $(V,{\cal E})$ is a directed graph with $\sigma$ types of edges, that are called charges. In $(V,{\cal E})$, all edges point in the direction from the sources to the sinks. We modify $(V,{\cal E})$ by reversing the direction of edges of the charges $\ffrac{1}{2}\sigma+1,\ldots,\sigma$. Let $(V,{\cal E}')$ denote the modified graph. In $(V,{\cal E}')$, the number of incoming edges at each vertex equals the number of outgoing edges. Since moreover the undirected graph $(V,E)$ is connected, it is not hard to see\footnote{This is a simple variation of the ``Bridges of K\"onigsberg'' problem that was solved by Euler.} that it is possible to walk through the directed graph $(V,{\cal E}')$ starting from the root using an edge of charge $1$, in such a way that each directed edge of ${\cal E}'$ is traversed exactly once.
Let $m:=\sigma n_{\rm e}(T)$ denote the total number of edges of $(V,{\cal E}')$ and for $0<k\leq m$, let $(v_{k-1},v_k)\in\vec E'_{s_k}$ denote the $k$-th step of the walk, which has charge $s_k$. Let $\delta_k:=\vec\psi(v_k)-\vec\psi(v_{k-1})$ denote the spatial increment of the $k$-th step. Note that the temporal increment is determined by the charge $s_k$ of the $k$-th step. Let $k_0,\ldots,k_{\sigma/2}$ denote the times when the walk visits the root $v_\circ$. We claim that in order to specify $(V,{\cal E},v_\circ,\psi)$ uniquely up to equivalence, in the sense defined in (\ref{equiv}), it suffices to know the sequences
\begin{equation}
(s_1,\ldots,s_m),\quad(\delta_1,\ldots,\delta_m),\quad\mbox{and}\quad(k_0,\ldots,k_{\sigma/2}).
\end{equation}
Indeed, the sinks and sources correspond to changes in the temporal direction of the walk which can be read off from the charges. Although the images under $\psi$ of sources may overlap, we can identify which edges connect to the root, and since we also know the increment of $\psi(v_k)$ in each step, all objects in (\ref{equiv}) can be identified.
The first charge $s_1$ is 1 and after that, in each step, we have the choice to either continue with the same charge or choose one of the other $\ffrac{1}{2}\sigma$ available charges. This means that there are no more than $(\ffrac{1}{2}\sigma+1)^{m-1}$ possible ways to specify the charges $(s_1,\ldots,s_m)$. Setting $M:=\big|\bigcup_{s=1}^\sigma A_s(\phi)\big|$, we see that there are no more than $M^m$ possible ways to specify the spatial increments $(\delta_1,\ldots,\delta_m)$. Since $k_0=0,k_{\sigma/2}=m$, we can roughly estimate the number of ways to specify the visits to the root from above by $n^{\sigma/2-1}$. Recalling that $m=\sigma n_{\rm e}(T)$, this yields the bound
\begin{equation}
N_n\leq n^{\sigma/2-1}(\ffrac{1}{2}\sigma+1)^{\sigma n-1}M^{\sigma n}.
\end{equation}
This completes the proof when $\sigma$ is even.
When $\sigma$ is odd, we modify $(V,{\cal E})$ by doubling all edges of charge $\sigma$, i.e., we define $(V,{\cal F})$ with
\begin{equation}
{\cal F}=(\vec F_1,\ldots,\vec F_{\sigma+1}):=(\vec E_1,\ldots,\vec E_\sigma,\vec E_\sigma),
\end{equation}
and next we modify $(V,{\cal F})$ by reversing the direction of all edges of the charges $\lceil\ffrac{1}{2}\sigma\rceil+1,\ldots,\sigma+1$. We can define a walk in the resulting graph $(V,{\cal F}')$ as before and record the charges and spatial increments for each step, as well as the visits to the root. In fact, in order to specify $(V,{\cal E},v_\circ,\psi)$ uniquely up to equivalence, we do not have to distinguish the charges $\sigma$ and $\sigma+1$. Recall that edges of the charges $\sigma$ and $\sigma+1$ result from doubling the edges of charge $\sigma$ and hence always come in pairs, connecting the same vertices. Since sinks do not overlap and since internal vertices of a given charge do not overlap, and since we traverse edges of the charges $\sigma$ and $\sigma+1$ in the direction from the sinks towards the sources, whenever we are about to traverse an edge that belongs to a pair of edges of the charges $\sigma$ and $\sigma+1$, we know whether we have already traversed the other edge of the pair. In view of this, for each pair, we only have to specify the spatial displacement at the first time that we traverse an edge of the pair. Using these considerations, we arrive at the bound
\begin{equation}
N_n\leq n^{\lceil\sigma/2\rceil-1}(\lceil\ffrac{1}{2}\sigma\rceil+1)^{(\sigma+1)n-1}M^{\sigma n}.
\end{equation}
\end{Proof}
\begin{Proof}[of Lemma~\ref{L:expbdcycle}]
The proof goes along the same lines as that of Lemma~\ref{L:expbd} for the case $\sigma$ is even. Observe that for $\sigma=2$, the walk visits the root 0 twice: $k_0=0, k_1=m$. Thus $(k_0, k_1)$ is deterministic, and we only need to specify the sequences
\begin{equation}
(s_1,\ldots,s_m),\quad(\delta_1,\ldots,\delta_m).
\end{equation}
The first charge $s_1$ is 1 and after that, in each step, we have the choice to either continue with the same charge or choose charge 2. This means that there are no more than $2^{m-1}$ possible ways to specify the charges $(s_1,\ldots,s_m)$. Once we have done that, by condition~(v) of Definition~\ref{D:strongpres} of what it means for a cycle to be strongly present, we know for each $0<k\leq m$ whether the spatial increment $\delta_k$ is in $A_1(\phi)$ or $A_2(\phi)$. Setting $M_s:=\big|A_s(\phi)\big|$ $(s=1,2)$, using the fact that $|\vec E_1|=|\vec E_2|=n_{\rm e}(T)=m/2$, we see that there are no more than $M_1^{m/2} \cdot M_2^{m/2}$ possible ways to specify $(\delta_1,\ldots,\delta_m)$. This yields the bound
\begin{equation}
N_n\leq 2^{2n-1}M_1^{n} \cdot M_2^{n}.
\end{equation}
\end{Proof}
\subsection{Finiteness of the Peierls sum}\label{S:finP}
In this subsection, we prove Proposition~\ref{P:Peifin} about the presence of a large contour. As a direct consequence of this proposition, we obtain Lemma~\ref{L:Peifin} which says that for an eroder, finiteness of the Peierls sum in (\ref{Peierls}) suffices to conclude that the intensity of the upper invariant law is positive. We also prove a stronger version of Proposition~\ref{P:Peifin}, where we show the strong presence of a Toom contour in which all sources are forks.
\medskip
\begin{Proof}[of Proposition~\ref{P:Peifin}]
Recall the definition of the modified collection of monotonic maps ${\bm{\phh}}^{(r)}$ in~\eqref{eq:modifiedbooleanmaps}. Let $\overline x^{(r)}$ denote the maximal trajectory of ${\bm{\phh}}^{(r)}$. For each integer $q\geq 0$, let $C_q:={\rm Conv}(\{qj_1,\ldots,qj_\sigma\})$. Then
\begin{equation}
C_{q+1}=\big\{i+j_s:i\in C_q,\ 1\leq s\leq\sigma\big\}\qquad(q\geq 0).
\end{equation}
Using this, it is easy to see by induction that our assumption that $\overline x^{(r)}_{-r}(i)=0$ for all $i\in C_r$ implies that $\overline x^{(r)}_{-q}(i)=0$ for all $i\in C_q$ and $0\leq q\leq r$. In particular, this holds for $q=0$, so $\overline x^{(r)}_0(0)=0$.
Using this, it is straightforward to adapt the proof of Lemma~\ref{L:explan} and show that there is an explanation graph $(U,{\cal H})$ for $(0,0)$ present in ${\bm{\phh}}^{(r)}$ which has the additional properties:
\begin{itemize}
\item $\big\{i\in{\mathbb Z}^d:(i,-q)\in U\big\}=C_q$ $(0\leq q\leq r)$,
\item $\big((i,-q),(i+j_s,-q-1)\big)\in\vec H_s$ $(0\leq q<r,\ i\in C_q)$.
\end{itemize}
In particular, these properties imply that
\begin{itemize}
\item $t\leq-r$ for all $(i,t)\in U_\ast$.
\end{itemize}
Theorem~\ref{T:contex} tells us that there is a Toom contour $(V,{\cal E},v_\circ,\psi)$ rooted at $(0,0)$ present in ${\bm{\phh}}^{(r)}$ with the additional properties that $\psi(V)\subset U$, $\psi(V_\ast)\subset U_\ast$, and $\psi(\vec E_s)\subset\psi(\vec H_s)$ for all $1\leq s\leq\sigma$. This immediately implies that $\psi_{d+1}(v)\leq-r$ for all $v\in V_\ast$.
To see that the Toom contour can be chosen such that moreover $\psi_{d+1}(v)\leq 1-r$ for all $v\in V_\circ\backslash\{v_\circ\}$, we have to look into the proof of Theorem~\ref{T:contex}. In Subsection~\ref{S:match} we defined an equivalence relation $\sim$ on the set of vertices $U$ of an explanation graph $(U,{\cal H})$. In Lemma~\ref{L:Ctree}, we showed that the set of all equivalence classes has the structure of a directed tree. If we draw time downwards, then the root of this tree lies below. In the proof of Proposition~\ref{P:match}, we constructed a Toom matching for $(U,{\cal H})$ with the property that except for the root, all other polars lie at a level above the last level where the tree still consisted of a single equivalence class. Finally, in the proof of Theorem~\ref{T:contex}, we used these polars to construct sources that lie at most one level below the corresponding polar. The upshot of all of this is that in order to show that $\psi_{d+1}(v)\leq 1-r$ for all $v\in V_\circ\backslash\{v_\circ\}$, it suffices to show that the set of vertices $\{(i,t)\in U:t=1-r\}$ forms a single equivalence class as defined in Subsection~\ref{S:match}.
To see that this indeed is the case, call two points $i=(i_1,\dots,i_\sigma),j=(j_1,\dots,j_\sigma)\in C_{r-1}$ neighbours if there exist $1\leq s_1,s_2\leq\sigma$ with $s_1\neq s_2$ such that $i_{s_1}=j_{s_1}-1$, $i_{s_2}=j_{s_2}+1$, and $i_s=j_s$ for all $s\in\{1,\ldots,\sigma\}\backslash\{s_1,s_2\}$. Define $k\in C_r$ by $k_{s_1}=j_{s_1}$, $k_{s_2}=j_{s_2}+1$, and $k_s=j_s$ for all other $s$. Then $\big((i,1-r),(k,-r)\big)\in\vec H$ and $\big((j,1-r),(k,-r)\big)\in\vec H$ which proves that $(i,1-r)\approx(j,1-r)$. Since any two points in $C_{r-1}$ are connected by a path that in each step moves from a point to a neighbouring point, this shows that $\{(i,t)\in U:t=1-r\}$ forms a single equivalence class.
To complete the proof, we need to show that if $\sigma=2$, then we can construct the Toom contour so that in addition it is strongly present in ${\bm{\phh}}^{(r)}$. We use the same explanation graph $(U,{\cal H})$ for $(0,0)$ with properties (i)--(iii) as above. Theorem~\ref{T:strex} now tells us that there is a Toom contour $(V,{\cal E},v_\circ,\psi)$ rooted at $(0,0)$ strongly present in ${\bm{\phh}}^{(r)}$ with the additional properties that $\psi(V)\subset U$, $\psi(V_\ast)\subset U_\ast$, and $\psi(\vec E_s)\subset\psi(\vec H_s)$ for all $1\leq s\leq\sigma$. This again immediately implies that $\psi_{d+1}(v)\leq-r$ for all $v\in V_\ast$, so again it remains to show that the Toom contour can be chosen such that moreover $\psi_{d+1}(v)\leq 1-r$ for all $v\in V_\circ\backslash\{v_\circ\}$.
\begin{figure}[htb]
\begin{center}
\inputtikz{large}
\caption{The Toom cycle $\psi$ described in the proof of Proposition~\ref{P:Peifin}.}
\label{fig:large}
\end{center}
\end{figure}
To see that this is the case, we have to look into the proof of Theorem~\ref{T:strex}. Instead of starting the inductive construction with the trivial Toom cycle of length zero, we claim that it is possible to start with a Toom cycle $\psi$ of length $4r$ for which all sources except the root have the time coordinate $1-r$ and all sinks have the time coordinate $-r$. Since the process of exploration and loop erasion will then only create new sources with time coordinate $-r$ or lower, the claim then follows. A Toom cycle $\psi$ with the described properties is drawn in Figure~\ref{fig:large}. More formally, this cycle has the following description. Starting from $(0,0)$, it first visits the points $(kj_1,-k)$ with $k=1,\ldots,r$. Next, it alternately visits the points $((r-k)j_1+(k-1)j_2,\,1-r)$ and $((r-k)j_1+kj_2,\,-r)$ with $k=1,\ldots,r$. Finally, it visits the points $((r-k)j_2,\,k-r)$ with $k=1,\ldots,r$, ending in $(0,0)$, where it started.
\end{Proof}
\begin{proposition}[Large contours with forks only]
Proposition~\ref{P:Peifin}\label{P:finfork} can be strengthened in the sense that all sources $v\in V\backslash\{v_\circ\}$ are forks.
\end{proposition}
\begin{Proof}
A Toom contour with two charges that is strongly present in $\Phi^{(r)}$ automatically has the property that all sources $v\in V\backslash\{v_\circ\}$ are forks, because of condition~(vi) of Definition~\ref{D:strongpres}. Thus, it suffices to prove the claim for Toom contours with three or more charges. In this case, as pointed out in the proof of Proposition~\ref{P:Peifin}, the fact that all sources $v\in V\backslash\{v_\circ\}$ are forks is an automatic result of the construction used in the proof of Theorem~\ref{T:contex}. Since we used this same construction in the proof of Proposition~\ref{P:Peifin}, the contour constructed there also has this property.
\end{Proof}
\begin{Proof}[of Lemma~\ref{L:Peifin}]
Let
\begin{equation}
{\cal T}'_{0,r}:=\big\{(V,{\cal E},v_\circ,\psi)\in{\cal T}'_0:\psi_{d+1}(v)\leq-r
\mbox{ for all }v\in V_\ast\big\}.
\end{equation}
By assumption, $\displaystyle\sum_{T\in{\cal T}'_0}p^{n_\ast(T)}<\infty$, so we can choose $r$ sufficiently large such that
\begin{equation}
\varepsilon:=\sum_{T\in{\cal T}'_{0,r}}p^{n_\ast(T)}<1.
\end{equation}
Fix $j_s\in A_s(\phi)$ $(1\leq s\leq\sigma)$ and set $\Delta_r:={\mathbb Z}^d\cap{\rm Conv}(\{rj_1,\ldots,rj_\sigma\})$. Then Proposition~\ref{P:Peifin} allows us to estimate
\begin{equation}
\P\big[\overline X_{-r}(i)=0\ \forall i\in\Delta_r\big]
\leq\sum_{T\in{\cal T}'_{0,r}}\P\big[T\mbox{ is present in }\Phi^{(r)}\big]\leq\varepsilon,
\end{equation}
where in the last step we have used that $\psi_{d+1}(v)\leq-r$ for all $v\in V_\ast$ and hence all sinks of $V$ must be mapped to space-time points $(i,t)$ where $\Phi^{(r)}_{(i,t)}=\Phi_{(i,t)}$. By translation invariance,
\begin{equation}
\P\big[\overline X_{-r}(i)=1\mbox{ for some }i\in\Delta_r\big]
\leq\sum_{i\in\Delta_r}\P\big[\overline X_{-r}(i)=1\big]
=|\Delta_r|\P\big[\overline X_0(0)=1\big].
\end{equation}
Combining this with our previous formulas, we see that
\begin{equation}
\overline\rho(p)=\P\big[\overline X_0(0)=1\big]\geq|\Delta_r|^{-1}(1-\varepsilon)>0.
\end{equation}
For Toom contours with two charges, Proposition~\ref{P:Peifin} guarantees the strong presence of a large Toom contour, so we can argue similarly, replacing ${\cal T}'_0$ by ${\cal T}''_0$.
\end{Proof}
\noindent
\textbf{Remark} In Peierls arguments, it is frequently extremely helpful to be able to draw conclusions based only on the fact that the Peierls sum is finite (but not necessarily less than one). These sorts of arguments played an important role in \cite{KSS14}, where we took inspiration for Lemma~\ref{L:Peifin}, and can be traced back at least to \cite[Section~6a]{Dur88}.
\section{Cooperative branching and the identity map}\label{S:intbd}
In this section, we study the monotone random cellular automaton that applies the maps $\phi^0,\phi^{\rm id}$, and $\phi^{\rm coop,d}$ with probabilities $p,q,r$, respectively. For each $p,r\geq 0$ such that $p+r\leq 1$, let $\overline\rho(p,r)$ denote the intensity of the upper invariant law of the process with parameters $p,1-p-r,r$. For each $0\leq r<1$, there exists a $p_{\rm c}(r)\in[0,1-r]$ such that $\overline\rho(p,r)>0$ for $0\leq p<p_{\rm c}(r)$ and $\overline\rho(p,r)=0$ for $p_{\rm c}(r)<p\leq 1-r$. We give lower bounds on $p_{\rm c}(r)$.
Recall from Subsection~\ref{S:intrins} that we set $\sigma=2$ and for the sets $A_s(\phi_k)$ in (\ref{As}) we make the choices
\begin{equation}\begin{array}{ll}\label{A12}
\displaystyle A_1(\phi^{\rm id}):=A_1,\quad& A_2(\phi^{\rm id}):=A_1,\\[5pt]
\displaystyle A_1(\phi^{\rm coop,d}):=A_1,\quad& A_2(\phi^{\rm coop, d}):=A_2,
\end{array}\ee
with $A_1:=\{0\}$ and $A_2:=\{e_1,\dots, e_d\}$. Let $\Phi=(\Phi_{(i,t)})_{(i,t)\in{\mathbb Z}^{d+1}}$ be an i.i.d.\ collection of monotonic maps so that $\P[\Phi_{(i,t)}=\phi^0]=p$, $\P[\Phi_{(i,t)}=\phi^{\rm id}]=q$, and $\P[\Phi_{(i,t)}=\phi^{\rm coop, d}]=r$. We let ${\cal T}_0$ denote the set of Toom contours $(V, \mathcal E, 0, \psi)$ rooted at the origin with respect to the given choice of $\sigma$ and the sets $A_s(\phi_k)$ in~\eqref{A12}. Theorem~\ref{T:contour} then implies the Peierls bound
\begin{equation}\label{strPeicoop}
1-\overline\rho \leq \sum_{T\in{\cal T}_0}\P\big[T\mbox{ is strongly present in }\Phi\big].
\end{equation}
In the remainder of this section, we give an upper bound on this expression.
Recall from Subsection~\ref{S:Tcycles} that if we reverse the direction of edges of charge 2, then the Toom graph becomes a directed cycle with edge set $\vec E_1 \cup\cev E_2$. For any set $A\subset{\mathbb Z}^d$, let us write $-A:=\{-i:i\in A\}$. For any $(v, w)\in \vec E_1 \cup\cev E_2$ we say that~$\psi\big((v, w)\big)$ is
\begin{enumerate}
\item \emph{outward}, if $\psi_3(w)=\psi_3(v)-1$ and $\vec\psi(w)-\vec\psi(v)\in A_2$,
\item \emph{upward}, if $\psi_3(w)=\psi_3(v)-1$ and $\vec\psi(w)-\vec\psi(v)\in A_1$,
\item \emph{inward}, if $\psi_3(w)=\psi_3(v)+1$ and $\vec\psi(w)-\vec\psi(v)\in -A_2$,
\item \emph{downward}, if $\psi_3(w)=\psi_3(v)+1$ and $\vec\psi(w)-\vec\psi(v)\in -A_1$.
\end{enumerate}
The use of the words ``upward'' and ``downward'' is inspired by our habit of drawing negative time upwards in pictures. As $|A_2|=d$, we distinguish $d$ types of outward and inward edges: we say that~$\psi\big((v, w)\big)$ is type $i$, if $|\vec\psi(w)-\vec\psi(v)|=e_i$. Our definitions in (\ref{A12}) together with Definitions~\ref{D:present} and \ref{D:strongpres} imply that a Toom contour is strongly present in $\Phi$ if and only if the following conditions are satisfied:
\begin{itemize}
\item[{\rm(i)}] $\displaystyle\Phi_{\psi(v)}=\phi^0$ for all $\displaystyle v\in V_\ast$,
\item[{\rm(iia)}] $\displaystyle\Phi_{\psi(v)}\in\{\phi^{\rm id},\phi^{\rm coop, d}\}$ for all $\displaystyle v\in V_1\cup V_2\cup \{v_\circ\}$,
\item[{\rm(iib)}] $\displaystyle\Phi_{\psi(v)}=\phi^{\rm coop, d}$ for all $\displaystyle v\in V_\circ\backslash\{v_\circ\}$,
\item[{\rm(iiia)}] If $(v, w)\in \vec E^\ast_1$, then $\psi\big((v, w)\big)$ is upward,
\item[{\rm(iiib)}] If $(v, w)\in \cev E^\ast_2$, then $\displaystyle\left\{\begin{array}{ll}\psi\big((v, w)\big)\mbox{ is downward }\quad\mbox{if }\Phi_{\psi(w)}=\phi^{\rm id},\\ \psi\big((v, w)\big)\mbox{ is inward }\quad\mbox{if }\Phi_{\psi(w)}=\phi^{\rm coop, d},\end{array}\right.$
\item[{\rm(iva)'}] If $(v, w)\in \vec E^\circ_1$, then $\psi\big((v, w)\big)$ is outward,
\item[{\rm(ivb)'}] If $(v, w)\in \cev E^\circ_2$, then $\psi\big((v, w)\big)$ is downward,
\end{itemize}
where $\vec E^\circ_i$ and $\vec E^\ast_i$ are defined in~\eqref{Ecirc}.
If $(V, {\cal E}, v_\circ, \psi)$ is a Toom contour rooted at 0 that is strongly present in $\Phi$, then we can fully specify $\psi$ by saying for each $(v, w)\in \vec E_1\cup\cev E_2$ whether $\psi\big((v, w)\big)$ is upward, downward, inward or outward, and its type in the latter two cases.
In other words, we can represent the contour by a word of length $n$ consisting of the letters from the alphabet $\{o_1,\dots,o_d,u,d,i_1,\dots,i_d\}$, which represents the different kinds of steps the cycle can take.
Then we obtain a word consisting of the letters $o_1,\dots,o_d,u,d,i_1,\dots,i_d$ that must satisfy the following rules:
\begin{itemize}
\item Each outward step must be immediately preceded by a downward step.
\item Between two occurrences of the string $do_\cdot$, and also before the first occurrence of $do_\cdot$ and after the last occurrence, we first see a string consisting of the letter $u$ of length $\geq 0$, followed by a string consisting of the letters $d,i_1,\dots, i_d$, again of length $\geq 0$.
\end{itemize}
So, for example the contour in the middle of Figure~\ref{fig:minexpl} is described by the following word:
\begin{equation}
\underbrace{uuuu}\underbrace{do_1}\underbrace{do_1}\underbrace{uu}\underbrace{do_2}\underbrace{do_2}\underbrace{di_1}\underbrace{di_1}\underbrace{do_1}\underbrace{do_1}\underbrace{di_1}\underbrace{di_1}\underbrace{di_2}\underbrace{di_2}.
\end{equation}
We call a sequence of length $\geq 0$ of consecutive downward/upward steps a downward/upward segment. We can alternatively represent $\psi$ by a word of length $n$ consisting of the letters from $\{o_1,\dots,o_d,U,D,i_1,\dots,i_d, i^\circ_1,\dots,i^\circ_d\}$, where $U$ and $D$ represent upward and downward segments. Let us for the moment ignore the $\circ$ superscripts.
Then we can obtain a word consisting of these letters that must satisfy the following rules:
\begin{itemize}
\item Each outward step must be immediately preceded by a downward segment of length $\geq 1$ and followed by an upward segment of length $\geq 0$.
\item The first step is an upward segment.
\item Between two occurrences of the string $Do_\cdot U$, and also before the first and after the last occurrence, we see a sequence of the string $Di_\cdot$ of length $\geq 0$.
\item The last step is a downward segment.
\end{itemize}
We add the superscript $\circ$ to each inward step whose endpoint overlaps with the image of a source other than the root already visited by the cycle in one of the previous steps.
For any Toom contour $T$ denote by $W(T)$ the corresponding word satisfying these rules. The structure of such a representation of a contour becomes more clear if we indicate the vertices in $V_1,V_2,V_\ast$, and $V_\circ$ with the symbols $1,2,\ast,\circ$, respectively. Then the contour in the middle of Figure~\ref{fig:minexpl} is described by the following word:
\begin{equation}\label{eq:discreteword}
\accentset{\circ}{|} U \accentset{\ast}{|} \underbrace{D\accentset{\circ}{|} o_1 \accentset{1}{|} U} \accentset{\ast}{|} \underbrace{D\accentset{\circ}{|} o_1 \accentset{1}{|} U} \accentset{\ast}{|} \underbrace{D\accentset{\circ}{|} o_2 \accentset{1}{|} U} \accentset{\ast}{|} \underbrace{D\accentset{\circ}{|} o_2 \accentset{1}{|} U} \accentset{\ast}{|} \underbrace{D\accentset{2}{|} i_1} \accentset{2}{|} \underbrace{D\accentset{2}{|} i_1} \accentset{2}{|} \underbrace{D\accentset{\circ}{|} o_1 \accentset{1}{|} U} \accentset{\ast}{|} \underbrace{D\accentset{\circ}{|} o_1 \accentset{1}{|} U} \accentset{\ast}{|} \underbrace{D\accentset{2}{|} i^\circ_1} \accentset{2}{|} \underbrace{D\accentset{2}{|} i_1} \accentset{2}{|} \underbrace{D\accentset{2}{|} i_2} \accentset{2}{|} \underbrace{D\accentset{2}{|} i_2}
\accentset{2}{|} D\accentset{\circ}{|}.
\end{equation}
Finally, let $l^+(T), l^-(T)$ and $l^{-,\circ}(T)$ denote the vectors containing the lengths of the upward segments, downward segments followed by $o_\cdot$ or $i_\cdot$ and downward segments followed by $i^\circ_\cdot$ respectively in the order we encounter them along the cycle. For the example above we have:
\begin{equation}\begin{aligned}
l^+(T)=&(4,0,2,0,0,0,0),\\
l^-(T)=&(1,1,1,1,0,0,1,1,0,0,0,0,0),\\
l^{-,\circ}(T)=&(0).
\end{aligned}
\end{equation}
\begin{claim}\label{claim:downwardlength}
Let $T$ be a Toom contour strongly present in $\Phi$ rooted at 0. Then $W(T), l^+(T)$ and $l^-(T)$ uniquely determine $(V, {\cal E}, 0, \psi)$.
\end{claim}
\begin{Proof}
Knowing the word describing $T$ together with the lengths of all upward and downward segments uniquely determines the contour, so it is enough to show that $W(T), l^+(T)$ and $l^-(T)$ determines $l^{-,\circ}(T)=(l_1, \dots, l_j)$ $(j\geq 0)$.
Assume we know $l_1,\dots, l_i$ for some $0\leq i<j$. We then know the length and type of each step along the cycle up to the downward segment corresponding to $l_{i+1}$, that is we know the coordinates of its starting point. This downward segment ends at a charge 2 internal vertex, and the consecutive step is inward ending at a source other than the root already visited by the cycle. The cycle enters each such source by a downward step and leaves it by an outward step, hence by the structure of the explanation graph the endpoints of this outward step must coincide with the endpoints of the inward step following the downward segment with length $l_{i+1}$. As each outward step is followed by an upward segment, the starting point of the consecutive upward segment must be the endpoint of our downward segment. The endpoint of every upward segment is a defective site, and each site along a downward segment (except maybe its endpoints) is an identity site, so this upward segment must contain every site of our downward segment. Furthermore, by (iii) of Definition~\ref{def:embedding} of an embedding there cannot be any other upward segment that overlaps with this downward segment. Therefore, given the starting coordinates of our downward segment, we check which upward segment visited these coordinates previously, and we let $l_{i+1}$ be the distance between the starting points of this upward segment and our downward segment.
\end{Proof}
By a small abuse of notation, let us also use the letters $o,i$ to indicate the number of times the symbols $o, i$ occur in our representation of the contour (regardless of the sub- and superscripts). As our contour is a cycle starting and ending at 0, we must have the same number of inward and outward steps, furthermore, the total lengths of upward and downward segments must be equal as well:
\begin{equation}\label{eq:inoutward}
o=i\quad\mbox{and}\quad \|l^+(T)\|_1 =\| l^-(T)\|_1+\|l^{-,\circ}(T)\|_1.
\end{equation}
We observe that each source (other than the root) is followed by an outward step, thus
\begin{equation}\label{eq:inwarddefective}
|V_\circ|=|V_\ast|=i+1.
\end{equation}
Finally, in the representation $W(T)$ of a contour the first and last step is $U$ and $D$ respectively, and in between $i$ strings of $DoU$ alternate with $i$ strings of $Di$. Thus, letting $0\leq j\leq i$ denote the number of inward steps with the superscript $\circ$ and using \eqref{eq:inoutward} we have
\begin{equation}
l^+(T) \in \big({\mathbb Z}^+\cup\{0\}\big)^{i+1}, \quad l^-(T)\in \big({\mathbb Z}^+\cup\{0\}\big)^{2i-j+1},\quad l^{-,\circ}(T)\in \big({\mathbb Z}^+\cup\{0\}\big)^{j}.
\end{equation}
Let $W(i,j)$ denote the number of different words that have $i$ inward steps and $j$ inward steps with the superscript $\circ$ made from the alphabet $\{o_1,\dots,o_d,U,D,i_1,\dots,i_d, i^\circ_1,\dots,i^\circ_d\}$ that satisfy our rules.
\begin{claim}\label{claim:Wij}
For all $0\leq i,\; 0\leq j\leq i$ we have
\begin{equation}\label{eq:Wij}
W(i,j)\leq \binom{2i}{i}\binom{i}{j}d^{2i-j}.
\end{equation}
\end{claim}
\begin{Proof}
In any $\mathcal W\in W(i, j)$ the first and last step is $U$ and $D$ respectively, and in between $i$ strings of $DoU$ alternate with $i$ strings of $Di$. Thus (ignoring the super- and subscripts) we can arrange these strings in $\binom{2i}{i}$ possible ways. We then choose $j$ inward steps to which we add the superscript $\circ$, this can be done in $\binom{i}{j}$ ways. Finally, we can assign the $o$'s and $i$'s subscripts $1,\dots, d$ one by one. As we have seen in the proof of Claim~\ref{claim:downwardlength}, an inward step with the superscript $\circ$ overlaps with an outward step previously visited by the cycle, so the type of this inward step is the same as the type of that outward step. Hence we can assign the types of $o$'s and $i$'s in $d^{2i-j}$ different ways.
\end{Proof}
\begin{claim}\label{claim:wordprob}
Let $\mathcal W\in W(i, j)$ for some $0\leq i,\; 0\leq j\leq i$. Then
\[
\sum_{T\in \mathcal T_0: W(T)=\mathcal W}\P\big[T\mbox{ is strongly present in }\Phi\big]\leq\binom{3i-j}{i} p^{i+1}r^{2i-j}\left(\frac 1 {1-q} \right)^{3i-j+1}.
\]
\end{claim}
Using $q=1-p-r$ and Claim~\ref{claim:Wij} we can estimate the Peierls sum in (\ref{strPeicoop}) from above by
\begin{equation}\label{Prq}
\begin{aligned}
\sum_{i=0}^\infty\sum_{j=0}^i W(i,j) \binom{3i-j}{i} p^{i+1}r^{2i-j}\left(\frac 1 {1-q} \right)^{3i-j+1}\\
< \frac {p} {p+r}\sum_{i=0}^\infty \left(\frac {16dpr\big((2d+1)r+p\big)} {(p+r)^3} \right)^{i}.
\end{aligned}\end{equation}
For any fixed $r$ this sum is finite as soon as $p<\big(\sqrt{(d+0.5)^2+1/(16d)}-d-0.5\big)r$. In particular for $d=2$ we obtain the following bound on the critical parameter
\[p_c(r)> 0.00624 r.\]
\begin{Proof}[Proof of Claim~\ref{claim:wordprob}] The idea of the proof is similar to that of Lemma 9 in~\cite{GG82}.
As the Toom cycle $T$ is strongly present in $\Phi$, each sink is mapped to a defective site, and each inward step ends and each outward step starts at a site where the cooperative branching map is applied. The definition of an embedding entails that sinks do not overlap, so using~\eqref{eq:inwarddefective} they contribute to a factor $p^{i+1}$. To estimate the contribution of the in- and outward steps, we need to recall the construction of the Toom cycle in Section~\ref{S:Tcycles}. We inductively add edges to the cycle by exploring its previously unexplored sites one by one. At an exploration step, starting at the site we are exploring, an upward, a downward, an outward and an inward step is added in this order. Although during the loop erasure some of these steps might be erased, their relative order in the cycle does not change and the site is not visited again in later iterations. Therefore, each site is the starting point of at most one outward step and the endpoint of at most one inward step, and if both steps are present, the outward step is always visited first by the cycle. As outward steps start at a source, there are $i$ inward and outward steps and $j$ inward steps with the superscript $\circ$, we have that these steps contribute to a factor $r^{2i-j}$. Finally, the strong presence of $T$ implies that every downward step, except for the ones ending at a source other than the root, ends at a site where the identity map is applied. Equation~\eqref{eq:inoutward} then yields that downward segments contribute to a factor $q^{\|l^+(T)\|_1-i}$. Let
\begin{equation}
\mathcal L(\mathcal W):=\{(l^+(T), l^-(T)): W(T)=\mathcal W\}
\end{equation}
Recall that by Claim~\ref{claim:downwardlength} $W(T)=\mathcal W, l^+(T)$ and $l^-(T)$ uniquely specify the Toom contour $T$. We then have
\begin{equation}
\sum_{T\in \mathcal T_0: W(T)=\mathcal W}\P\big[T\mbox{ is strongly present in }\Phi\big]\leq p^{i+1}r^{2i-j} \sum_{(l^+, l^-)\in \mathcal L(\mathcal W)}q^{\|l^+\|_1-i}.
\end{equation}
It remains to show that
\begin{equation}\label{eq:endproof}
q^{-i}\sum_{(l^+, l^-)\in \mathcal L(\mathcal W)}q^{\|l^+\|_1}\leq\binom{3i-j}{i}\left(\frac 1 {1-q} \right)^{3i-j+1}.
\end{equation}
From now on, we will omit the last coordinate of $l^-$. As we have seen in the proof of Claim~\ref{claim:downwardlength}, to determine the lengths in $l^{-,\circ}$ it is enough to know the type and length of each step along the cycle up to the corresponding downward step. Therefore, when the cycle visits the last downward segment, the length of every other down- and upward segment is already known. By~\eqref{eq:inoutward} we then have $l^-_{2i-j+1}=\|l^+\|_1-\|l^{-,\circ}\|_1-l^-_1-\dots-l^-_{2i-j}$. By a small abuse of notation we will denote $l^-=(l^-_1,\dots, l^-_{2i-j})$ and $l^+=(l^+_1, \dots, l^+_{i+1})$.
Given~$l^-$ and $l^+$ we merge all the lengths into a single vector in a certain order, that is we inductively construct two vectors $k\in \big(\mathbb Z^+\cup \{0\}\big)^{3i-j+1}$ and $k^\pm\in\{1,-1\}^{3i-j+1}$ in the following way.
We let $K_0=k_0^+=k_0^-=0$ and for each $1\leq s\leq 3i-j+1$
\begin{itemize}
\item if $l^-_{s-1}-l^+_{s-1}> K_{s-1}$ or $l^-_{s-1}-l^+_{s-1}= K_{s-1} < 0$, then
\[k_s:=l^+_{k^+_{s-1}+1}, \quad k^\pm_s:=1, \quad k^+_s:=k^+_{s-1}+1, \quad k^-_s:=k^-_{s-1},\]
\item otherwise
\[k_s:=l^-_{k^-_{s-1}+1}, \quad k^\pm_s:=-1, \quad k^+_s:=k^+_{s-1}, \quad k^-_s:=k^-_{s-1}+1,\]
\end{itemize}
and we let
\[K_s:=K_{s-1}+k_sk^\pm_s.
\]
Finally we let $k:=(k_1,\dots, k_{3i-j+1})$ and $k^\pm:=(k^\pm_1,\dots, k^\pm_{3i-j+1})$. Note that each element $k_s^\pm$ is 1 or -1, depending on whether $k_s$ was chosen from $l^+$ or $l^-$ respectively, furthermore, the vectors $k$ and $k^\pm$ satisfy the property
\begin{equation}\label{eq:kproperty}
K_s\geq0 \quad\mbox{iff}\quad k^\pm_s=1\qquad\forall s.
\end{equation}
Informally, this means that we rearrange the lengths such that every upward step ends at a non-negative height and every downward step ends at a negative height. As $K_{3i-j+1}=\|l^+\|_1-\|l^-\|_1\geq 0$, this implies that $k^\pm_{3i-j+1}=1$, that is the last element of $k$ is an upward length. Let us further denote the sum of upward and downward lengths in $k$ up to coordinate $s$ by
\begin{equation}\begin{aligned}
K_s^+&:=k_1 \mathbbm{1}\{k^\pm_1=1\}+\dots+k_s \mathbbm{1}\{k^\pm_s=1\},\\
K_s^-&:=k_1 \mathbbm{1}\{k^\pm_1=-1\}+\dots+k_s \mathbbm{1}\{k^\pm_s=-1\}.
\end{aligned}\end{equation}
Clearly,~$K_s^+\geq K_{s-1}^+$ and~$K_s^-\geq K_{s-1}^-$ for each $s$. Furthermore,~\eqref{eq:kproperty} implies
\begin{equation}\label{eq:kpmbounds}
\begin{cases}K_{s-1}^+< K_{s-1}^-\leq K_{s}^+, \quad &\text{if } k^\pm_{s-1}=-1, k^\pm_{s}=1,\\
K_{s-1}^-\leq K_{s-1}^+< K_{s}^-, \quad &\text{if } k^\pm_{s-1}=1, k^\pm_{s}=-1.
\end{cases}
\end{equation}
Let $\mathcal K$ denote the set of all pairs of vectors $(k, k^\pm)$ such that $k\in \big(\mathbb Z^+\cup \{0\}\big)^{3i-j+1},k^\pm\in\{1,-1\}^{3i-j+1}$ and that satisfy property~\eqref{eq:kproperty}, and let $\mathcal K^\pm$ denote the set of all vectors $k^\pm$ that contain $2i-j$ (-1)'s and $i+1$ 1's such that $k^\pm_{3i-j+1}=1$. We then can further bound
\begin{equation}\label{eq:endproof2}
\sum_{(l^+, l^-)\in \mathcal L(\mathcal W)}q^{\|l^+\|_1}\leq
\sum_{k^\pm\in\mathcal K^\pm}\sum_{k: (k, k^\pm)\in\mathcal K} q^{K^{+}_{3i-j+1}}.
\end{equation}
Let us fix for the moment the vector $k^\pm$ and consider the sum
\begin{equation}\label{eq:k1k2k3}
\sum_{k: (k, k^\pm)\in\mathcal K} q^{K^{+}_{3i-j+1}}=\sum_{k_1\in\mathcal K_1}\dots \sum_{k_{3i-j+1}\in\mathcal K_{3i-j+1}}q^{K^{+}_{3i-j+1}},
\end{equation}
where $\mathcal K_s(k_1,\dots, k_{s-1})$ denotes the set of all the possible $k_s$'s given the first $s-1$ coordinates of $k$. For any $k^\pm_s=1$, we can estimate
\begin{equation}
\sum_{k_{s-1}\in\mathcal K_{s-1}}\sum_{k_{s}\in\mathcal K_s} q^{K^{+}_s}\leq
\begin{cases}\begin{aligned}
\sum_{k_{s-1}\in\mathcal K_{s-1}}\sum_{K_{s}^+=K_{s-1}^+}^\infty q^{K^{+}_s}= \frac{1}{1-q}\sum_{k_{s-1}\in\mathcal K_{s-1}}q^{K^{+}_{s-1}} \quad &\mbox{if }k^\pm_{s-1}=1, \\
\sum_{k_{s-1}\in\mathcal K_{s-1}}\sum_{K_{s}^+=K_{s-1}^-}^\infty q^{K^{+}_s}= \frac{1}{1-q}\sum_{k_{s-1}\in\mathcal K_{s-1}}q^{K^{-}_{s-1}} \quad &\mbox{if }k^\pm_{s-1}=-1,
\end{aligned}
\end{cases}
\end{equation}
by a change of variable and using~$K_s^+\geq K_{s-1}^+$ in the first case and~\eqref{eq:kpmbounds} in the second. Similarly, for any $k^\pm_s=-1$, we can estimate
\begin{equation}
\sum_{k_{s-1}\in\mathcal K_{s-1}}\sum_{k_{s}\in\mathcal K_s} q^{K^{-}_s}\leq
\begin{cases}\begin{aligned}
\sum_{k_{s-1}\in\mathcal K_{s-1}}\sum_{K_{s}^-=K_{s-1}^+}^\infty q^{K^{-}_s}= \frac{1}{1-q}\sum_{k_{s-1}\in\mathcal K_{s-1}}q^{K^{+}_{s-1}} \quad &\mbox{if }k^\pm_{s-1}=1, \\
\sum_{k_{s-1}\in\mathcal K_{s-1}}\sum_{K_{s}^-=K_{s-1}^-}^\infty q^{K^{-}_s}= \frac{1}{1-q}\sum_{k_{s-1}\in\mathcal K_{s-1}}q^{K^{-}_{s-1}} \quad &\mbox{if }k^\pm_{s-1}=-1.
\end{aligned}
\end{cases}
\end{equation}
Finally, if a length $k_s$ with $k^\pm_s=-1$ corresponds to a downward segment ending at a source (of which we have $i$ in total), we have $k_s\geq 1$. Then we can bound $K_s^-\geq K_{s-1}^-+1$ if $k^\pm_{s-1}=-1$, and $K_s^-\geq K_{s-1}^++1$ if $k^\pm_{s-1}=1$, as we have a strict inequality in~\eqref{eq:kpmbounds} in this case. Thus these downward segments will each contribute to an additional factor of $q$.
As $k^\pm_{3i-j+1}=1$, we can repeatedly apply these formulas in~\eqref{eq:k1k2k3} for all $s$ to obtain the upper bound $q^i\big(\frac{1}{1-q}\big)^{3i-j+1}$. Observing that $|\mathcal K^\pm|=\binom{3i-j}{i}$ and using~\eqref{eq:endproof2} we can conclude~\eqref{eq:endproof}.
\end{Proof}
\section{Continuous time}\label{S:cont}
\subsection*{Outline}
In this section, we consider monotone interacting particle systems with a finite collection $\phi_0, \phi_1,\ldots,\phi_m$ of monotonic maps such that $\phi_0=\phi^0$, $\phi_k\neq \phi^{\text{id}}$ for any $1\leq k\leq m$, and a collection of nonnegative rates $r_0, r_1,\ldots,r_m$, evolving according to~\eqref{traj}. We extend the definition of Toom contours to continuous time, and show how to use them to obtain explicit bounds for certain models.
\subsection{Toom contours in continuous time}
Recall Definition~\ref{def:toomgraph} of a Toom graph $(V,\mathcal E)=(V, \vec E_1,\dots,\vec E_\sigma)$ with $\sigma$ charges and the definition of sources, sinks and internal vertices in~\eqref{eq:sourcesinkint}. \textit{Continuous Toom contours} are Toom graphs embedded in space-time $\mathbb Z^d\times \mathbb R$.
\begin{defi}\label{def:contembedding}
A \emph{continuous embedding} of $(V,{\cal E})$ is a map
\begin{equation}\label{psicontin}
V\ni v\mapsto\psi(v)=\big(\vec\psi(v),\psi_{d+1}(v)\big)\in{\mathbb Z}^d\times{\mathbb R}
\end{equation}
that has the following properties:
\begin{enumerate}
\item either $\displaystyle\psi_{d+1}(w)<\psi_{d+1}(v)$ and $\vec\psi(w)=\vec\psi(v)$, or $\displaystyle\psi_{d+1}(w)=\psi_{d+1}(v)$ and $\vec\psi(w)\neq\vec\psi(v)$ for all $(v,w)\in\vec E$,
\item $\psi(v_1)\neq\psi(v_2)$ for each $v_1\in V_\ast$ and $v_2\in V$ with $v_1\neq v_2$,
\item $\psi(v_1)\neq\psi(v_2)$ for each $v_1,v_2\in V_s$ with $v_1\neq v_2$ $(1\leq s\leq\sigma)$,
\item $\psi_{d+1}(v_3)\notin\big(\psi_{d+1}(v_2), \psi_{d+1}(v_1)\big)$ for each $(v_1, v_2)\in \vec E_s, \; v_3\in V_s \cup V_\ast$ with $\vec\psi(v_1)=\vec\psi(v_2)=\vec\psi(v_3)$ $(1\leq s\leq\sigma)$.
\end{enumerate}
\end{defi}
\noindent
We call $\psi((v, w))=(\psi(v), \psi(w))$ a \textit{vertical segment}, if $\displaystyle\psi_{d+1}(w)<\psi_{d+1}(v)$, and a \textit{horizontal segment}, if $\displaystyle\psi_{d+1}(w)=\psi_{d+1}(v)$. Then (i) implies that $\psi(\vec E)$ is the union of vertical and horizontal segments. Property (iv) says that an internal vertex of charge $s$ or a sink is not mapped into a point of a vertical segment in $\psi(\vec E_s)$ $(1\leq s\leq\sigma)$. Note that, unlike in the discrete time case, this definition of an embedding does not imply $|\vec E_1|=\dots=|\vec E_\sigma|$.
\begin{defi}\label{def:conttoomcontour}
A \emph{continuous Toom contour} is a quadruple $(V,{\cal E},v_\circ,\psi)$, where $(V,{\cal E})$ is a connected Toom graph, $v_\circ\in V_\circ$ is a specially designated source, and $\psi$ is a continuous embedding of $(V,{\cal E})$ that has the additional property that:
\begin{enumerate}\addtocounter{enumi}{4}
\item $\psi_{d+1}(v_\circ)>t$ for each $(i,t)\in\psi(V)\backslash\psi(\{v_\circ\})$.
\end{enumerate}
\end{defi}
\noindent
We set
\be\begin{array}{r@{\,}c@{\,}l}
\displaystyle V_\text{vert}&:=&\displaystyle\big\{v\in V: \psi((w, v)) \mbox{ is a vertical segment for some } (w, v)\in\vec E\big\},\\[5pt]
\displaystyle V_\text{hor}&:=&\displaystyle\big\{v\in V: \psi((v, w)) \mbox{ is a horizontal segment for some } (v, w)\in\vec E\big\},
\end{array}\ee
that is $V_\text{vert}$ is the set of vertices in $V$ whose images under~$\psi$ are the endpoints of a vertical segment, and $V_\text{hor}$ is the set of vertices in $V$ whose images under~$\psi$ are the starting points of a horizontal segment.
We let $\P^\mathbf r$ with $\mathbf r=(r_0, \dots, r_m)$ be a probability measure under which we define a family of independent Poisson processes on ${\mathbb R}$:
\begin{equation}\label{eq:poi}
\mathbf P_{i, k}\text{ for } i \in \mathbb Z^d,\; 0\leq k\leq m, \quad \text{each with rate }r_k.
\end{equation}
We regard each $\mathbf P_{i, k}$ as a random discrete subset of ${\mathbb R}$. Note that $\mathbb P^{\mathbf r}$-a.s. these sets are pairwise disjoint. $\mathbf P=\big(\mathbf P_{(i,k)}\big)_{i\in{\mathbb Z}^{d}, 0\leq k\leq m}$ almost surely determines a stationary process $(\overline X_t)_{t\in{\mathbb R}}$ that at each time $t$ is distributed according to the upper invariant law $\overline\nu$.
As in the discrete time case, we need a special construction of this process. Let $\mathcal P=\big(\mathcal P_{i, k}\big)_{i\in{\mathbb Z}^d, 0\leq k\leq m}$ denote a realization of the Poisson processes. We will call a point in $\mathcal P_{i, k} \; (i\in {\mathbb Z}^d)$ a \textit{type $k$ arrival point}, and call type 0 arrival points \textit{defective points}. Furthermore, let $\{0,1\}^{{\mathbb Z}^{d}\times {\mathbb R}}$ denote the space of all space-time configurations $x=(x_t(i))_{i\in{\mathbb Z}^{d}, t\in {\mathbb R}}$. For $x\in\{0,1\}^{{\mathbb Z}^{d}\times {\mathbb R}}$ and $t\in{\mathbb R}$, we define $x_t\in\{0,1\}^{{\mathbb Z}^d}$ by $x_t:=(x_t(i))_{i\in{\mathbb Z}^d}$. By definition, a \emph{trajectory} of $\mathcal P$ is a space-time configuration $x$ such that
\begin{equation}
x_t(i)=\begin{cases}
\phi_{k}(\theta_ix_{t-})\qquad &\forall \; 0\leq k\leq m,\; t\in\mathcal P_{i, k}, \\
x_{t-}(i)\qquad&\mbox{otherwise.}
\end{cases}
\quad\big((i,t)\in{\mathbb Z}^{d}\times {\mathbb R}\big)
\end{equation}
We have the following continuous-time equivalents of Lemmas~\ref{L:maxtraj} and~\ref{L:maxup}.
\begin{lemma}[Minimal and maximal trajectories]
Let $\mathcal P$ be a realization of the Poisson processes defined in~\eqref{eq:poi}. Then there exist trajectories $\underline x$ and $\overline x$ that are uniquely characterised by the property that each trajectory $x$ of $\mathcal P$ satisfies $\underline x\leq x\leq\overline x$ (pointwise).
\end{lemma}
\begin{lemma}[The lower and upper invariant laws]
Let $\phi_0,\ldots,\phi_m$ be monotonic functions, let $r_0,\ldots,r_m$ be nonnegative rates, and let $\underline\nu$ and $\overline\nu$ denote the lower and upper invariant laws of the corresponding monotone interacting particle system. Let $\mathbf P=\big(\mathbf P_{(i,k)}\big)_{i\in{\mathbb Z}^{d}, 0\leq k\leq m}$ be a family of independent Poisson processes, each with rate $r_k$, and let $\underline X$ and $\overline X$ be the minimal and maximal trajectories of $\mathbf P$. Then for each $t\in{\mathbb R}$, the random variables $\underline X_t$ and $\overline X_t$ are distributed according to the laws $\underline\nu$ and $\overline\nu$, respectively.
\end{lemma}
\noindent
We omit the proofs, as they go along the same lines as that of the discrete time statements.
From now on, we fix a realization $\mathcal P$ of the Poisson processes such that the sets $\mathcal P_{i,k}$ are pairwise disjoint. Recall the definition of ${\cal A}(\phi_k)$ in (\ref{Aphi}). We fix an integer $\sigma\geq 2$ and for each $1\leq k\leq m$ and $1\leq s\leq\sigma$ we choose a set
\begin{equation}\label{Ascontin}
A_s(\phi_k)\in{\cal A}(\phi_k).
\end{equation}
\begin{defi}\label{def:conttoomcontourpresent}
A continuous Toom contour $(V,{\cal E},v_\circ,\psi)$ with $\sigma$ charges is \emph{present} in the realization of the Poisson processes $\mathcal P=\big(\mathcal P_{i, k}\big)_{i\in{\mathbb Z}^d, 0\leq k\leq m}$ if:
\begin{enumerate}
\item $\displaystyle\psi_{d+1}(v)\in \mathcal P_{\vec\psi(v), 0}$ if and only if $\displaystyle v\in V_\ast$,
\item $\displaystyle\psi_{d+1}(v)\in \cup_{k=1}^m \mathcal P_{\vec\psi(v), k}$ for all $\displaystyle v\in V_\text{hor}\cup (V_\circ\backslash \{v_\circ\})$,
\item $\displaystyle\psi_{d+1}(v)\in \mathcal P_{\vec\psi(v), k}$ for some $1\leq k\leq m$ such that $A_s(\phi_k)\neq \{(0, 0)\}$ for all $\displaystyle v\in V_s\cap V_\text{vert}$ $(1\leq s\leq\sigma)$,
\item $\mathcal P_{\vec\psi(v), k}\cap \big(\psi_{d+1}(w), \psi_{d+1}(v)\big)=\emptyset$ for all $(v,w)\in\vec E_s$ such that $w\in V_\text{vert}$ and for all $1\leq k\leq m$ such that $(0, 0)\notin A_s(\phi_k)$ $(1\leq s\leq\sigma)$,
\item $\displaystyle\vec\psi(w)-\vec\psi(v)\in A_s(\phi_k)$ if $\displaystyle\psi_{d+1}(v)\in \mathcal P_{\vec\psi(v), k}$ for some $1\leq k\leq m$, for all $(v, w)\in \vec E^\ast$ with $v\in V_\text{hor}$ $(1\leq s\leq\sigma)$,
\item $\displaystyle\vec\psi(w)-\vec\psi(v)\in\bigcup_{s=1}^\sigma A_s(\phi_{k})$ if $\displaystyle\psi_{d+1}(v)\in \mathcal P_{\vec\psi(v), k}$ for some $1\leq k\leq m$, for all $(v, w)\in \vec E^\circ$,
\end{enumerate}
where $\vec E^\circ$ and $\vec E^\ast$ are defined in~\eqref{Ecirc}.
\end{defi}
\noindent
Condition (i) says that sinks and only sinks are mapped to defective points. Together with condition (iv) of Definition~\ref{def:contembedding} of a continuous embedding this implies that we cannot encounter any defective point along a vertical segment of the contour. Condition (ii) says that vertices in $V_\text{hor}$ and sources (except for the root) are mapped to type $k$ arrival points with $1\leq k\leq m$. As the other endpoint of the horizontal segment is not an arrival point, the consecutive segment must be vertical, furthermore, together with (i) this implies that there cannot be a defective point at either end of a horizontal segment. Condition (iii) says that internal vertices with charge~$s$ in $V_\text{vert}$ are mapped to type $k$ arrival points with $A_s(\phi_k)\neq \{(0,0)\}$. Condition (iv) says that we can only encounter type $k$ arrival points with $(0,0)\in A_s(\phi_k)$ along a vertical segment in $\psi(\vec E_s)$ $(1\leq s\leq\sigma)$. Condition~(v) says that if $\psi((v,w))$ is a horizontal segment such that $v$ is an internal vertex with charge $s$ or the root that is mapped into a type $k$ arrival point ($1\leq k\leq m$), then $(v,w)$ is mapped to a pair of space-time points of the form $\big((i,t),(i+j,t)\big)$ with $j\in A_s(\phi_k)$. Condition~(vi) is similar, except that if $v$ is a source different from the root, then we only require that $j\in\bigcup_{s=1}^\sigma A_s(\phi_{k})$.
Again, we can strengthen this definition for the $\sigma=2$ case.
\begin{defi}\label{def:conttoomcontourstrongpresent}
A continuous Toom contour $(V,{\cal E},v_\circ,\psi)$ with $2$ charges is \emph{strongly present} in the realization of the Poisson processes $\mathcal P=\big(\mathcal P_{i, k}\big)_{i\in{\mathbb Z}^d, 0\leq k\leq m}$ if in addition to conditions (i)--(vi) of Definition~\ref{def:conttoomcontourpresent}, for each $v\in V_\circ\backslash\{v_\circ\}$ and $w_1,w_2\in V$ with $(v,w_s)\in\vec E_{s,{\rm out}}(v)$ $(s=1,2)$, one has:
\begin{enumerate}\addtocounter{enumi}{6}
\item $\displaystyle\vec\psi(w_i)-\vec\psi(v)\in A_{3-i}(\phi_k)$ if $\displaystyle\psi_{d+1}(v)\in \mathcal P_{\vec\psi(v), k}$ for some $1\leq k\leq m$ $(i=1, 2)$,
\item $\vec\psi(w_1)\neq\vec\psi(w_2)$.
\end{enumerate}
\end{defi}
Our aim is to show that $\overline x_0(0)$ implies the existence of a continuous Toom contour rooted at $(0,0)$ present in $\mathcal P$. To that end, we define ``connected components'' of space-time points in state 0, that will play the role of explanation graphs in continuous time. We first define oriented paths on the space-time picture of the process. For each~$t\in \mathcal P_{i, k}$ $(i\in{\mathbb Z}^d, 1\leq k\leq m)$ such that $\overline x_t(i)=0$ place an \textit{arrow} (an oriented edge) pointing from~$(i, t)$ to each~$(j, t)$ with $j-i\in A_s(\phi_k)$ such that $\overline x_t(j)=0$ ($1\leq s\leq\sigma$). It is easy to see that we place at least one arrow pointing to each set~$A_s(\phi_k)$, otherwise site $i$ would flip to state 1 at time $t$. Furthermore, for each~$t\in\mathcal P_{i, 0}$ place a \textit{death mark} at~$(i, t)$. A \textit{path} moves in the decreasing time direction without passing through death marks and possibly jumping along arrows in the direction of the arrow. More precisely, it is a function $\gamma: [t_1,t_2] \to \mathbb Z^d$ which is left continuous with right limits and satisfies, for all $t \in (t_1, t_2)$,
\[
\begin{aligned}
t &\notin \mathcal P_{\gamma(t), 0} \qquad \text{ and }\\ \gamma(t) &\neq \gamma(t+) \text{ implies } t \in \mathcal P_{\gamma(t),k}, \gamma(t+)-\gamma(t)\in A_s(\phi_k)\text{ and } \overline x_t(\gamma(t+))=0\\ & \qquad\text{ for some } 1\leq k\leq m, \; 1\leq s\leq\sigma.
\end{aligned}
\]
We say that two points $(i, t), (j, s)$ with $t > s$ are connected by a path if there exists a path $\gamma: [s, t] \to\mathbb Z^d$ with $\gamma(t) = i$ and $\gamma(s) = j$. Define
\begin{equation}\label{eq:gamma}
\Gamma_{(i, t)}:=\{(j, s): (i, t) \mbox{ and }(j,s) \mbox{ are connected by a path} \}
\end{equation}
and $\Gamma^T_{(i, t)}:=\Gamma_{(i, t)}\cap {\mathbb Z}^d\times [t-T, t]$.
If $\overline x_{0}(0)=0$, then by the definition of the paths and arrows we have $\overline x_s(j)=0 $ for all $(j, s)\in \Gamma_{(0,0)}$.
\begin{theorem}[Presence of a continuous Toom contour]\label{T:contcontour}
Let $\phi_0, \dots, \phi_m$ be monotonic functions where $\phi_0=\phi^0$ is the constant map that always gives the outcome zero, and let $r_0, \dots, r_m$ be nonnegative rates.
Let $\mathcal P$ be a realization of the Poisson processes defined in~\eqref{eq:poi}, and denote its maximal trajectory by~$\overline x$.
Let $\sigma\geq 2$ be an integer and for each $1\leq s\leq\sigma$ and $1\leq k\leq m$, let $A_s(\phi_k)\in{\cal A}(\phi_k)$ be fixed. Then, if $\Gamma^T_{(0,0)}$ is bounded for all $T>0$, $\overline x_0(0)=0$ implies that with respect to the given choice of $\sigma$ and the sets $A_s(\phi_k)$, there is a continuous Toom contour $(V,{\cal E},v_\circ,\psi)$ rooted at $(0,0)$ present in $\mathcal P$ for $\sigma\geq 2$, and strongly present in $\mathcal P$ for $\sigma=2$.
\end{theorem}
The monotone interacting particle systems we consider here have the property that $\Gamma^T_{(0,0)}$ is bounded for all $T>0$ (see for example Chapter 4 of the lecture notes~\cite{Swart17}), if
\begin{equation}\begin{aligned}
&\sum_{k=0}^m r_k<\infty, \\
&\sum_{k=0}^m r_k \left(| \cup_{A\in\mathcal A(\phi_k)} A| -1\right)<\infty.
\end{aligned}
\end{equation}
\begin{Proof}
As~$\Gamma^T_{(0,0)}$ is bounded, the set~$\Gamma^T_{(0, 0)}\cap \big(\cup_{i\in\mathbb Z^d, 0\leq k\leq m} \{i\}\times \mathcal P_{i, k}\big)$ is finite for all $T>0$, therefore we can order the arrival points in~$\Gamma_{(0,0)}$ in decreasing order. Denote by~$(i_l, t_l)$ its elements with $0 \geq t_1 > t_2 >\dots$, and let $t_0:=0$. We define a monotonic flow ${\bm{\phh}}$ in ${\mathbb Z}^{d+1}$ as follows. For all $(i, t)\in{\mathbb Z}^{d+1}$ we let
\begin{equation}\label{eq:discretemaps}
\varphi_{(i,t)}:=\left\{\begin{array}{ll}
\phi_k\quad&\mbox{if } (i, t)=(i_l, -2l) \mbox{ for some } t_l\in \mathcal P_{i_l, k} \quad(0\leq k\leq m),\\[5pt]
\phi^{\text{id}}&\mbox{otherwise,}
\end{array}\right.
\end{equation}
where~$\phi^{\text{id}}$ is the identity map defined in~\eqref{phiid}. Denoting by $\overline x'$ the maximal trajectory of this monotonic flow, it is easy to see that~$\overline x'_0(0)=0$, thus Theorem~\ref{T:contour} implies the existence of a Toom contour $(V', \mathcal E', v'_\circ, \psi')$ rooted at $(0, 0)$ present in ${\bm{\phh}}$ with respect to the given choice of $\sigma$ and the sets $A_s(\phi_k)$. We use this discrete-time contour to define the continuous-time one. For all $v\in V'$ such that $\psi'(v)=(i, -l)$ we let
\begin{equation}\label{eq:contpsi}
\psi(v):=\left\{\begin{array}{ll}
\psi(w_1) &\mbox{if } \exists w_1, w_2: (w_1, v), (v, w_2)\in \vec E' \mbox{ and } \vec\psi'(w_1)=\vec\psi'(w_2)= \vec\psi'(v),\\[5pt]
(i, t_{\lceil l/2\rceil})\quad&\mbox{otherwise.}
\end{array}\right.
\end{equation}
Recall that for $v, w\in V'$, we write $v\leadsto_{\vec E'}w$ when we can reach $w$ from $v$ through directed edges of $\vec E'$. We define
\begin{equation}
\mathcal W(v):=\{w\in V': v\leadsto_{\vec E'}w \mbox{ and } \psi(w)=\psi(v)\} \quad \forall v\in V'.
\end{equation}
Note that $\mathcal W(v)=\{v\}$ for all $v\in V'_\ast$. Set $V:=\cup_{s=1}^\sigma V_s\cup V_\circ\cup V_\ast$ with
\begin{equation}\label{eq:conttoomgraph}
\begin{array}{l}
V_\circ:=\{\mathcal W(v): v\in V'_\circ\},\\[5pt]
V_\ast:=\{\mathcal W(v): v\in V'_\ast\},\\[5pt]
V_s:=\{\mathcal W(v): v\in V'_s\setminus \cup_{w\in V'_\circ}\mathcal W(w)\} \quad(1\leq s\leq\sigma).
\end{array}
\end{equation}
For all $W\in V$ we let $\psi(W):=\psi(w)$ for some $w\in W$. We further define
\begin{equation}
\vec E_s:=\{(W_1, W_2)\in V\times V:\exists w_i\in W_i \mbox{ such that }(w_1, w_2)\in \vec E'_s\} \quad (1\leq s\leq \sigma).
\end{equation}
Letting $v_\circ$ be the set $W\in V_\circ$ containing $v'_\circ$, we claim that $(V, \mathcal E, v_\circ, \psi)$ is a continuous Toom contour rooted at $(0, 0)$ present in $\mathcal P$ for $\sigma\geq 2$, and strongly present in $\mathcal P$ for $\sigma= 2$. (See Figure~\ref{fig:contcontour} for an example of the construction.)
\begin{figure}[htb!]
\begin{center}
\includegraphics[height=12cm]{figures/contcontour2.pdf}
\caption{Top left: A realization of $\mathbf{P}$ that applies the maps $\phi^0$ and $\phi^{\rm coop}$ with rates $r_0$ and $r_1$ respectively. The points marked with a star are defective, ensuring that the origin (0,0,0) is in state 0. The connected component $\Gamma_{(0,0,0)}$ of the origin is marked by black. Right: The monotone cellular automaton ${\bm{\phh}}$ defined in~\eqref{eq:discretemaps} and the corresponding Toom contour rooted at (0,0,0). The sites marked with a star and open dot apply $\phi^0$ and $\phi^{\rm coop}$ respectively, every other site applies the identity map. The origin in state zero. Middle: The Toom graph corresponding to the Toom contour on the right. The green sets correspond to the vertices of the Toom graph of the continuous contour, defined in~\eqref{eq:conttoomgraph}. Bottom left: The Toom contour corresponding to the realization of $\mathbf{P}$ on the top left.}
\label{fig:contcontour}
\end{center}
\end{figure}
Let us start with some simple observations. By definition, in ${\bm{\phh}}$ at each height $-2l \; (1\leq l\leq n)$ there is exactly one site $(i, -2l)$ such that $\varphi_{(i, -2l)}\neq \phi^{\text{id}}$, every other site of $\mathbb Z^{d+1}$ applies the identity map. By the construction of the Toom contour a site with the identity map cannot be the image of a source, furthermore any edge in $\psi'(\vec E')$ starting at such a site is vertical. Any edge starting at a site with $\phi_k \; (1\leq k\leq m)$ has the form $\big((i, t), (j, t-1)\big)$ for some $t\in 2\mathbb Z, i, j\in\mathbb{Z}^d$. We call these edges diagonal, if $i\neq j$. Thus, $\psi'(\vec E')$ is the union of vertical and diagonal edges, such that each diagonal edge points from an even height to an odd height. Furthermore, as $\varphi_{\psi'(v)}=\phi^0$ for all $v\in V'_\ast$, each sink is mapped to a space-time point with even height. Together with the defining properties of an embedding in Definition~\ref{def:embedding} these observations imply that
\begin{equation}\label{eq:psiprimeadditional}
(j, t)\notin\psi'(V'_s\cup V'_\ast) \mbox{ for each }\big((i, t), (j, t-1)\big)\in\psi'(\vec E'_s) \mbox{ with }i\neq j\quad (1\leq s\leq\sigma).
\end{equation}
As $\varphi_{(i, t)}\neq \phi^{\text{id}}$, we must have $\varphi_{(j, t)} = \phi^{\text{id}}$, furthermore, we have the identity map at every site at height $t-1$. As $t-1$ is odd, clearly $(j, t)\notin\psi'(V'_\ast)$. Assume that $(j, t)=\psi'(v)$ for some $v\in V'_s$, then there is a $w\in V'_s$ such that $(v, w)\in\vec E'$ and $\psi'((v, w))$ is vertical. This means that $\psi'(w)=(j, t-1)$, that is a type $s$ vertex overlaps with another type $s$ vertex, contradicting property (iii) of Definition~\ref{def:embedding}.
Let us now examine the image of $(V', \mathcal E')$ under $\psi$. By definition,
for each $(v, w)\in \vec E'$ such that $\psi'((v, w))$ is diagonal we have $\psi_{d+1}(v)=\psi_{d+1}(w)$. Furthermore, $\vec\psi(v)=\vec\psi'(v)$ for all $v\in V'$, implying that $\psi(\vec E')$ is the union of horizontal and vertical segments. Observe that for any sequence of vertices $v_1, \dots, v_n\in V_s \;(1\leq s\leq \sigma)$ such that $\psi'((v_i, v_{i+1}))$ is vertical for each $1\leq i\leq n-1$ the embedding $\psi$ maps $v_2,\dots, v_{n-1}$ to $\psi(v_1)$. Thus the starting points of vertical edges in $\psi'(\vec E')$ are eventually mapped into the endpoints of horizontal segments or sources under $\psi$. From the definition of $V$ in~\eqref{eq:conttoomgraph} it is easy to see that, with the convention that $\psi((v,w))=(\psi(v),\psi(w))=\emptyset$ if $\psi(v)=\psi(w)$, we have \begin{equation}\label{eq:psipsiprime}
\begin{array}{l}
\psi(V_\circ)= \psi(V'_\circ),\quad \psi(v_\circ)= \psi(v'_\circ), \quad \psi(V_\ast)= \psi(V'_\ast),\\[5pt]
\psi(V_s)= \psi(V'_s)\setminus \psi(V'_\circ), \quad \psi(\vec E_s)=\psi(\vec E'_s), \quad (1\leq s\leq\sigma).\\[5pt]
\end{array}
\end{equation}
For any $(v, w)\in \vec E'$ such that $\psi'((v, w))$ is diagonal or $v\in V'_\circ$ we have $\varphi_{\psi'(v)}= \phi_k \; (1\leq k\leq m)$, thus $\psi_{d+1}(v)$ is an arrival point of $\mathcal P_{\vec\psi(v), k}$. Finally, for each $v\in V'_\ast$ we have $\varphi_{\psi'(v)}= \phi_0$, thus $\psi_{d+1}(v)$ is a defective point.
We are now ready to show that $(V, \mathcal E, v_\circ, \psi)$ is a continuous Toom contour rooted at $(0, 0)$. As $(V', \mathcal E')$ is a Toom graph, it is straightforward to check that $(V, \mathcal E)$ is a Toom graph as well. We have already seen that $\psi$ satisfies condition (i) of Definition~\ref{def:contembedding} of a continuous embedding. As $\psi'$ satisfies Definition~\ref{def:embedding}, its properties (ii) and (iii) together with \eqref{eq:contpsi} and~\eqref{eq:psiprimeadditional} easily yield conditions (ii) and (iii). Finally, assume that (iv) does not hold.
By~\eqref{eq:conttoomgraph} then there exist $v_1 \in V'_s\cup V'_\circ, v_2 \in V'_s, v_3\in V'_s\cup V'_\ast$ such that $\vec\psi'(v_1)=\vec\psi'(v_2)=\vec\psi'(v_3)$ and $\psi'_{d+1}(v_2)<\psi'_{d+1}(v_3)<\psi'_{d+1}(v_1)$ with $\psi'_{d+1}(v_i)\in \mathbb Z$ for each $i=1,2,3$. As there is a type $s$ charge travelling through $v_1$ and $v_2$ in $(V', \mathcal E')$ and the difference between the time coordinates of $\psi'$ of two consecutive vertices of a charge is 1, there must be a $w\in V'_s$ such that $\psi'(w)=\psi'(v_3)$, that is a sink or an internal vertex of type $s$ overlaps with another internal vertex of type $s$. This contradicts conditions (ii) and (iii) of Definition~\ref{def:embedding}, therefore condition (iv) must hold.
By Definition~\ref{def:toomcontour} and the definition of $\psi$ we have $\psi_{d+1}(v)\leq 0$ for all $v\in V'$ (hence for all $v\in V$ as well), and $\psi(v'_\circ)=\psi(v_\circ)=(0,0)$. By~\eqref{eq:conttoomgraph} any vertex $v\in V$ such that $\psi(v)=(0,0)$ is contained in some $W\in V_\circ$, thus $(V, \mathcal E, v_\circ, \psi)$ satisfies the defining property of Definition \ref{def:conttoomcontour} of a continuous Toom contour rooted at $v_\circ$. We are left to show that this contour is (strongly) present in $\mathcal P$.
As $(V', \mathcal E', v'_\circ, \psi')$ is a Toom contour rooted at $(0, 0)$ present in ${\bm{\phh}}$, it satisfies Definition~\ref{D:present}.
We now check the conditions of Definition~\ref{def:conttoomcontourpresent}. We have already seen that conditions (i) and (ii) hold. Condition (iii) says that internal vertices with charge~$s$ in $V_\text{vert}$ are mapped to type $k$ arrival points with $A_s(\phi_k)\neq \{(0,0)\}$. As for all $w\in V$ such that $\psi'(w)$ is the starting point of a vertical edge in $\psi'(\vec E')$, $\psi(w)$ is the endpoint of a horizontal segment or a source, we have that indeed $\varphi_{\psi'(v)}$ cannot be the identity map or a map $\phi_k$ with $A_s(\phi_k)= \{(0,0)\}$ for any $v\in V_\text{vert}\cap V_s$. Condition (iv) says that we can only encounter type $k$ arrival points with $(0,0)\in A_s(\phi_k)$ along a vertical segment in $\psi(\vec E_s)$ $(1\leq s\leq\sigma)$. If $(0,0)\notin A_s(\phi_k)$ for an arrival point along the image of a charge $s$, then by the construction of the discrete contour the charge is diverted at this point in a horizontal direction, so it is necessarily the endpoint of that vertical segment. Finally, conditions (v) and (vi) are immediate from conditions (iii) and (iv) of Definition~\ref{D:present}. Since moreover $(V', \mathcal E', v'_\circ, \psi')$ satisfies Definition~\ref{D:strongpres} for $\sigma=2$, the defining properties of Definition~\ref{def:conttoomcontourstrongpresent} hold for $(V, \mathcal E, v_\circ, \psi)$.
\end{Proof}
\begin{remark}\label{rem:contcontour}
We have observed before, that in the image under $\psi$ of a type $s$ charge ($1\leq s\leq \sigma$) horizontal segments are always followed by vertical segments. The construction of the continuous Toom contour described above also ensures that vertical segments either end at a defective point, or are followed by a horizontal segment. Thus, starting from the image of the source, we have an alternating sequence of horizontal and vertical edges ending with a vertical edge at the image of the sink. Furthermore, if $(0,0)\notin \cup_{k=0}^m \mathcal P_{0, k}$, then $\varphi_{(0,0)}=\phi^{\rm id}$, so every $(v_\circ, w)\in\psi'(\vec E')$ is vertical. \eqref{eq:contpsi} then implies that every segment in the continuous contour starting at $\psi(v_\circ)$ is also vertical.
\end{remark}
\subsection{Explicit bounds}\label{S:contbounds}
\noindent
\textbf{Sexual contact process on $\mathbb Z^d \; (d\geq 1)$}
Recall from Subsection~\ref{S:contfirst} that we define $A_1:=\{0\}$ and $A_2:=\{e_1,\dots, e_d\}$ and we have
\begin{equation}
{\cal A}(\phi^{\rm coop, d})=\big\{A_1,A_2\big\}.
\end{equation}
We set $\sigma:=|{\cal A}(\phi^{\rm coop, d})|=2$, and for the sets $A_s(\phi_k)$ in (\ref{As}) we make the choices
\begin{equation}
A_1(\phi^{\rm coop,d}):=A_1,\quad A_2(\phi^{\rm coop, d}):=A_2,
\end{equation}
that is we have $A_s(\phi_1)\neq A_1$ only for $s=2$.
Let $\mathbf P=\big(\mathbf P_{(i,k)}\big)_{i\in{\mathbb Z}^{d}, k=0,1}$ be a family of independent Poisson processes such that for each $i$, $\mathbf P_{(i,0)}$ has rate 1 and $\mathbf P_{(i,1)}$ has rate $\lambda$. In line with the terminology used for contact processes, we will call type 0 arrival points \emph{death marks} and type 1 arrival points \emph{birth marks}. Then Theorem~\ref{T:contcontour} implies the Peierls bound:
\begin{equation}\label{eq:Peicont}
1-\overline\rho=\P[\overline X_0(0)=0]
\leq\P\big[\mbox{a Toom contour rooted at 0 is strongly present in }\mathbf P\big].
\end{equation}
In what follows, we give an upper bound on this probability.
Definitions~\ref{def:conttoomcontourpresent} and \ref{def:conttoomcontourstrongpresent} imply that a continuous Toom contour is strongly present in $\mathbf P$ if and only if the following conditions are satisfied:
\begin{itemize}
\item[{\rm(i)}] $\psi(v)$ is a death mark for all $\displaystyle v\in V_\ast$,
\item[{\rm(ii)}] $\psi(v)$ is a birth mark for all $\displaystyle v\in V_{\text{hor}}$,
\item[{\rm(iii)}] There are no death marks along vertical segments of $\psi(\vec E)$,
\item[{\rm(iv)}] There are no birth marks along vertical segments of $\psi(\vec E_2)$,
\item[{\rm(v)}] $v\in V_2\cup V_\circ$ for all $\displaystyle v\in V_{\text{hor}}$,
\item[{\rm(vi)}] Horizontal and vertical segments alternate along each path between a source and a sink,
\item[{\rm(viia)}] If $(v, w)\in \vec E^\circ_1$, then $\psi\big((v, w)\big)$ is a vertical segment,
\item[{\rm(viib)}] If $(v, w)\in \vec E^\circ_2$, then $\psi\big((v, w)\big)$ is a horizontal segment,
\item[{\rm(viii)}] If $(v, w)\in \vec E$ with $w\in V_\ast$, then $\psi\big((v, w)\big)$ is a vertical segment,
\end{itemize}
where $\vec E^\circ_i$ is defined in~\eqref{Ecirc}. As horizontal segments cannot start at the image of a type 1 internal vertex and they alternate with vertical segments along each path between a source and a sink, this implies that the image of a type 1 charge starting at a source and ending at a sink is either a single vertical segment (that is there is no internal vertex along the path), or a horizontal segment followed by a vertical segment (that is there is exactly one internal vertex along the path). Furthermore, by Remark~\ref{rem:contcontour}, $\mathbb P^{(1,\lambda)}$-a.s. the type 1 path starting at $v_\circ$ consists of a single vertical segment.
We now can argue similarly as in the discrete time case in Section~\ref{S:intbd}. If we reverse the direction of edges of charge 2, then the Toom graph becomes a directed cycle with edge set $\vec E_1 \cup\cev E_2$. We then call vertical segments in $\psi(\vec E_1)$ \textit{upward} and in $\psi(\cev E_2)$ \textit{downward}, and horizontal segments in $\psi(\vec E_1)$ \textit{outward} and in $\psi(\cev E_2)$ \textit{inward}. As $|A_2|=d$ we distinguish $d$ types of outward and inward segments: we say that~$\psi\big((v, w)\big)$ is type $i$, if $|\vec\psi(w)-\vec\psi(v)|=e_i$.
If $(V, {\cal E}, v_\circ, \psi)$ is a continuous Toom contour rooted at 0 that is strongly present in $\mathbf P$, then we can fully specify $\psi$ by saying for each $(v, w)\in \vec E_1\cup\cev E_2$ whether $\psi\big((v, w)\big)$ is an upward, a downward, an outward or an inward segment, and its length in the former two and type in the latter two cases.
In other words, we can represent the contour by a word of length $n$ consisting of the letters from the alphabet $\{o_1,\dots,o_d,u, d, i_1,\dots, i_d\}$, which represents the different kinds of steps the cycle can take, and a vector $l$ that contains the length of each vertical segment along the cycle in the order we encounter them. Then we can obtain a word consisting of these letters that must satisfy the following rules:
\begin{itemize}
\item The first step is an upward segment.
\item Each outward segment must be immediately preceded by a downward segment and followed by an upward segment.
\item Between two occurrences of the string $Do_\cdot U$, and also before the first and after the last occurrence, we see a sequence of the string $Di_\cdot$ of length $\geq 0$.
\item The last step is a downward segment.
\end{itemize}
Notice that the structure of a possible word is exactly the same as in~\eqref{eq:discreteword}. Then the contour in the bottom left of Figure~\ref{fig:contcontour} is described by the following word:
\begin{equation}
\accentset{\circ}{|} U \accentset{\ast}{|} \underbrace{D\accentset{\circ}{|} o_2 \accentset{1}{|} U} \accentset{\ast}{|} \underbrace{D\accentset{\circ}{|} o_1 \accentset{1}{|} U} \accentset{\ast}{|} \underbrace{D\accentset{2}{|} i_2} \accentset{2}{|} \underbrace{D\accentset{2}{|} i_1}
\accentset{2}{|} D\accentset{\circ}{|}.
\end{equation}
For any continuous Toom contour $T$ denote by $W(T)$ the corresponding word satisfying these rules and by $\mathbf W$ the set of all possible words satisfying these rules. We then can bound
\begin{equation}\begin{aligned}
\P[\overline X_0(0)=0]&\\
\leq\sum_{W\in\mathbf W} \P\big[&\mbox{a Toom contour } T \mbox{ with } W(T)=W \mbox{ rooted at 0 is strongly present in }\mathbf P\big].\end{aligned}
\end{equation}
From this point on, we can count the number of possible words and assign probabilities to each following the same line of thought (adapted to continuous time) as in Section~\ref{S:intbd} for the discrete-time monotone cellular automaton that applies the cooperative branching and the identity map. We then recover the following Peierls bound:
\begin{equation}
\P[\overline X_0(0)=0]
\leq\frac {1} {1+\lambda}\sum_{i=0}^\infty\left(\frac {16d\lambda\big((2d+1)\lambda+1\big)} {(\lambda+1)^3} \right)^{i}.
\end{equation}
The argument is similar to that of \cite[Lemma~8 and 9]{Gra99}. Presenting it would be long and technical, but not particularly challenging, so we will skip it.
As we have mentioned earlier, we can think of this process as the limit of the random cellular automaton with time steps of size $\varepsilon$ where the maps $\phi^0,\phi^{\rm coop, d}$ and $\phi^{\rm id}$ are applied with probabilities $\varepsilon$, $\varepsilon \lambda$, and $1-\varepsilon(1+\lambda)$, respectively. Observe that we recover the exact same Peierls bound by substituting $p=\varepsilon$, $r=\varepsilon \lambda$, and $q=1-\varepsilon(1+\lambda)$ into~\eqref{Prq} and letting $\varepsilon\to 0$. In particular for $d=1$ we obtain the bound
\begin{equation}
\lambda_c (1)\leq 49.3242\dots,
\end{equation}
and for $d=2$ the bound
\begin{equation}
\lambda_c (2)\leq 161.1985\dots .
\end{equation}
\section{Minimal explanations}\label{S:expla}
\subsection*{Outline}
Our proof of Theorem~\ref{T:contour} started with Lemma~\ref{L:explan}, which shows that if $\overline x_0(0)=0$, then there is an explanation graph present in ${\bm{\phh}}$, in the sense of Definitions \ref{def:finiteexpl} and \ref{def:finexpres}. In this section, we explain how explanation graphs, whose definition looks somewhat complicated at first sight, naturally arise from a more elementary concept, which we will call a \emph{minimal explanation}. Our definition of a minimal explanation will be similar to, though different from the definition of John Preskill \cite{Pre07}. We introduce minimal explanations in Subsection~\ref{S:finexpl} and then discuss their relation to explanation graphs in Subsection~\ref{S:exgr}.
\subsection{Finite explanations}\label{S:finexpl}
For each monotonic map $\phi:\{0,1\}^{{\mathbb Z}^d}\to\{0,1\}$, we define
\be\begin{array}{r@{\,}c@{\,}l}
\displaystyle{\cal A}^\uparrow(\phi)&:=&\displaystyle\big\{A\subset{\mathbb Z}^d:\phi(1_A)=1\big\},\\[5pt]
\displaystyle{\cal Z}^\uparrow(\phi)&:=&\displaystyle\big\{Z\subset{\mathbb Z}^d:\phi(1-1_Z)=0\big\},
\end{array}\ee
where $1_A$ denotes the indicator function of $A$ and hence $1-1_Z$ is the configuration that is zero on $Z$ and one elsewhere. Clearly, ${\cal A}^\uparrow(\phi)$ is an increasing set in the sense that ${\cal A}^\uparrow(\phi)\ni A\subset A'$ implies $A'\in{\cal A}^\uparrow(\phi)$. Likewise ${\cal Z}^\uparrow(\phi)$ is increasing. We say that an element $A\in{\cal A}^\uparrow(\phi)$ is \emph{minimal} if $A,A'\in{\cal A}^\uparrow(\phi)$ and $A'\subset A$ imply $A'=A$. We define minimal elements of ${\cal Z}^\uparrow(\phi)$ in the same way and set
\begin{equation}\label{upmin}
{\cal A}(\phi):=\big\{A\in{\cal A}^\uparrow(\phi):A\mbox{ is minimal}\big\}
\quad\mbox{and}\quad
{\cal Z}(\phi):=\big\{Z\in{\cal Z}^\uparrow(\phi):Z\mbox{ is minimal}\big\}.
\end{equation}
Since monotonic maps are local (i.e., depend only on finitely many coordinates), it is not hard to see that
\be\begin{array}{r@{\,}c@{\,}l}\label{minup}
\displaystyle{\cal A}^\uparrow(\phi)&:=&\displaystyle\big\{A\subset{\mathbb Z}^d:A\supset A'\mbox{ for some }A'\in{\cal A}(\phi)\big\},\\[5pt]
\displaystyle{\cal Z}^\uparrow(\phi)&:=&\displaystyle\big\{Z\subset{\mathbb Z}^d:Z\supset Z'\mbox{ for some }Z'\in{\cal Z}(\phi)\big\}.
\end{array}\ee
It follows that
\begin{equation}
\phi(x)=\bigvee_{A\in{\cal A}(\phi)}\bigwedge_{i\in A}x(i)=\bigwedge_{Z\in{\cal Z}(\phi)}\bigvee_{i\in Z}x(i).
\end{equation}
In particular, our present definition of ${\cal A}(\phi)$ coincides with the one given in (\ref{Aphi}). We note that ${\cal A}(\phi^0)=\emptyset$ and ${\cal A}(\phi^1)=\{\emptyset\}$, and similarly ${\cal Z}(\phi^0)=\{\emptyset\}$ and ${\cal Z}(\phi^1)=\emptyset$. One has
\begin{equation}
A\in{\cal A}^\uparrow(\phi)\quad\mbox{if and only if}\quad
A\cap Z\neq\emptyset\quad\forall Z\in{\cal Z}^\uparrow(\phi),
\end{equation}
and by (\ref{minup}) the same is true with ${\cal Z}^\uparrow(\phi)$ replaced by ${\cal Z}(\phi)$. Similarly,
\begin{equation}\label{Zchar}
Z\in{\cal Z}^\uparrow(\phi)\quad\mbox{if and only if}\quad
Z\cap A\neq\emptyset\quad\forall A\in{\cal A}(\phi).
\end{equation}
For monotonic maps $\phi$ and $\phi'$ defined on $\{0,1\}^{{\mathbb Z}^d}$, we write $\phi\leq\phi'$ if $\phi(x)\leq\phi'(x)\quad\forall x\in\{0,1\}^{{\mathbb Z}^d}$. Moreover, we write
\begin{equation}
\phi\prec\phi'\quad\mbox{if and only if}\quad{\cal Z}(\phi)\subset{\cal Z}(\phi').
\end{equation}
Note that $\phi\prec\phi'$ implies that $\phi\geq\phi'$. For monotonic flows ${\bm{\phh}}$ and ${\bm{\psi}}$, we write ${\bm{\phh}}\leq{\bm{\psi}}$ (resp.\ ${\bm{\phh}}\prec{\bm{\psi}}$) if $\varphi_{(i,t)}\leq\psi_{(i,t)}$ (resp.\ $\varphi_{(i,t)}\prec\psi_{(i,t)}$) for all $(i,t)\in{\mathbb Z}^{d+1}$. We let $\overline x^{\bm{\phh}}$ denote the maximal trajectory of a monotonic flow ${\bm{\phh}}$. By definition, a \emph{finite explanation} for $(0,0)$ is a monotonic flow ${\bm{\psi}}$ such that:
\begin{enumerate}
\item $\overline x_0^{\bm{\psi}}(0)=0$,
\item $\psi_{(i,t)}\neq\phi^1$ for finitely many $(i,t)\in{\mathbb Z}^{d+1}$.
\end{enumerate}
By definition, a \emph{minimal explanation} for $(0,0)$ is a finite explanation ${\bm{\psi}}$ that is minimal with respect to the partial order $\prec$, i.e., ${\bm{\psi}}$ has the property that if ${\bm{\psi}}'$ is a finite explanation for $(0,0)$ such that ${\bm{\psi}}'\prec{\bm{\psi}}$, then ${\bm{\psi}}'={\bm{\psi}}$.
\begin{lemma}[Existence of a minimal explanation]
Let\label{L:minexist} ${\bm{\phh}}$ be a monotonic flow. Then $\overline x_0^{\bm{\phh}}(0)=0$ if and only if there exists a minimal explanation ${\bm{\psi}}$ for $(0,0)$ such that ${\bm{\psi}}\prec{\bm{\phh}}$.
\end{lemma}
\begin{Proof}
Assume that there exists a minimal explanation ${\bm{\psi}}$ for $(0,0)$ such that ${\bm{\psi}}\prec{\bm{\phh}}$. Then ${\bm{\psi}}\geq{\bm{\phh}}$ and hence $0=\overline x^{\bm{\psi}}_0(0)\geq\overline x^{\bm{\phh}}_0(0)$. To complete the proof, we must show that conversely, $\overline x_0^{\bm{\phh}}(0)=0$ implies the existence of a minimal explanation ${\bm{\psi}}$ for $(0,0)$ such that ${\bm{\psi}}\prec{\bm{\phh}}$.
We first prove the existence of a finite explanation ${\bm{\psi}}$ for $(0,0)$ such that ${\bm{\psi}}\prec{\bm{\phh}}$. For each $s\in{\mathbb Z}$, we define $x^s$ as in (\ref{maxs}). Then (\ref{maxconv}) implies that $x^{-n}_0(0)=0$ for some $0\leq n<\infty$. For each $(i,t)\in{\mathbb Z}^{d+1}$, let
\begin{equation}
U(i,t):=\big\{(j,t-1):j\in A\mbox{ for some }A\in{\cal A}(\varphi_{(i,t)})\big\}
\end{equation}
denote the set of ``ancestors'' of $(i,t)$. For any $Z\subset{\mathbb Z}^{d+1}$, we set $U(Z):=\{U(z):z\in Z\}$ and we define inductively $U^0(Z):=Z$ and $U^{k+1}(Z):=U(U^k(Z))$ $(k\geq 0)$. Then $\bigcup_{k=0}^nU^k(0,0)$ is a finite set. Since $x^{-n}_0(0)=0$, it follows that setting
\begin{equation}
\psi_{(i,t)}:=\left\{\begin{array}{ll}
\displaystyle\varphi_{(i,t)}\quad&\mbox{if }(i,t)\in\bigcup_{k=0}^nU^k(0,0),\\[5pt]
\displaystyle\phi^1\quad&\mbox{otherwise}
\end{array}\right.
\end{equation}
defines a finite explanation ${\bm{\psi}}$ for $(0,0)$ such that ${\bm{\psi}}\prec{\bm{\phh}}$.
We observe that for a given monotonic map $\phi$, there are only finitely many monotonic maps $\phi'$ such that $\phi'\prec\phi$. Also, since ${\cal Z}(\phi^1)=\emptyset$, the only monotonic map $\phi$ such that $\phi\prec\phi^1$ is $\phi=\phi^1$. Therefore, since $\psi_{(i,t)}\neq\phi^1$ for finitely many $(i,t)\in{\mathbb Z}^{d+1}$, there exist only finitely many monotonic flows ${\bm{\psi}}'$ such that ${\bm{\psi}}'\prec{\bm{\psi}}$. It follows that the set of all finite explanations ${\bm{\psi}}'$ for $(0,0)$ that satisfy ${\bm{\psi}}'\prec{\bm{\psi}}$ must contain at least one minimal element, which is a minimal explanation for $(0,0)$ such that ${\bm{\psi}}\prec{\bm{\phh}}$.
\end{Proof}
The following lemma gives a more explicit description of minimal explanations. In Figure~\ref{fig:minexpl} on the right, a minimal explanation ${\bm{\psi}}$ for $(0,0)$ is drawn with ${\bm{\psi}}\prec{\bm{\phh}}$, where ${\bm{\phh}}$ is a monotonic flow that takes values in $\{\phi^0,\phi^{\rm coop}\}$. For each $(i,t)\in{\mathbb Z}^{d+1}$ such that $\psi_{(i,t)}\neq\phi^1$, thick black lines join $(i,t)$ to the points $(j,t-1)$ with $j\in Z_{(i,t)}$, where $Z_{(i,t)}$ is the set defined in point~(v) below. Orange stars indicate points $(i,t)$ where $\psi_{(i,t)}=\phi^0$. The minimal explanation drawn in Figure~\ref{fig:minexpl} has the special property that even if we replace $\psi_{(i,t)}$ by $\varphi_{(i,t)}$ in all points except for the defective points of ${\bm{\phh}}$, then it is still true that removing any of the defective points of ${\bm{\psi}}$ results in the origin having the value one. This means that the set of defective points drawn in Figure~\ref{fig:minexpl} corresponds to a ``minimal explanation'' in the sense defined by John Preskill in \cite{Pre07}, which is a bit stronger than our definition.
\begin{lemma}[Minimal explanations]
Let\label{L:minexpl} ${\bm{\psi}}$ be a finite explanation for $(0,0)$. Then ${\bm{\psi}}$ is a minimal explanation for $(0,0)$ if and only if in addition to conditions (i)--(iii) of the definition of a finite explanation, one has:
\begin{enumerate}\addtocounter{enumi}{3}
\item $\psi_{(i,t)}=\phi^1$ for all $(i,t)\in{\mathbb Z}^{d+1}\backslash\{(0,0)\}$ such that $t\geq 0$,
\item for each $(i,t)\in{\mathbb Z}^{d+1}$ such that $\psi_{(i,t)}\neq\phi^1$, there exists a finite $Z_{(i,t)}\subset{\mathbb Z}^d$ such that ${\cal Z}(\psi_{(i,t)})=\{Z_{(i,t)}\}$,
\item for each $(i,t)\in{\mathbb Z}^{d+1}\backslash\{(0,0)\}$ such that $\psi_{(i,t)}\neq\phi^1$, there exists a $j\in{\mathbb Z}^d$ such that $\psi_{(j,t+1)}\neq\phi^1$ and $i\in Z_{(j,t+1)}$.
\end{enumerate}
Moreover, each minimal explanation ${\bm{\psi}}$ for $(0,0)$ satisfies:
\begin{enumerate}\addtocounter{enumi}{6}
\item $\overline x^{\bm{\psi}}_t(i)=0$ for each $(i,t)\in{\mathbb Z}^{d+1}$ such that $\psi_{(i,t)}\neq\phi^1$.
\end{enumerate}
\end{lemma}
\begin{Proof}
We first show that a finite explanation ${\bm{\psi}}$ for $(0,0)$ satisfying (iv)--(vi) is minimal. By our definition of minimal explanations, we must check that if ${\bm{\psi}}'$ is a finite explanation such that ${\bm{\psi}}'\prec{\bm{\psi}}$, then ${\bm{\psi}}'={\bm{\psi}}$. Imagine that conversely, $\psi'_{(i,t)}\neq\psi_{(i,t)}$ for some $(i,t)\in{\mathbb Z}^{d+1}$. Then by (v) and the fact that $\psi'_{(i,t)}\prec\psi_{(i,t)}$, we must have that ${\cal Z}(\psi'_{(i,t)})=\emptyset$ and hence $\psi'_{(i,t)}=\phi^1$. Since $\psi'_{(i,t)}\neq\psi_{(i,t)}$, it follows that $\psi_{(i,t)}\neq\phi^1$. By (iv), this implies that either $(i,t)=(0,0)$ or $t<0$. Let $n:=-t$. Using (vi), we see that there exist $i=i_0,\ldots,i_n$ such that $\psi_{(i_k,t+k)}\neq\phi^1$ $(0\leq k\leq n)$ and $i_{k-1}\in Z_{(i_k,t+k)}$ $(0<k\leq n)$. By (iv), we must have $i_n=0$. Since $\psi'_{(i,t)}=\phi^1$ we have $\overline x^{{\bm{\psi}}'}_t(i)=1$. Using the fact that ${\bm{\psi}}'\prec{\bm{\psi}}$ and $i_{k-1}\in Z_{(i_k,t+k)}$ $(0<k\leq n)$, it follows that $\overline x^{{\bm{\psi}}'}_{t+k}(i_k)=1$ for all $0\leq k\leq n$. In particular, this shows that $\overline x^{{\bm{\psi}}'}_0(0)=1$, contradicting the fact that ${\bm{\psi}}'$ is a finite explanation for $(0,0)$.
We next show that each minimal explanation ${\bm{\psi}}$ for $(0,0)$ satisfies (iv)--(vii). Property~(iv) follows from the fact that if $\psi_{(i,t)}\neq\phi^1$ for some $(i,t)\in{\mathbb Z}^{d+1}\backslash\{(0,0)\}$ such that $t\geq 0$, then setting $\psi'_{(i,t)}:=\phi^1$ and $\psi'_{(j,s)}:=\psi_{(j,s)}$ for all $(j,s)\neq(i,t)$ defines a finite explanation ${\bm{\psi}}'\prec{\bm{\psi}}$. Property~(vii) follows in the same way: if $\overline x^{\bm{\psi}}_t(i)=1$ for some $(i,t)\in{\mathbb Z}^{d+1}$ such that $\psi_{(i,t)}\neq\phi^1$, then we can replace $\psi_{(i,t)}$ by $\phi^1$ without changing the fact that ${\bm{\psi}}$ is a finite explanation. To prove (v), we first observe that if $\psi_{(i,t)}\neq\phi^1$ for some $(i,t)\in{\mathbb Z}^{d+1}$, then $\overline x^{\bm{\psi}}_t(i)=0$ by (vii). It follows that there exists some $Z\in{\cal Z}(\psi_{(i,t)})$ such that $\overline x^{\bm{\psi}}_{t-1}(j)=0$ for all $j\in Z$. (Note that this in particular includes the case that $\psi_{(i,t)}=\phi^0$ and ${\cal Z}(\psi_{(i,t)})=\{\emptyset\}$.) If ${\cal Z}(\psi_{(i,t)})$ contains any other elements except for $Z$, then we can remove these without changing the fact that ${\bm{\psi}}$ is a finite explanation. Therefore, by minimality, we must have ${\cal Z}(\psi_{(i,t)})=\{Z\}$, proving (v). To prove (vi), finally, assume that $(i,t)\in{\mathbb Z}^{d+1}\backslash\{(0,0)\}$ and $\psi_{(i,t)}\neq\phi^1$, but there does not exist a $j\in{\mathbb Z}^d$ such that $\psi_{(j,t+1)}\neq\phi^1$ and $i\in Z_{(j,t+1)}$. Then we can replace $\psi_{(i,t)}$ by $\phi^1$ without changing the fact that ${\bm{\psi}}$ is a finite explanation, which contradicts minimality. This completes the proof.
\end{Proof}
\subsection{Explanation graphs revisited}\label{S:exgr}
We claim that in the proof of many of our results, such as Theorems \ref{T:contour} and \ref{T:strongpres}, we can without loss of generality assume that
\begin{equation}\label{wlog}
{\cal A}(\phi_k)=\big\{A_s(\phi_k):1\leq s\leq\sigma\big\}\qquad(1\leq k\leq m).
\end{equation}
To see this, let ${\bm{\phh}}$ be a monotonic flow on $\{0,1\}^{{\mathbb Z}^d}$ taking values in $\{\phi_0,\ldots,\phi_m\}$, where $\phi_0=\phi^0$ is the constant map that always gives the outcome zero and $\phi_1,\ldots,\phi_m$ are non-constant. Let $\sigma\geq 2$ be an integer and for each $1\leq k\leq m$ and $1\leq s\leq\sigma$, fix $A_s(\phi_k)\in{\cal A}(\phi_k)$. We let ${\bm{\phh}}^\ast=(\varphi^\ast_{(i,t)})_{(i,t)\in{\mathbb Z}^{d+1}}$ denote the image of ${\bm{\phh}}$ under the map from $\{\phi_0,\ldots,\phi_m\}$ to $\{\phi^\ast_0,\ldots,\phi^\ast_m\}$ defined by setting $\phi_0^\ast:=\phi_0$ and
\begin{equation}\label{phick}
\phi^\ast_k(x):=\bigvee_{s=1}^\sigma\bigwedge_{i\in A_s(\phi_k)}x(i)
\qquad\big(1\leq k\leq m,\ x\in\{0,1\}^{{\mathbb Z}^d}\big).
\end{equation}
We set $A_s(\phi^\ast_k):=A_s(\phi_k)$ $(1\leq k\leq m,\ 1\leq s\leq\sigma)$. We make the following simple observations.
\begin{lemma}[Modified monotonic flow]
The\label{L:wlog} modified monotonic flow ${\bm{\phh}}^\ast$ has the following properties:
\begin{enumerate}
\item ${\bm{\phh}}^\ast$ satisfies (\ref{wlog}),
\item ${\bm{\phh}}^\ast\geq{\bm{\phh}}$,
\item $\overline x^{{\bm{\phh}}^\ast}\geq\overline x^{\bm{\phh}}$,
\item an explanation graph is present in ${\bm{\phh}}^\ast$ if and only if it is present in ${\bm{\phh}}$,
\item a Toom contour is (strongly) present in ${\bm{\phh}}^\ast$ if and only if it is (strongly) present in ${\bm{\phh}}$.
\end{enumerate}
\end{lemma}
\begin{Proof}
Property~(iii) is a direct consequence of (ii) and all other properties follow directly from the definitions.
\end{Proof}
Because of Lemma~\ref{L:wlog}, in the proof of results such as Theorems \ref{T:contour} and \ref{T:strongpres} about the (strong) presence of Toom contours or Lemma~\ref{L:explan} about the presence of an explanation graph, we can without loss of generality assume that (\ref{wlog}) holds. Indeed, by part (iii) of the lemma, $\overline x^{\bm{\phh}}_0(0)=0$ implies $\overline x^{{\bm{\phh}}^\ast}_0(0)=0$ so replacing ${\bm{\phh}}$ by ${\bm{\phh}}^\ast$, in view of parts (iv) and (v), it suffices to prove the presence of an explanation graph or the (strong) presence of a Toom contour in ${\bm{\phh}}^\ast$.
We now come to the main subject of this subsection, which is to link minimal explanations to explanation graphs. We start with a useful observation.
\begin{lemma}[Presence of an explanation graph]
Assume\label{L:exnul} that ${\bm{\phh}}$ satisfies (\ref{wlog}). Then properties (ii) and (iii) of Definition~\ref{def:finexpres} imply property~(i).
\end{lemma}
\begin{Proof}
Property~(ii) of Definition~\ref{def:finexpres} implies that
\begin{equation}\label{U0}
\overline x_t(i)=0\quad\forall (i,t)\in U_\ast.
\end{equation}
We next claim that for $(i,t)\in U\backslash U_\ast$,
\begin{equation}\label{Unext}
\overline x_{t-1}(j)=0\quad\forall\big((i,t),(j,t-1)\big)\in\vec H
\quad\mbox{implies}\quad
\overline x_t(i)=0.
\end{equation}
Indeed, if $\overline x_{t-1}(j)=0$ for all $\big((i,t),(j,t-1)\big)\in\vec H$, then by
property~(iii) of Definition~\ref{def:finexpres}, for each $1\leq s\leq\sigma$, there is a $k\in A_s(\varphi_{(i,t)})$ such that $\overline x_{t-1}(i+k)=0$, which by (\ref{wlog}) implies that $\overline x_t(i)=0$. Define inductively $U_0:=U_\ast$ and $U_{n+1}:=\{u\in U:v\in U_n\ \forall (u,v)\in\vec H\}$. Then (\ref{U0}) and (\ref{Unext}) imply that $\overline x_t(i)=0$ for all $(i,t)\in\bigcup_{n=0}^\infty U_n=U$.
\end{Proof}
We now make the link between minimal explanations and the presence of explanation graphs as defined in Definitions \ref{def:finiteexpl} and \ref{def:finexpres}. As before, ${\bm{\phh}}$ is a monotonic flow on $\{0,1\}^{{\mathbb Z}^d}$ taking values in $\{\phi_0,\ldots,\phi_m\}$, where $\phi_0=\phi^0$ and $\phi_1,\ldots,\phi_m$ are non-constant. Moreover, we have fixed an integer $\sigma\geq 2$ and for each $1\leq k\leq m$ and $1\leq s\leq\sigma$, we have fixed $A_s(\phi_k)\in{\cal A}(\phi_k)$.
\begin{lemma}[Minimal explanations and explanation graphs]
Assume\label{L:expl} that ${\bm{\phh}}$ satisfies (\ref{wlog}) and that ${\bm{\psi}}$ is a minimal explanation for $(0,0)$ such that ${\bm{\psi}}\prec{\bm{\phh}}$. For each $(i,t)\in{\mathbb Z}^{d+1}$ such that $\psi_{(i,t)}\neq\phi^1$, let $Z_{(i,t)}$ be as in point~(v) of Lemma~\ref{L:minexpl}. Then there is an explanation graph $(U,{\cal H})$ for $(0,0)$ present in ${\bm{\phh}}$ such that:
\begin{equation}\begin{array}{c}\label{UUH}
\displaystyle U=\big\{(i,t)\in{\mathbb Z}^{d+1}:\psi_{(i,t)}\neq\phi^1\big\},\quad
U_\ast=\big\{(i,t)\in U:\psi_{(i,t)}=\phi^0\big\},\\[5pt]
\displaystyle\mbox{and}\quad
\vec H=\big\{\big((i,t),(j,t-1)\big):(i,t)\in U,\ j\in Z_{(i,t)}\big\}.
\end{array}\ee
\end{lemma}
\begin{Proof}
Let $U$ and $U_\ast$ be defined by (\ref{UUH}). Let $(i,t)\in U\backslash U_\ast$. Since ${\bm{\psi}}\prec{\bm{\phh}}$ we have ${\cal Z}(\psi_{(i,t)})\subset{\cal Z}(\varphi_{(i,t)})$ and hence $Z_{(i,t)}\in{\cal Z}(\varphi_{(i,t)})$, so by (\ref{Zchar}), for each $1\leq s\leq\sigma$, we can choose some $j_s(i,t)\in Z_{(i,t)}\cap A_s(\varphi_{(i,t)})$. We claim that $Z_{(i,t)}=\{j_1(i,t),\ldots,j_\sigma(i,t)\}$. To see this, set $Z'_{(i,t)}:=\{j_1(i,t),\ldots,j_\sigma(i,t)\}$. Then $Z'_{(i,t)}\subset Z_{(i,t)}$ and (\ref{wlog}) implies that $Z'_{(i,t)}\cap A\neq\emptyset$ for all $A\in{\cal A}(\varphi_{(i,t)})$, which by (\ref{Zchar}) implies that $Z'_{(i,t)}\in{\cal Z}^\uparrow(\varphi_{(i,t)})$. By (\ref{upmin}), $Z_{(i,t)}$ is a minimal element of ${\cal Z}^\uparrow(\varphi_{(i,t)})$, so we conclude that $Z'_{(i,t)}=Z_{(i,t)}$.
We claim that setting
\begin{equation}
\vec H_s:=\big\{\big((i,t),(j_s(i,t),t-1)\big):(i,t)\in U\backslash U_\ast\big\}
\qquad(1\leq s\leq\sigma)
\end{equation}
now defines an explanation graph that is present in ${\bm{\phh}}$. Properties (i), (ii), (iv) and (v) of Definition~\ref{def:finiteexpl} follow immediately from our definitions and the fact that $\psi_{(0,0)}\neq\phi^1$ since ${\bm{\psi}}$ is a minimal explanation for $(0,0)$. Property~(iii) follows from Lemma~\ref{L:minexpl}~(vi). This proves that $(U,{\cal H})$ is an explanation graph. To see that $(U,{\cal H})$ is present in ${\bm{\phh}}$, we must check conditions (i)--(iii) of Definition~\ref{def:finexpres}. Condition~(i) follows from Lemma~\ref{L:minexpl}~(vii) and conditions (ii) and (iii) are immediate from our definitions.
\end{Proof}
\subsection{Discussion}
As before, let ${\bm{\phh}}$ be a monotonic flow on $\{0,1\}^{{\mathbb Z}^d}$ taking values in $\{\phi_0,\ldots,\phi_m\}$, where $\phi_0=\phi^0$ and $\phi_1,\ldots,\phi_m$ are non-constant. Let $\sigma\geq 2$ and for each $1\leq k\leq m$ and $1\leq s\leq\sigma$, let $A_s(\phi_k)\in{\cal A}(\phi_k)$ be fixed. Consider the following conditions:
\begin{enumerate}
\item $\overline x^{\bm{\phh}}_0(0)=0$,
\item there exists a minimal explanation ${\bm{\psi}}$ for $(0,0)$ such that ${\bm{\psi}}\prec{\bm{\phh}}$,
\item there is an explanation graph $(U,{\cal H})$ for $(0,0)$ present in ${\bm{\phh}}$,
\item there is a Toom contour $(V,{\cal E},v_\circ,\psi)$ rooted at $(0,0)$ present in ${\bm{\phh}}$.
\end{enumerate}
Theorem~\ref{T:contour} and Lemmas \ref{L:explan} and \ref{L:minexist} say that conditions (i)--(iii) are equivalent and imply (iv). As the example in Figure~\ref{fig:minexpl} showed, (iv) is strictly weaker than the other three conditions. This raises the question whether it is possible to prove Toom's stability theorem using a Peierls argument based on minimal explanations, as suggested in \cite{Pre07}.
Let us say that $(i,t)$ is a \emph{defective site} for a finite explanation ${\bm{\psi}}$ if $\psi_{(i,t)}=\phi^0$. Let $\phi$ be an eroder and let $M_n$ denote the number of minimal explanations ${\bm{\psi}}$ for $(0,0)$ with $n$ defective sites that satisfy $\psi_{(i,t)}\prec\phi$ whenever $(i,t)$ is not defective. We pose the following open problem:
\begin{quote}
Do there exist finite constants $C,N$ such that $M_n\leq CN^n$ $(n\geq 0)$?
\end{quote}
If the answer to this question is affirmative, then it should be possible to set up a Peierls argument based on minimal explanations, rather than Toom contours. In principle, such an argument has the potential to be simpler and more powerful than the Peierls arguments used in this article, but as we have seen the relation between minimal explanations and Toom contours is not straightforward and finding a good upper bound on the number of minimal explanations with a given number of defective sites seems even harder than in the case of Toom contours.
\subsection*{Acknowledgment}
We thank Anja Sturm who was involved in the earlier phases of writing this paper for her contributions to the discussions. We thank Ivailo Hartarsky for useful discussions. The first author is supported by grant 20-08468S of the Czech Science Foundation (GA CR). The second and third authors are supported by ERC Starting Grant 680275 ``MALIG''.
|
1,314,259,995,057 | arxiv | \section{\@startsection {section}{1}{\z@}
{-30pt \@plus -1ex \@minus -.2ex}
{2.3ex \@plus.2ex}
{\normalfont\normalsize\bfseries\boldmath}}
\renewcommand\subsection{\@startsection{subsection}{2}{\z@}
{-3.25ex\@plus -1ex \@minus -.2ex}
{1.5ex \@plus .2ex}
{\normalfont\normalsize\bfseries\boldmath}}
\renewcommand{\@seccntformat}[1]{\csname the#1\endcsname. }
\makeatother
{\it The Statistician} {\bf 47} (1998), 183-189.
\bibitem{K} M. Kouril, The van der Waerden number $W(2,6)$ is 1132, {\it Experiment. Math.} {\bf 17} (2008), 53-61.
\bibitem{Lawless} J. Lawless, Negative binomial and mixed Poisson regression,
{\it Canad. J. Stat.} {\bf 15} (1987), 209-225.
\bibitem{M} A. Marshall and I. Olkin, Bivariate distributions generated
from P\'olya-Eggenberger urn models, {\it J. Multivariate Anal.} {\bf 35} (1990), 48-65.
\bibitem{MR} B. D. McKay and S.P. Radziszowski, R(4,5) = 25, {\it Journal of Graph Theory} {\bf 19} (1995), 309-322.
\bibitem{R} {\tt R} Core Team (2016). {\tt R}: A language and environment for statistical computing, {\tt R} Foundation for Statistical Computing, Vienna, Austria, {\tt https://www.R-project.org}.
\bibitem{V} G. Venter, Effects of variations from Gamma-Poisson assumptions, {\it CAS Proceedings} {\bf LXXVIII} (1991), 41-55.
\bibitem{Z} D. Zeilberger, Symbolic moment calculus II: why is Ramsey theory sooooo eeeenormously hard,
{\it Integers} {\bf 7(2)} (2007), \#A34.
\end{thebibliography}
\end{document} |
1,314,259,995,058 | arxiv | \section{Abstract Layers for Verifiable Networks}
Program verification often involves both the addition of erasable annotations
\cite{Sascha:08} and program transformations to make the resulting
(semantically equivalent program) more suitable for verification \cite{fastver}. That is, unlike
standard transformations which aim to produce a program that runs faster, here, the goal is to produce
a more verifiable program. Our key insight is to leverage this ``programming to prove''
paradigm in a similar fashion when designing robust neural networks.
Based on this insight, we describe the novel concept of \emph{Abstract Layers}. These layers
are specifically provided by the network designer but differ from traditional concrete layers in
that they have no effect on the concrete execution. Instead, they only affect the analysis of
the network, i.e., they only modify abstract elements that propagate through
the layers (e.g., boxes or hybrid zonotopes).
We describe two types of abstract layers designed to tune the precision and scalability
of the analysis with the Hybrid Zonotope domain. For all abstract layers, we
describe their effect on a given hybrid zonotope $h$ with $m$ correlated
error coefficients, producing a new hybrid zonotope $h'$. For our abstract layers, it holds that $h'_C = h_C$.
\subsection{Correlation layers}
A correlation layer increases the precision of the analysis in successive layers by producing
a new hybrid zonotope $h'$ which contains more correlated error coefficients than the original input $h$.
We note that here we have $\gamma(h') = \gamma(h)$, that is, both hybrid zonotopes actually represent the same set of points.
However, the advantage of $h'$ over $h$ is that $h'$ contains more shared dependencies between different dimensions (variables) than $h$,
meaning that successive steps of the analysis using $h'$ will produce more precise results than those same steps using $h$.
Informally, a correlation layer selects a set of dimension indices (variables) $P$ and creates $|P|$ new correlated
error coefficients. For each selected variable $i \in P$, we introduce one correlated error coefficient whose value is that of
the variable's uncorrelated error coefficient. All other remaining correlated coefficients (a total of $|P| - 1$) for $i$ are set to 0.
For all variables not in $P$, their new correlated error coefficients are all set to $0$. More formally, given $h$, we define $h'$ as follows:
\begin{tabular}{lccr}
$h'_{B,i} = h_{B,i}$ & & & $i \notin P$\\
$h'_{B,i} = 0$ & & & $i \in P$ \\
$h'_{E,i,j} = h_{E,i,j}$ & & & $\forall 0 \le i < p, 0 \le j < m$\\
$h'_{E,i,m + t} = h_{B,i}$ & & & $i \in P \wedge t = |P_{< i}|$ \\
$h'_{E,i,m + t} = 0$ & & & $\forall t < |P| . i \notin P \vee t \neq |P_{< i}|$ \\
\end{tabular}
\input{neural_network}
Here we use $P_{< i}$ to denote the subset of $P$ where each element is smaller than $i$.
Next, we define four variants of a correlation layer based on the choice of the set $P$.
\textbf{CorrelateAll} correlates all uncorrelated coefficients in all $p$ dimensions thereby adding $p$ correlated error coefficients. Formally, it uses $P = \{ i~~|~~0 \leq i < p \}$.
\textbf{CorrelateFixed$_k$} correlates $k$ fixed dimensions, chosen by taking every $\frac{p}{k}$-th element of the flattened list of dimension indices. Formally, we have $P = \{ \lfloor\frac{i \cdot p}{k}\rfloor~~|~~0 \leq i < k\}$.
\textbf{CorrelateMax$_k$} correlates the first $k$ dimensions whose interval concretization (see \secref{background}) has the largest upper bound value. This heuristic aims to improve precision while still keeping the analysis scalable.
Formally, we have $P = \{ i~~|~~\textsc{UB}(\iota_\textit{H}(h)_i) \in \textsc{top}_{k}(\textsc{UB}(\iota_\textit{H}(h))) \}$ where $\textsc{top}_{k}$ returns the $k$ largest elements and \textsc{UB} returns the upper bound of an interval.
\textbf{CorrelateMaxPool$_{c,w,h,s}$} correlates dimensions chosen using MaxPooling \cite{krizhevsky2012imagenet}. We apply MaxPooling with kernel size $(c,w,h)$ and stride $s$ on a function $f$ defined over $h$. If the
correlation is applied before the first layer then $f=h_C$ otherwise $f=h_B$. Formally, $P = \{i~~|~~ f(i) \in \textsc{Maxpool}_{c,w,h,s}(f) \}$.
\subsection{Decorrelation layers}
The purpose of decorrelation layers is opposite that of correlation layers: to reduce the number of correlated coefficients so as to make the analysis for successive layers more efficient but less precise. Concretely, a decorrelation layer removes correlated error coefficients and adds their absolute sum to the value of uncorrelated error coefficients in each dimension. We now introduce two choices for the set $P$, each defining different dimensions to be decorrelated:
\textbf{DecorrelateAll} produces a hybrid zonotope with no correlated coefficients in any dimension and in each dimension, the uncorrelated coefficient is defined as:
$$h'_{B,i} = h_{B,i} + \sum_{j=0}^{m-1} |h_{E,i,j}|$$
\textbf{DecorrelateMin$_k$} is based on a heuristic to minimize the loss of
precision due to decorrelation by removing $m - k$ correlated error coefficients whose absolute sum in all dimensions is the smallest. As a result, $k$ correlated coefficients remain in the output. Formally, we define $P$ to be the indices of the $m - k$ smallest elements of the sequence $\{ \sum_{i=0}^{p-1} |h_{E,i,j}| \}_{j=0\ldots m-1}$. Then, the new zonotope $h'$, where $i \in [0,p)$, is defined as:
\begin{tabular}{lrrr}
$h'_{B,i} = h_{B,i} + \sum_{j \in P} |h_{E,i,j}|$ & & & \\
\\
$h'_{E,i,j} = h_{E,i,t}$ & $t \notin P \wedge j = t - |P_{< t}|$& &
\end{tabular}
Informally, in the first equation, we accumulate all correlated coefficients chosen for removal (that is, the set $P$) into the uncorrelated coefficient, while the second equation ensures the remaining correlated coefficients are shifted to be next to each other (order is preserved).
\subsection{DeepLoss}
For deeper neural networks such as ResNet-18, it is possible for naive abstraction imprecision to grow exponentially to the point where overflow occurs before the final loss is calculated, making optimizing that loss futile.
In such cases, we would like the network to produce more precise results in intermediate layers, before an overflow occurs.
As these layers do not have the same number of neurons as target classifications, we cannot optimize using a standard
provability loss. Instead, a loss on a generic heuristic for provability must be defined on the output of a specific layer.
As this loss does not affect concrete execution and operates using the abstract element from a specific layer, we also consider it a form of an abstract layer.
We define the following losses on an interval concretization $c$ in $n$ dimensions:
\begin{tabular}{lcr}
$L_{\text{lb},f,i}(c)$ & = & $\max \{f(c_{j,2} - c_{i,1})~~|~~c_{j,1} \leq c_{i,1} \}$\\
$L_{\text{ub},f,i}(c)$ & = & $\max \{f(c_{i,2} - c_{j,1})~~|~~c_{i,2} \leq c_{j,2} \}$\\
$L_{\text{deep},f}(d)$ & = & $\frac{1}{2n} \sum_{i=0}^{n-1} (L_{\text{lb},f,i}(\iota(d)) + L_{\text{ub},f,i}(\iota(d)))$
\end{tabular}
where $f$ is a positive activation and $L_{\text{deep},f}$ combines the first two losses for each dimension of an arbitrary abstract element $d$. Intuitively, this loss measures and sums for each dimension the worst offending overlap between the concretization lower bound in that dimension and the upper bound of any other dimension, and vice versa.
In our experiments, we used ReLU for $f$. A naive implementation of the above loss would require $n^2$ computations (and potentially $n^2$ space on a GPU), which could be problematic given that the loss is intended to be used on the output of an intermediate, and presumably quite wide, layer. While a matrix multiplication would also typically involve using up to an $n^2$ sized matrix, this loss is intended to be used between convolutions which typically permits significantly wider outputs through utilizing significantly smaller kernels. We implement it leveraging one dimensional MaxPool and Sort so that marshaling between the CPU and GPU is not required, and such that algorithmic optimizations to MaxPool can be leveraged to potentially\footnote{Provided an optimal implementation of MaxPool} provide an $O(n \log n)$ implementation.
\subsection{Example network with abstract layers}
\figref{nn2} shows our analysis with abstract layers on an example toy feedforward network. The neural network contains three neurons per layer. We add CorrelateMax$_{2}$ and DecorrelateMin$_{1}$ abstract layers after the first and second hidden layer respectively. We show the 3-dimensional shapes propagated through each layer along with the corresponding hybrid zonotope based encoding. The top, middle, and bottom neurons in each layer represent the x,y, and z-directions in the shapes. Our analysis abstracts the input region with a Box and propagates it through the first hidden layer. After correlations are added, the abstraction is shown in blue (before correlations, the shape is gray).
\textbf{CorrelateMax$_{2}$} changes the encoding of the abstract element obtained after the first hidden layer by creating correlated error coefficients for neurons $x_{4}$ and $x_{5}$ whose upper bound is larger than that for $x_{6}$. The introduction of correlated error coefficients increases the precision of the result obtained \emph{after} applying the transformers for the second hidden layer as the resulting shape is no longer a box. We note that neuron $x_{9}$ is set to $0$ in the result.
\input{goalconstruct}
\textbf{DecorrelateMin$_{1}$} removes the second correlated error coefficient as its absolute sum over all neurons is smaller. The absolute value of this coefficient is added to the uncorrelated coefficient in each dimension. This changes the concretization of the abstract element by removing dependencies making it less imprecise while increasing scalability. The output layer transformations next produce a result more precise than the box obtained without the abstract layers.
\section{Evaluation}
\begin{strip}
\centering
\captionof{table}{ The networks compared and their sizes and speeds under different training schemes. }
\vskip 0.1in
{\footnotesize
\begin{tabu}{@{}rrrrrrr@{}}
\toprule
Network Name
& Neurons
& Parameters
& Abstract Layers
& Training Scheme
& Batch Size
& Seconds Per Epoch
\\ \midrule
\multirow{11}{*}{ ResNet-Tiny } & \multirow{11}{*}{ 311796 } & \multirow{11}{*}{ 18415231 }
& \multirow{5}{*}{ None } & Baseline & 50 & 106 \\
& & & & InSamp & 50 & 106 \\
& & & & InSampLPA & 50 & 107 \\
& & & & Adv$_1$ISLPA & 50 & 161 \\
& & & & Adv$_3$ISLPA & 50 & 130 \\ \clinelight{4-}
& & & \multirow{5}{*}{ FewCombo } & Baseline & 50 & 209 \\
& & & & InSamp & 50 & 205 \\
& & & & InSampLPA & 50 & 206 \\
& & & & Adv$_1$ISLPA & 50 & 220 \\
& & & & Adv$_3$ISLPA & 50 & 265 \\ \clinelight{4-}
& & & ManyFixed & Adv$_1$ISLPA & 50 & 347 \\
\clinelight{}
\multirow{5}{*}{ SkipNet-18 } & \multirow{5}{*}{ 558080 } & \multirow{5}{*}{ 15626634 }
& \multirow{3}{*}{ None } & Baseline$_{18}$ & 200 & 152 \\
& & & & InSamp$_{18}$ & 200 & 102 \\
& & & & Adv$_5$IS$_{18}$ & 200 & 260 \\ \clinelight{4-}
& & & Combo & InSamp$_{18}$ & 100 & 342 \\ \clinelight{}
ResNet-Large & 639976 & 65819474 & LargeCombo & BiAdv$_{L}$ & 50 & 527 \\ \clinelight{}
ResNet-Large & 558k & 18m & ResNet-18 & Adv$_5$ISLPA$_{R18}$ & 200 & 233 \\ \clinelight{}
ResNet-Large & 967k & 25m & ResNet-34 & InSampLPA$_{R34}$ & 200 & 176 \\
\bottomrule
\end{tabu} }
\label{tab:nets}
\end{strip}
\quad
\begin{strip}
\centering
\captionof{table}{ Comparison of different abstract layers and training schemes on ResNet-Tiny. }
\vskip 0.1in
{ \footnotesize
\begin{tabu}{@{}rrrrrr@{}} \toprule
Train Scheme
& Abstract Layers
& Seconds Per Epoch
& Standard Accuracy \%
& Attacked Accuracy \%
& Verified Robust \%
\\ \midrule
\multirow{2}{*}{Baseline} & None & 105 & 32.9 & 23.7 & 19.6 \\
& FewCombo & 209 & 32.7 & 24.1 & 19.2 \\ \clinelight{1-}
\multirow{2}{*}{InSamp} & None & 106 & 33.6 & 24.7 & 19.3 \\
& FewCombo & 205 & 30 & 23.2 & 20.3 \\ \clinelight{1-}
\multirow{2}{*}{InSampLPA} & None & 107 & 30.1 & 22.5 & 19.2 \\
& FewCombo & 206 & 31.1 & 23 & 20.7 \\ \clinelight{1-}
\multirow{2}{*}{Adv$_3$ISLPA} & None & 161 & 28.2 & 22.2 & 19.2 \\
& FewCombo & 267 & 28.4 & 22.5 & 20.4 \\
\bottomrule
\end{tabu} }
\label{tab:tinyCompareMethod}
\end{strip}
\quad
\clearpage
\section{Further Accuracy Results for MNIST}
\begin{strip}
\centering
\captionof{table}{ Networks for MNIST. }
\vskip 0.1in
{ \footnotesize
\begin{tabu}{@{}rrrr@{}} \toprule
Network Name
& Neurons
& Parameters
& Depth (ReLUs)
\\ \midrule
FFNN & 500 & 119910 & 5 \\
ConvSmall & 3604 & 89606 & 3 \\
ConvMed & 4804 & 166406 & 3 \\
ConvBig & 48064 & 1974762 & 6 \\
ConvLargeIBP & 175816 & 5426402 & 6 \\
TruncatedVGG & 151040 & 13109706 & 5 \\
\bottomrule
\end{tabu} }
\label{tab:mnist_nets}
\end{strip}
\begin{strip}
\centering
\captionof{table}{ MNIST with $\epsilon=0.1$. }
\vskip 0.1in
{ \footnotesize
\begin{tabu}{@{}rrrr@{}} \toprule
Network & Standard Accuracy & PGD Accuracy & HBox Provability \\ \midrule
FFNN & 93.3\% & 90.8\% & 88.9\% \\
ConvSmall & 97.8\% & 96.2\% & 95.5\% \\
ConvMed & 97.8\% & 96.3\% & 95.5\% \\
ConvBig & 98.5\% & 97.2\% & 95.6\% \\
ConvLargeIBP & 98.7\% & 97.5\% & 95.8\% \\
TruncatedVGG & 98.9\% & 97.7\% & 95.6\% \\
\bottomrule
\end{tabu} }
\label{tab:mnist1}
\end{strip}
\begin{strip}
\centering
\captionof{table}{ MNIST with $\epsilon=0.3$. }
\vskip 0.1in
{ \footnotesize
\begin{tabu}{@{}rrrr@{}} \toprule
Network & Standard Accuracy & PGD Accuracy & HBox Provability \\ \midrule
FFNN & 80.2\% & 73.4\% & 62.6\% \\
ConvSmall & 96.9\% & 93.6\% & 89.1\% \\
ConvMed & 96.6\% & 93.1\% & 89.3\% \\
ConvBig & 97.0\% & 95.2\% & 87.8\% \\
ConvLargeIBP & 97.2\% & 95.4\% & 88.8\% \\
TruncatedVGG & 96.5\% & 94.4\% & 87.6\% \\
\bottomrule
\end{tabu} }
\label{tab:mnist3}
\end{strip}
\quad
\clearpage
\section{Further Accuracy Results for CIFAR10}
\begin{strip}
\centering
\captionof{table}{ Networks for CIFAR10. }
\vskip 0.1in
{ \footnotesize
\begin{tabu}{@{}rrrr@{}} \toprule
Network Name
& Neurons
& Parameters
& Depth (ReLUs)
\\ \midrule
FFNN & 500 & 348710 & 5 \\
ConvSmall & 4852 & 125318 & 3 \\
ConvMed & 6244 & 214918 & 3 \\
ConvBig & 62464 & 2466858 & 6 \\
ConvLargeIBP & 229576 & 6963554 & 6 \\
TruncatedVGG & 197120 & 17043018 & 5 \\
\bottomrule
\end{tabu} }
\label{tab:cifar_nets}
\end{strip}
\begin{strip}
\centering
\captionof{table}{ CIFAR10 with $\epsilon=3/255$. }
\vskip 0.1in
{ \footnotesize
\begin{tabu}{@{}rrrr@{}} \toprule
Network & Standard Accuracy & PGD Accuracy & HBox Provability \\ \midrule
FFNN& 45.1\% & 37.0\% & 33.1\% \\
ConvSmall& 56.1\% & 46.2\% & 42.4\% \\
ConvMed& 56.9\% & 46.6\% & 43.2\% \\
ConvBig& 61.9\% & 51.4\% & 45.0\% \\
ConvLargeIBP& 61.1\% & 51.4\% & 44.5\% \\
TruncatedVGG& 62.3\% & 51.4\% & 45.5\% \\
\bottomrule
\end{tabu} }
\label{tab:cifar3}
\end{strip}
\begin{strip}
\centering
\captionof{table}{ CIFAR10 with $\epsilon=8/255$. }
\vskip 0.1in
{ \footnotesize
\begin{tabu}{@{}rrrr@{}} \toprule
Network & Standard Accuracy & PGD Accuracy & HBox Provability \\ \midrule
FFNN& 33.5\% & 23.8\% & 19.0\% \\
ConvSmall& 42.6\% & 30.5\% & 24.9\% \\
ConvMed& 43.6\% & 30.3\% & 24.7\% \\
ConvBig& 46.0\% & 34.2\% & 25.2\% \\
ConvLargeIBP& 46.2\% & 34.7\% & 27.2\% \\
TruncatedVGG& 45.9\% & 34.4\% & 27.0\% \\
\bottomrule
\end{tabu} }
\label{tab:cifar8}
\end{strip}
\quad
\clearpage
\section{Networks and Abstract Layers}
\RecustomVerbatimCommand{\VerbatimInput}{VerbatimInput}%
{fontsize=\scriptsize,
}
\label{nets}
\begin{strip}
\captionof{figure}{ResNet-Tiny, None}
\VerbatimInput{nets/resnetTinyNone.txt}
\end{strip}
\quad
\clearpage
\begin{strip}
\captionof{figure}{ResNet-Tiny, FewCombo}
\VerbatimInput{nets/resnetTinyFewCombo.txt}
\end{strip}
\quad
\clearpage
\begin{strip}
\captionof{figure}{ResNet-Tiny, ManyFixed}
\VerbatimInput{nets/resnetTinyManyFixed.txt}
\end{strip}
\quad
\clearpage
\begin{strip}
\captionof{figure}{SkipNet-18, None}
\VerbatimInput{nets/resnet18none.txt}
\end{strip}
\quad
\clearpage
\begin{strip}
\captionof{figure}{SkipNet-18, Combo}
\VerbatimInput{nets/resnet18Combo.txt}
\end{strip}
\quad
\clearpage
\begin{strip}
\captionof{figure}{ResNet-Large, LargeCombo}
\VerbatimInput{nets/resnetLargeCombo.txt}
\end{strip}
\quad
\clearpage
\begin{strip}
\captionof{figure}{ResNet-18, None}
\VerbatimInput{nets/resnet18.txt}
\end{strip}
\quad
\clearpage
\begin{strip}
\captionof{figure}{ResNet-34, None}
\VerbatimInput{nets/resnet34.txt}
\end{strip}
\quad
\clearpage
\begin{strip}
\captionof{figure}{DenseNet-100, None}
\VerbatimInput{nets/densenet100.txt}
\end{strip}
\quad
\clearpage
\section{Background on Robust Training}\label{Se:background}
We now provide necessary background on training neural networks to be provably robust against adversarial examples. A neural network $N_\theta\colon \mathbb{R}^d \rightarrow \mathbb{R}^k$ maps a $d$-dimensional input to a $k$-dimensional output based on learned weights $\theta$. Let $B_\epsilon(x)$ be the $\ell_\infty$-ball of radius $\epsilon$ around an input $x\in\mathbb{R}^d$. A network $N_\theta$ is called \emph{$\epsilon$-robust} around a point $x \in \mathbb{R}^d$ if there exists a class $i\in\{1,\ldots,k\}$ such that $N_\theta(\tilde x)_i > N_\theta(\tilde x)_j$ for all $\tilde x \in B_\epsilon(x)$ and all $j\in\{1,\ldots,k\}$ with $j \ne i$. The goal of a robust training procedure is to learn a $\theta$ such that: (i) $N_{\theta}$ assigns the correct class $y_i$ to each training example $x_i$, and (ii) $N_\theta$ is $\epsilon$-robust around each example $x_i$.
\paragraph{Differentiable Abstract Interpretation}
In this work we leverage the differentiable abstract interpretation framework introduced by \citet{diffai}. Here, one verifies neural network robustness and formulates provability losses by constructing sound overapproximations using \emph{abstract interpretation} \cite{CC77}. We now introduce the necessary terms used later in the paper.
\begin{definition}
An \emph{abstract domain} $\mathcal D$ consists of: (a) abstract elements representing a set of concrete points in $\mathcal{P}(\mathbb{R}^p)$ for $p\in\mathbb N$, (b) a \emph{concretization function} $\gamma\colon\mathcal D\to\mathcal{P}(\mathbb{R}^p)$ mapping an abstract element $d \in \mathcal D $ to the set of concrete points it represents, and (c) a set of abstract transformers $T^{\#}$ approximating the concrete transformer $T$ in $\mathcal{P}(\mathbb{R}^p)$, i.e., $T(\gamma(d)) \subseteq \gamma(T^\#(d))$.
\end{definition}
Our approach additionally requires the existence of an abstraction function $\alpha\colon\mathcal{P}(\mathbb{R}^p)\to\mathcal D$ mapping the set of concrete points in $\mathbb{R}^p$ to an abstract element $d \in \mathcal{D}$.
Abstract transformers compose and hence by defining abstract transformers for each basic operation in a neural network $N$, we can derive an overall abstract transformer $T^\#_N$ for the entire $N$.
We apply abstract interpretation to compute $T^\#_N(\alpha(B_\epsilon(x)))$, describing a superset of the possible outputs of $N$ for all inputs in $B_\epsilon(x)$ which can be used to compute an abstract loss as in \citet{diffai}.
\input{zonotope_vs_box}
\paragraph{Hybrid Zonotope Domain}
In this work we use the \emph{Hybrid Zonotope Domain} as described by \citet{diffai}. This domain, introduced originally by \citet{perturbed}, is a generalization of two domains: (i) the simple Box domain (the Box domain is also referred to as interval bound propagation in \citet{ibp}) and (ii) the base zonotope domain \citep{t1p}. The main benefit of hybrid zonotopes is that they allow for more fine-grained control of analysis precision and performance.
The Hybrid Zonotope domain associates with every computed result $v$ (e.g., a neuron) in the network, a triplet $h_v = \langle (h_C)_v, (h_B)_v, (h_E)_v \rangle$ where $h = \langle h_C, h_B, h_E \rangle$ and is referred to as the hybrid zonotope over all $p$ variables. Here, $(h_C)_v\in\mathbb{R}$ is a center point, $(h_B)_v\in\mathbb{R}_{\ge0}$ is a non-negative \emph{uncorrelated error coefficient} (similar to the Box domain), and $(h_E)_v~\in~\mathbb{R}^m$ are the \emph{correlated error coefficients} the number $m$ of which determine the accuracy of the domain.
These coefficients define an affine function $\widehat{h}$ which is parameterized by the \emph{correlated error terms} $e~\in~[-1,1]^m$ and an \emph{uncorrelated error term} $\beta~\in~[-1,1]^p$:
\[
\widehat{h}(\beta,e) = ({h_1}(\beta,e), \ldots, {h_p}(\beta,e))
\]
where:
\[
\widehat{h_v}(\beta,e) = (h_C)_v + (h_B)_v \cdot \beta_v + (h_E)_v \cdot e
\]
Different variables share the correlated error terms which introduces dependencies between variables making over-approximations more precise than those produced with the Box domain (which does not track dependencies).
Formally, the \emph{concretization} function $\gamma_\textit{H}$ of a hybrid zonotope $h$ is
\[
\gamma_\textit{H}(h) = \{ \widehat{h}(\beta,e) \mid \beta \in [-1,1]^p, e \in [-1,1]^m \}.
\]
A box $b$ can be expressed as a hybrid zonotope $h$ with $h_\textit{C}=b_\textit{C}$ (the box's center), $h_\textit{B}=b_\textit{B}$ (the box's radius) and $m=0$.
Descriptions of our hybrid zonotope transformers (e.g., ReLU), can be found in \citet{diffai}.
\paragraph{Interval concretization}
For operations such as constructing an abstract loss or building heuristics in abstract layers, it is necessary to determine the bounds of a hybrid zonotope $h$ for the $i$-th variable using \emph{interval concretization}:
\[
\iota_\textit{H}(h)_i = [ (h_\textit{C})_i - \epsilon_{\textit{H}}(h)_i ,\; (h_\textit{C})_i + \epsilon_{\textit{H}}(h)_i ]
\]
where $\epsilon_{\textit{H}}(h)_i = (h_\textit{B})_i+\sum_{j = 1}^{m} \left\lvert (h_\textit{E})_{i,j}\right\rvert$ is the \emph{total error}.
\paragraph{Example: Box vs. Hybrid Zonotope}
\figref{transformers} shows an affine transformation on inputs abstracted in both the Box and the Hybrid Zonotope domains. The box representation in \figref{transformers} (a) only contains the center and the uncorrelated error coefficients whereas the hybrid zonotope representation in \figref{transformers} (b) also contains non-zero correlated error coefficients. The affine transformation creates dependency between $x_{3}$ and $x_{4}$ as they are assigned values using affine expressions defined over the same variables $x_{1}$ and $x_{2}$. The Box domain cannot capture this and as a result its output is less precise (contains more concrete points) than the one produced with Hybrid Zonotope domain.
\section{Conclusion}
We introduced a method for training provably robust networks based on the novel concept of abstract layers and a domain specific language for specifying complex training objectives. Our experimental evaluation demonstrates that our approach is effective in training provably robust networks that are an order of magnitude larger than those considered in prior work.
\section{Experimental Setup}
Our system, and the code for reproducing experiments, is publicly available at \href{https://github.com/eth-sri/diffai}{https://github.com/eth-sri/diffai}. We implemented this system using PyTorch-0.4.1. We ran all experiments using GeForce RTX 2080 Ti GPUs. We do not use weight normalization reparameterization or clipping.
To demonstrate the effectiveness of our technique, we evaluate using the most challenging dataset commonly used for provable verification tasks, CIFAR-10 \cite{cifar}. We also use the largest commonly used epsilon, $\epsilon=0.031373 \sim 8/255$. All accuracies and verifiable robustness percentages use the full 10,000 image test set. To augment the dataset and make it easier to learn, random cropping with a padding of 4 was used (this maintains image size) as well as random horizontal flipping.
While IBP and MixTrain presented improvements to robust training, we did not compare against these systems. For IBP, the public code did not contain residual networks though we were able to integrate the proposed training improvements into DiffAI. For MixTrain, the codebase was unavailable, and we chose not to perform normalization on the dataset prior to usage. Instead, we add a fixed layer to each network in order to make attack and verification epsilons easier to compare across different systems.
For testing the attacked accuracy, we used MI-FGSM \cite{dong2018boosting} with $\mu=0.8$, 20 iterations, and a step size of $0.0031373$.
To test verifiable robustness, we used DiffAI's built-in Hybrid-Zonotope domain (described earlier).
\subsection{ Evaluated Networks }
A brief overview of the network sizes we evaluate on and their training speed under well performing training schemes, is shown in Table~\ref{tab:simple_nets}. To the best of our knowledge, no other system can train as deep and as large provable networks as our system. In the Appendix, we provide the complete table for all training schemes. Next, we give a brief description of these networks and our training parameters.
\textbf{ResNet-Tiny} is a wide residual network, 12 layers deep, similar to the ResNet described by \citet{wong2018scaling} but with more and wider layers shown in \figref{resnetabslayer}.
It has 50\% more neurons than the largest CIFAR10 network trained via IBP or \citet{mixtrain}. For this network we always use an initial learning rate of $0.001$ with a schedule as used by IBP, and Adam optimization \cite{kingma2014adam}. We also use an $L_2$ regularization constant of $0.01$.
\textbf{SkipNet-18} is an 18 layer deep network with 4 residual connections adapted from PyTorch's vision library.
For this network we always use an initial learning rate of $0.1$ and a schedule where the rate is multiplied by $0.1$ at steps 10, 20, 250 and 300.
Instead of Adam, standard SGD is used. The $L_2$ regularization constant is set to $0.0005$.
\textbf{ResNet-18 and ResNet-34} are 18 and 34 layer (respectively) deep residual networks adapted from PyTorch's vision library.
For these networks we always use an initial learning rate of $0.1$ and a similar schedule where the rate is multiplied by $0.1$ at steps 10, 20, 250, 300, and 350, and a batch size of 200.
We also use SGD here, but do not use any regularization.
\textbf{DenseNet-100} is a network with 99 layers, and many residual connections, adapted from the models proposed by \citet{huang2017densely}.
To our knowledge, this is the largest network in terms of depth and the number of neurons to have been provably trained so far.
For this network, we always use an initial learning rate of $0.1$ and a schedule where the rate is multiplied by $0.1$ at steps 20, 50, 200, 250, and 300.
We also use SGD here, with no regularization and a batch size of 50. Due to its size, it is only verified using the Box domain and not with the Hybrid Zonotope domain.
\subsection{ Abstract Layers }
To evaluate the effect of abstract layers, we investigated a variety of configurations for the above networks.
For all of our networks, we use CorrelateAll before the last linear layer during both training and testing.
This has the effect of not causing any loss of accuracy by that linear layer before the concretization of the loss function.
\textbf{None} means that no additional abstract layers are used.
\textbf{FewCombo} for ResNet-Tiny, has a CorrelateMax$_{32}$ layer before the first layer, a DecorrelateMin$_8$ after the first layer,
a DecorrelateMin$_4$ after the first wide residual block, a DecorrelateAll after the second wide residual block,
and a CorrelateMax$_{10}$ before the fully connected layers.
\textbf{ManyFixed} for ResNet-Tiny, has a CorrelateMax$_{32}$ layer before the first layer, a CorrelateFixed$_{16}$ then DecorrelateMin$_{16}$ after the first layer, a CorrelateFixed$_{8}$ then DecorrelateMin$_{8}$ after the first and second wide blocks, and a CorrelateFixed$_{4}$ then DecorrelateMin$_{4}$ after the third wide block, and a DecorrelateAll after the fourth.
\textbf{Combo} for SkipNet-18, has pairs of CorrelateFixed$_{k}$ and DecorrelateMin$_{\lfloor 0.5k \rfloor}$ with $k=20,10,5$ after layers 3, 4 and 5 respectively and
uses DeepLoss after the fourth layer with a weight schedule of Until(90,~Lin(0,~0.2,~50,~40),~0).
\textbf{LargeCombo} for ResNet-Large, has a CorrelateFixed$_{4}$ then DecorrelateMin$_{4}$ before wide residual blocks 1, 2, 3, and 4. Before the wide residual block 5, we place DecorrelateMin$_{2}$. It uses DeepLoss after block 2 and 5, with weight schedules of Until(1,~0,~Lin(0.5,~0,~50,~3)) and Until(24,Lin(0,~0.1,~20,~4),~Lin(0.1,~0,~50)).
A complete description of each network with each abstract layer combination can be found in the Appendix, along with a table showing its performance for every training scheme.
\section{ Experimental Results }
\begin{figure*}[t]
\centering
\begin{subfigure}[b]{0.48\textwidth}
\centering
\plot{0.8}{0.75}{plots/resnet_tiny_acc.tex}
\end{subfigure}
\hspace{0.035\textwidth}%
\begin{subfigure}[b]{0.48\textwidth}
\centering
\plot{0.8}{0.75}{plots/resnet_tiny_hbox.tex}
\end{subfigure}
\vspace{-0.9cm}
\caption{A toy comparison of Accuracy (a) and Verified Robustness (b) of training schemes with similar parameters to Baseline on ResNet-Tiny with and without abstract layers. }
\label{resnet_tiny_graph}
\vspace{-0.6cm}
\end{figure*}
We now demonstrate how our training schemes shown in Table~\ref{tab:methods} (and discussed earlier) can be used to train provably robust networks of sizes an order of magnitude larger than prior work. We additionally show how abstract layers can be used to further push the envelope of provable robustness.
\paragraph{ Comparing Training Schemes and Abstract Layers }
To evaluate which combinations of training schemes and abstract layers provide the best results, we first trained ResNet-Tiny using four training schemes both without abstract layers, and with the abstract layer setup described by FewCombo. We trained each for 400 epochs. The complete results are included in the Appendix, here we show the accuracy and verified robustness in Figure~\ref{resnet_tiny_graph}. We can observe that using abstract layers improves both provable robustness and accuracy when a more complex training scheme is used, and that benefits exist for provable robustness as well. When the objective is only to maximize provable robustness, the training schemes (there are several) which utilize InSamp \emph{and} abstract layers, are optimal. Without abstract layers, inclusion sampling alone appears to have a benefit on accuracy without significant detriment to provable robustness.
To further investigate the effect of abstract layers, we compare the results of training three configurations of abstract layers on ResNet-Tiny.
These can be seen in Table~\ref{tab:tinyCompareAbsLayers}, which shows the results on the test set after training with Adv$_1$ISLPA for 350 epochs.
\begin{table}[]
\centering
\caption{ Comparison of abstract layers on ResNet-Tiny. }
\vskip 0.1in
{ \footnotesize
\begin{tabu}{@{}rrrrr@{}} \toprule
Layers
& s/epoch
& Acc\%
& Attck\%
& Ver\%
\\ \midrule
None & 130 & 29.4 & 21.4 & 17.7 \\
FewCombo & 220 & 29.0 & 21.9 & 19.6 \\
ManyFixed & 345 & 28.9 & 21.4 & 19.2 \\
\bottomrule
\end{tabu}}
\label{tab:tinyCompareAbsLayers}
\vspace{-0.5cm}
\end{table}
In this experiment, we can see that ManyFixed actually does not perform as well as FewCombo in any metric, while for verified robustness both networks with abstract layers outperform the network without abstract layers.
While ManyFixed contains many more abstract layers and uses more correlation (thus making it significantly slower to train), the layers in FewCombo have been chosen more selectively. ManyFixed contains multiple iterations of CorrelateFix$_k$ immediately before DecorrelateMin$_k$ of decreasing size.
We hypothesize that placing a CorrelateFix immediately before DecorrelateMin diminishes the utility of DecorrelateMin's heuristic.
As uncorrelated error coefficients tend to accumulate and grow while correlated error terms tend to shrink, a saddle point is generated wherein the network would need to maximize error coefficients (thus decreasing accuracy) for neurons previously decided to be important (which will become correlated) and minimize error coefficients (thus increasing accuracy) for neurons that will be decided to be unimportant, in order to keep the coefficients from switching.
In summary, FewCombo is more efficient and more accurate than ManyFixed and is a key example for the necessity of the ``programming to prove'' methodology.
\paragraph{ Scaling to SkipNet-18 }
In order to build a defense scheme capable of training SkipNet-18 we found it necessary to, at a minimum, use InSamp$_{18}$ training.
Table~\ref{tab:resnet18compareMethod} demonstrates the result of training SkipNet-18 with a variety of training schemes for 400 epochs\footnote{ Baseline was stopped early at 350 epochs as it had clearly failed to train.}.
One can observe that Baseline diverged and, while InSamp$_{18}$ without abstract layers or DeepLoss
was able to train a SkipNet-18 model with the highest provable robustness, the highest accuracy was obtained using the same training scheme and Combo abstract layers.
The training scheme Adv$_5$IS$_{18}$ achieved a better compromise between provable robustness and accuracy.
\paragraph{ Larger And More Complex Architectures }
\begin{table}
\centering
\caption{ Comparison of training instances on SkipNet-18. }
\vskip 0.1in
{ \footnotesize
\begin{tabu}{@{}rrrrrr@{}} \toprule
Scheme
& Layers
& s/epoch
& Acc\%
& Attck\%
& Ver\%
\\ \midrule
Baseline$_{18}$ & None & 152 & 10.2 & - & - \\
InSamp$_{18}$ & None & 102 & 28.5 & 23.4 & 20.5 \\
Adv$_5$IS$_{18}$ & None & 260 & 28.4 & 23.8 & 21.2 \\
InSamp$_{18}$ & Combo & 342 & 29.5 & 23 & 18.5 \\
\bottomrule
\end{tabu} }
\label{tab:resnet18compareMethod}
\vspace{-0.7cm}
\end{table}
While the majority of our comparisons were performed on ResNet-Tiny and SkipNet-18, we use our schemes to scale to significantly larger networks.
To show this, we designed a larger wide residual network, ResNet-Large, with 70k more neurons than SkipNet-18 and 66 million parameters (more than four times as many as SkipNet-18).
Here, we found it necessary to use a combination of previously evaluated techniques, in addition to two DeepLoss layers.
For this network we used the BiAdv$_L$ training scheme, which constructs abstract boxes from adversarial attacks.
As training this network was significantly more expensive, taking 527 seconds per epoch, we halted training after 100 epochs. The results for this network can be seen in Table~\ref{tab:simple_nets}.
While neither the accuracy nor the verifiable robustness is particularly competitive with smaller networks, this is the deepest network (by shortest path from input to output) to have been proven robust for a competitive epsilon value while also achieving non-trivial accuracy. While ResNet-34 and DenseNet-100 have longer paths from input to output, they also have very short paths, which means that they could potentially learn a small and provably robust network first as an easier sub-problem.
On deeper networks and larger networks that have more residual connections, we found that abstract layers were not as necessary for training.
Here, we hypothesize that the network can provably learn the smaller network without the residual layers first, and then use them where possible when they do not too
seriously hurt provability. Table~\ref{tab:simple_nets} also shows the results of training ResNet-18, ResNet-34, and DenseNet-100. The largest, DenseNet-100, has 4.5 times the number of neurons of any network to appear in any other paper at the time of this publication with a non-trivial number of points
verified to be robust.
\section{Introduction}
\begin{figure*}[!t]
\centering
\includegraphics[width=\textwidth]{out/resnetTinyFewCombo.pdf}
\vspace{-1cm}
\caption{ResNet-Tiny with Abstract Layers. Layers with dark orange on their right include a ReLU, and the sphere with an $r$ in it is also a ReLU. Conv$^k_{s,p}$ is a convolution with a kernel size of $k$, a stride of $s$, and a padding of $p$. This net has 311k neurons and 18m parameters.}
\label{Fi:resnetabslayer}
\end{figure*}
Recent work has shown that neural networks are susceptible to adversarial attacks \cite{adversarialDiscovery}: small, imperceptible perturbations which cause the network to misclassify the input. This has led to growing interest in training procedures to produce robust networks \cite{gu2014towards, zheng2016improving}, new adversarial attacks \cite{papernot2016limitations, moosavi2017universal, xiao2018generating, athalye2017synthesizing, evtimov2017robust}, as well as defenses which use these attacks during training \cite{goodfellow2014explaining, tramer2017ensemble, yuan2017adversarial, huang2015learning, madry2017towards, dong2018boosting}. While networks defended using attacks may be experimentally robust, it has been shown that in general more data is needed \cite{schmidt2018adversarially} and that this style of training is sample inefficient \cite{khoury2018geometry}.
Further, while detecting adversarial attacks \cite{rozsa2016adversarial, bhagoji2017dimensionality, feinman2017detecting, grosse2017statistical} appears to be a promising contingency, \citet{CarliniW17} found that many of these techniques were insufficient.
The list of possible attacks is extensive (e.g., \cite{akhtar2018threat}) and constantly expanding, motivating the need for methods which can ensure that neural networks are \emph{provably} robust against these attacks. \citet{katz2017reluplex} developed a neural network verification system based on SMT solvers, however it only scaled to small networks. \citet{ai2} introduced abstract interpretation \cite{CC77} as a method for verifying much larger networks. However, as the size of networks that verification systems could handle increased, it became clear that verifiable robustness could be significantly improved by employing provably robust training. The first attempts for training provably robust networks \cite{raghunathan2018certified, kolter2017provable, dvijotham2018training} scaled to small sizes with at most two convolutional layers. Later work saw the development of two methods: (i) the dual-method in the case of \citet{wong2018scaling}, and (ii) differentiable abstract interpretation introduced by \citet{diffai} (DiffAI) and used in \citet{ibp} (IBP) and \citet{mixtrain} (MixTrain). While these pushed the boundary in terms of provable verified robustness and network size (with networks of up to $230$k neurons), scaling a provable defense to a full ImageNet sized network remains a key challenge. In particular, ResNet-34 represents an important milestone to achieving this goal as it is the smallest residual network proposed by \citet{resnet18}.
To address this challenge, we introduce a novel approach to robustness, one where the network itself is designed to be provably robust similar to attempts which aim to design networks to be experimentally robust by construction \cite{cisse2017parseval, sabour2017dynamic}. In particular, we introduce the paradigm of ``programming to prove'', long known to the programming languages community \cite{lf, fastver}, as a technique for creating provably robust architectures. We show how to integrate this idea with DiffAI, resulting in a system that can train a provably robust ResNet-34 (a smaller resnet is shown in \figref{resnetabslayer}).
\textbf{Main Contributions} Our main contributions are:
\begin{itemize}
\item {The concept of an abstract layer which has no effect on standard network execution but improves provably robust learning.}
\item {A domain specific language (DSL) for specifying sophisticated training objectives.}
\item {A complete implementation and evaluation of our method. Our experimental results indicate the approach can achieve provable robustness for networks an order of magnitude larger than prior work.}
\end{itemize}
\section{Specifying Training Objectives}
We introduce a domain specific language (DSL) for specifying training objectives and parameter scheduling. For example, it can capture the training loss and scheduling proposed by \citet{ibp}~(IBP).
\subsection{Specifying Schedules}
We describe two constructors for describing a schedule used to adjust the values of training parameters (e.g., size of the balls around images used in training) dynamically, leading to improved results. A schedule is a function which uses the current training time step corresponding to the fractional number of epochs completed (e.g., completing 25000 of the 50000 examples from the first epoch on CIFAR10 would provide a time-step value of 0.5). The constructors below describe how to (recursively) build this function.
\textbf{Lin($a, b, m, n$)} specifies the parameter value should be the start value $a$ for the first $m$ epochs. Then, linear parameter annealing between start value $a$ and end value $b$ over $n$ epochs should be used to determine the parameter value.
\textbf{Until($m,s_{1},s_{2}$)} specifies that the first-schedule constructor $s_{1}$ will be used to determine the parameter value until $m$ epochs are reached, and then the second-schedule $s_{2}$ will be used but will be given the time with $m$ epochs subtracted.
\subsection{Specifying Training Goals}
We next describe the goal-constructors for describing how to build the abstraction function and training loss. At timestep $s$ of training a network $N$ on an example $o$ with a target label ($t$), for each goal constructor ($g$) in the abstract syntax tree (AST), we build: (i) an abstraction function ($\alpha^g$) which takes the input box for training specified by the lower ($l$) and upper bound ($u$) vectors as input and returns an abstract element $d=\alpha^g(l,u)$, and (ii) a loss function loss$^g$($d,t$). Before training, the user provides the goal which is parsed into an AST ($g_U$) and a training width ($\epsilon$). The loss used to train is, for a dataset with values in the range of $a$ to $b$:
\[
\text{loss}^{g_U}(T^\#_N(\alpha^{g_U}(\text{max}(o - \epsilon, a),\text{min}(o + \epsilon, b))), t).
\]
\tabref{goal_constructors} formalizes our goal constructors, described below:
\textbf{Point} returns the center of the input box specified by $l$ and $u$ for training and uses the cross entropy loss.
\textbf{Normal} returns a point sampled from the normal distribution around the input box (via the function \textsc{normal\_rand}) and clipped to that box. It uses the cross entropy loss.
\textbf{IFGSM$_k$} uses $k$ iterations of FGSM to find an adversarial example in the input box and uses the corresponding point for training. The cross entropy loss is used.
\textbf{Box} returns a hybrid zonotope abstract element with no correlated error coefficients abstracting the box between the lower ($l$) and upper-bound ($u$).
The loss concretizes the abstract element $d$ and returns the maximum cross entropy loss on a point in the concretization \cite{ibp}.
\textbf{Mix($g_1, g_2, \lambda$)} takes two goal constructors $g_1$ and $g_2$ and a float $\lambda$ as inputs. The abstract element used for training is the cartesian product of the abstractions $d_{1}$ and $d_{2}$ of the input box in $g_1$ and $g_{2}$. The loss linearly combines the loss functions from $g_{1}$ and $g_{2}$ using $\lambda$.
\textbf{Sub($\delta, g$)} takes a float $\delta$ and a goal constructor $g$ as inputs. It computes the abstract element for training by calling the abstraction function $\alpha^{g}$ of the constructor $g$ using the new bounds $l'= 0.5 \cdot (u + l - \delta \cdot (u - l))$ and $u'= 0.5 \cdot (u + l + \delta \cdot (u - l))$. The insight behind this constructor is to use training elements constructed from boxes that overlap with the input box. The output loss is the loss from $g$.
\textbf{Sample($\delta, g_{s}, g_{t}$)} uses Sub($1 - \delta$, $g_{s}$) to find a point $b$, by taking the center of the returned training element,
and passes $l' = b - 0.5 \cdot \delta(u - l)$ and $u' = b + 0.5 \cdot \delta(u - l)$ to the abstraction function $\alpha^{g_{t}}$ of $g_{t}$. The output loss is from $g_{t}$. This is visualized in \figref{samples}.
\textbf{BiSample($g_{1},g_{2}$)} uses the abstract element $\alpha^{g_{1}}(l,u,t)$ for the input in $g_{1}$ and computes ${l'= 0.5 \cdot (l+u) - |(\textsc{UB}(\alpha^{g_{1}}(l,u)))- 0.5 \cdot (l+u)|}$ and $~u'=0.5 \cdot (l+u) + |(\textsc{UB}(\alpha^{g_{1}}(l,u)))- 0.5 \cdot (l+u)|~$. The output element is $\alpha^{g_{2}}(l',u',t)$. It uses $g_{2}$'s loss.
We note that floating point parameters such as $\delta$ and $\lambda$ used in the constructors above can use scheduling constructors.
\subsection{Example Training Schemes}
Earlier, we observed that our training DSL could be used to specify complex training schemes such as IBP. In particular, IBP uses linear parameter annealing on both the epsilon used in training and the weight of the provability loss, together with a cross entropy based loss function instead of the hinge loss designed by \citet{diffai}~(DiffAI). Using this customization, IBP improves on the results of DiffAI while still using the interval domain for training as done by DiffAI. In our DSL, this training scheme could be written as: Mix(Point, Sub(Lin(0,1,150,10), Box), Lin(0,0.5, 150,10)).
In Table~\ref{tab:methods}, we show a number of example training schemes captured as expressions in our DSL. We found the following schemes to be particularly useful (these are evaluated next):
\textbf{InSamp} interpolates between training on random points in the $L_\infty$ $\epsilon$-Ball and an abstract box surrounding the example. The idea is that it might be easier to train a network on a point to be $\epsilon$ robust if, instead of only that point being $\epsilon - \mu$ robust already, every point in the $\epsilon$ box around it is also $\epsilon - \mu$ robust for small $\mu>0$.
\textbf{InSampLPA} is the same as InSamp, but also uses scheduling for the size of the sampling domain, by surrounding it with Sub. The idea is that using the sampling domain is a kind of adversarial training, and it might be easier for the network to learn the standard dataset first.
|
1,314,259,995,059 | arxiv | \section{Introduction}
The aim of relativistic heavy-ion collision experiments is to detect and understand the properties of the bulk QCD matter created in these collisions. Past experiments have shown that the ability to perform measurements differentially with respect to the identity of the final state hadrons is crucial to a full understanding of the evolution and dynamics of the produced fireball \cite{BraunMunzinger199543,BraunMunzinger19961,KolbRapp}. Such measurements have also revealed anomalies challenging the detailed modelling of the collision \cite{Adler:2003fk,Adams:2006wk}. The ALICE experiment was designed with the goal of maximising the particle identification capability using transition and Cherenkov radiation detectors, calorimetry and, in particular for the analysis presented here, identifying the most abundant species of charged hadrons over a wide range of \pT at mid-rapidity using $\dEdx$ and time-of-flight techniques \cite{1748-0221-3-08-S08002}. The excellent tracking down to low \pT also allows the reconstruction of weakly decaying neutral strange particles via their charged decay modes.
\section{Experiment}
The ALICE central barrel performs tracking of charged particles in a 0.5 T magnetic field using a Time Projection Chamber (TPC) and Inner Tracking System (ITS). Particles with large enough \pT pass through the outer wall of the TPC and can go on to hit a surrounding Time-of-Flight detector (TOF).
\PbPb events were collected using a minimum bias trigger and several million events are used in this analysis. The \PbPb data sample can be separated into centrality bins using the event-wise multiplicity in the VZERO forward scintillator detectors in combination with a Monte Carlo Glauber study \cite{Aamodt:2011qy}. Charged particle identification is achieved using two techniques. The specific energy loss, $\dEdx$, can be calculated for each track from the ionisation in the TPC gas (or ITS silicon) and compared to theoretical values from the Bethe-Bloch formula which predicts the regions in momentum where $\pi$, \mbox{$\mathrm {K}$}, and p signals can be separated. This separation between species can be used at low \pT~but near to the minimum of $\dEdx$ all three species are merged. In this range however the TOF can separate these species so a combined \pT~spectrum can be extracted \cite{Aamodt:2010uq}. In this analysis the primary yield of charged particles is reported; that is those emerging directly from the collision or the decay of short-lived resonances and not the charged particles from the weak decay of strange hadrons nor secondaries from the material. These are both excluded using the distribution of the distance of closest approach to the primary interaction vertex, which can be fitted to a template obtained from Monte Carlo events, where the origin of the particle is known. The decay of neutral strange particles decaying into charged daughters; $\Lambda\rightarrow p \mbox{$\mathrm {\pi^{-}}$}$ and $\mbox{$\mathrm {K^0_S}$} \rightarrow \mbox{$\mathrm {\pi^{+}}$} \mbox{$\mathrm {\pi^{-}}$}$, can be reconstructed and the invariant mass distributions used for identification. The analysis follows the method used for \pp~collisions but tighter cuts are made to further reduce the combinatorial background in \PbPb events \cite{Aamodt2011Strange}. 
For both neutral and charged particle analyses the spectra are corrected using efficiencies from Monte Carlo events having equivalent mean multiplicities.
\section{Results}
\subsection{Charged Particles}
The combined \pT~spectra are obtained for each of eight centrality bins for \mbox{$\mathrm {\pi^{\pm}}$}, \mbox{$\mathrm {K^{\pm}}$}, p and \pbar~and are shown, for positive particles only, in figure \ref{thespectra}. The most noticeable features are: the dramatic change in the shape of the spectra going from $\pi$ through \mbox{$\mathrm {K}$}~to p; and the shifting of the most probable values to higher $\pT$, particularly for p but also for \mbox{$\mathrm {K}$}. A direct comparison of the most central spectra to \AuAu data at \sqrtsNN~= 200 \gev~is made in figure \ref{spectraRHIC}. This shows how the spectra at LHC energy are much less steeply falling. A first attempt at quantifying the changes using a parameterised blast wave function \cite{Schnedermann:1993uq} was made. The resulting fit parameters for the freeze-out temperature, $T_{\mathrm{fo}}$, and mean transverse velocity, \mbox{$\mathrm {\beta}$}, are shown in figure \ref{bwave} as 1-$\sigma$ contours for each centrality class. Fit ranges 0.3--1.0 \gmom, 0.2--1.5 \gmom~and 0.3--3.0 \gmom~were used for $\pi$, \mbox{$\mathrm {K}$}, and p, respectively, in order to avoid the region where a hard component of the spectrum might be expected and, at low $\pT$, to avoid a strong contribution of resonances to $\pi$. There appears to be a larger \mbox{$\mathrm {\beta}$}, corresponding to stronger flow, than observed by STAR at lower energy \cite{Adams2005102}. $T_{\mathrm{fo}}$~is very sensitive to the fit range so any change with respect to RHIC needs further study. A blast wave was also fitted to each individual spectrum in order to obtain $\pT$-integrated yields, including the unmeasured part. These can be used to form the ratios p/$\pi$ and $\mbox{$\mathrm {K}$}/\pi$ for each centrality bin. The ratio p/$\pi$ is almost constant with centrality and is consistent with similar measurements in \AuAu collisions at RHIC \cite{Adler:2004fj}.
The ratio $\mbox{$\mathrm {K}$}/\pi$ shows a small rise from \pp~and peripheral collisions to the most central collisions and is also consistent with previous lower energy data \cite{Abelev:2009kx}.
\begin{figure}
\begin{minipage}{\columnwidth}
\centering
\includegraphics[height=.27\textheight]{2011-May-20-spectrafit_pion_plus-mod}
\caption{The centrality-selected \pT spectra for identified \mbox{$\mathrm {\pi^{+}}$} (top) \mbox{$\mathrm {K^{+}}$} (middle) and p (bottom). Fits are to a parameterised blast wave.}
\includegraphics[height=.27\textheight]{2011-May-20-spectrafit_kaon_plus-mod}
\includegraphics[height=.27\textheight]{2011-May-20-spectrafit_proton_plus-mod}
\label{thespectra}
\end{minipage}
\end{figure}
\begin{figure}
\includegraphics[height=.30\textheight]{2011-Jun-30-central_neg_rhic_alice}
\caption{The \pT spectra for \mbox{$\mathrm {\pi^{-}}$}, \mbox{$\mathrm {K^0_S}$}, \mbox{$\mathrm {K^{-}}$}, and \pbar~for the most central \PbPb (0-5\%) collisions (solid markers) plotted with those measured in \sqrtsNN~= 200 \gev \AuAu collisions (open symbols.)}
\label{spectraRHIC}
\end{figure}
\begin{figure}
\includegraphics[height=.28\textheight]{2011-Jun-14-contour_PbPb-mod}
\caption{1-$\sigma$ contours in the T-\mbox{$\mathrm {\beta}$}~plane from a simultaneous fit of a parameterised blast wave function to the \mbox{$\mathrm {\pi^{\pm}}$}, \mbox{$\mathrm {K}$}, and p \pT spectra for various centrality classes. \PbPb collisions from the ALICE experiment in red, \AuAu collisions from the STAR experiment in blue. Most central data lie to the right.}
\label{bwave}
\end{figure}
\subsection{Neutral Particles}
The \pT spectra of \mbox{$\mathrm {\Lambda}$}~and \mbox{$\mathrm {K^0_S}$}~have also been extracted for each centrality bin. As the systematic uncertainties on the efficiency correction are still under study the preliminary spectra themselves are not yet ready. However the study reveals that the ratio of the efficiencies for each particle, as a function of \pT, is rather stable with respect to changing the centrality of the collision. In particular in the \pT range 2.5-5.5 \gmom~the variation of the efficiency ratio between the most central and the most peripheral centrality selections is below 2\%. This allows the \mbox{$\mathrm {\Lambda}$} / \mbox{$\mathrm {K^0_S}$}~ratio to be calculated with an estimated systematic uncertainty of 10\% and the resulting curves for each centrality are shown in figure \ref{LKRatio} (upper). Also shown are the ratios in \pp~collisions at $\sqrt{s} = 0.9$ and 7 \tev~\cite{Aamodt2011Strange}. The \pp~data demonstrate that in the \tev~range the maximum value of the ratio is almost constant and it is reasonable to assume that \pp~collisions at $\sqrt{s} = 2.76$ \tev~would show the same maximum. Taking this \pp~baseline the ratio is observed to have a maximum which rises strongly going to peripheral and then to central events, with a total increase up to a factor of three. The value of \pT at which the maximum is reached is also increasing by several hundred \mmom. The data are compared to a similar measurement previously made by STAR in figure \ref{LKRatio} (lower) \cite{0954-3899-32-12-S13}. To facilitate the comparison the lower energy data were scaled by the \mbox{$\mathrm {\overline{\Lambda}}$}/\mbox{$\mathrm {\Lambda}$}~ratio measured for each centrality \cite{Adams:2007lr}, assuming it is constant in $p_{\mathrm{T}}$, because it has previously been noted that there is a $\sqrt{s}$-dependence of the ratio \cite{Aggarwal:2011lr}, presumably due to the change in the baryo-chemical potential. 
The ALICE data were not scaled in this way because the anti-baryon/baryon ratio in LHC collisions is very close to one \cite{Aamodt:2010yq}. The ratio in peripheral 60-80\% collisions is very similar in shape for the two collision systems with only a small change in the magnitude. In the most central 0-10\% however the shape is quite different with the enhancement of the \mbox{$\mathrm {\Lambda}$}~extending to a much larger \pT in the higher energy data. This is qualitatively in agreement with some predictions \cite{springerlink:10.1140/epjcd/s2004-04-026-6}.
\begin{figure}
\begin{minipage}{\columnwidth}
\centering
\includegraphics[height=.29\textheight]{2011-Sep-15-ratios_ALICE-mod}
\includegraphics[height=.273\textheight]{2011-Sep-15-ratios_ALICE_STAR}
\caption{(Upper panel.) The ratio of \mbox{$\mathrm {\Lambda}$}~to \mbox{$\mathrm {K^0_S}$}~as a function of \pT for five centrality classes in \PbPb collisions. Also shown the same ratio in \pp~collisions at two energies. (Lower panel.) A comparison between the ratio measured by ALICE (solid markers) and STAR (open symbols) for selected centralities.}
\label{LKRatio}
\end{minipage}
\end{figure}
\section{Conclusions}
\PbPb collisions at \sqrtsNN~= 2.76 \tev~reveal a number of similarities to \AuAu collisions at RHIC; the ratios of the yields are the same within the experimental uncertainties, the spectra are compatible with a strong collective motion which increases going to more central collisions and there is a growth of the \mbox{$\mathrm {\Lambda}$} / \mbox{$\mathrm {K^0_S}$}~ratio in the \pT region 2-4 \gmom, also with centrality. There are however some notable differences; the \pT spectra are much flatter giving a transverse flow velocity in a blast wave parameterisation 10\% larger than that in \sqrtsNN~= 200 \gev \AuAu collisions and the enhanced baryon-to-meson ratio extends to a \pT of around 6 \gmom. This may imply that the influence of particles participating in the collective dynamics of the system extends to a higher \pT than has previously been observed.
\bibliographystyle{aipproc}
|
1,314,259,995,060 | arxiv | \section{Introduction}
Electronic structure and topology in materials dictate their classification. For example, a system with (without) a Fermi surface is a metal (an insulator). The systems with the Fermi surface have gapless low-lying excitations, that are usually not stable against perturbations, leading to various states of matter through phase transitions. Superconductivity arises in metals when there is Cooper-pair instability. Interacting with phonons~\cite{bardeen1957a, bardeen1957b}, electrons acquire the attractive force to form the bosonic pairs. The pairs, in particular, are robust in any directions. Namely, the two-particle pairing wavefunction has zero angular momentum, resulting in an isotropic superconducting gap. The presence of the Fermi surface is the necessary condition for Cooper instability. In other words, the gapless states are the nurturing cradle for superconductivity.
That elegant scenario was challenged by the discovery of the high transition temperature superconductivity in copper oxides in 1986~\cite{bednorz1986}. The new superconductivity appears in families of compounds into which mobile charge carriers need to be doped. Taking La$_{\text{2-}x}$Sr$_{x}$CuO$_{4}$ as an example, at $x$=0, the compound is an antiferromagnetic insulator with an antiferromagnetic transition temperature up to near 300 K~\cite{vaknin1987}. Increment of $x$ introduces the mobile holes as well as the gapless states in the $(\pm\frac{\pi}{2},\pm\frac{\pi}{2})$ directions~\cite{zhen2003}. They form the arc segments, dubbed as Fermi arcs. A closed Fermi surface can be completed at the doping level around 19\%~\cite{taillefer2007}. While the Fermi arcs grow in the $(\pm\frac{\pi}{2},\pm\frac{\pi}{2})$ directions, the directions along $(\pm\pi,0)$ and $(0,\pm\pi)$ remain gapped.
The coexistence of the gapless and gapped states blurs the boundary of metals and insulators. So far, the origin of the Fermi arc formation still stays at the numerical evidence~\cite{imada2009}. The clear physical mechanism is lacking. Meanwhile, in those very strange electronic structures, superconductivity arises. Two properties remain true. Namely, electrons form pairs, and most of their properties are well described by BCS wavefunction of $d$-wave pairing symmetry. However, the $d$-wave symmetry that electrons choose to pair is $d_{x^2-y^2}$, which is very odd. The $d_{x^2-y^2}$ state has the maximum amplitude in the $(\pm\pi,0)$ and $(0,\pm\pi)$ directions and are zero in the $(\pm\frac{\pi}{2},\pm\frac{\pi}{2})$ directions. That choice violates our general understanding. Based on Cooper's argument, electrons pair up in the region of momentum space where there are gapless states. Then, $d_{xy}$ should be favored. The special choice of $d_{x^2-y^2}$ symmetry implies that the electron pairs in cuprates are \emph{not} Cooper pairs.
In this paper, we provide the physical mechanism of Fermi arc formation and the pairing mechanism that explains why $d_{x^2-y^2}$ symmetry is favored. We will show that the correlated electrons are in general not homogeneous. Namely, domains with Fermi arcs and the ones without can coexist. Likewise, $d_{x^2-y^2}$, $d_{x^2-y^2}\pm id_{xy}$, and $s$ symmetries of pairing can coexist in different domains \emph{in the same system}, as well. The physics measured depends on the experimental techniques. For example, in the angle-resolved photoemission experiments, light often shines on the area of multiple domains. In this case, the photo-electrons from the gapless states always come out, if present. This technique often claims the $d_{x^2-y^2}$ symmetry. For the scanning tunneling microscopy experiments and other techniques of local probe, they can claim all cases, depending on which domain they measure~\cite{imada2013}. Different from Cooper pairs, the superconductivity in the strongly correlated electron system can arise \emph{without} a Fermi surface, \emph{i.e.}, fully gapped, because electrons pair up in the real space, not in the momentum space.
\section{Fermi arc formation}
The renowned electronic structure in cuprates, the Fermi arcs, is the combinations of the two mechanisms: pseudogap formation and quantum fluctuations of the correlation degrees of freedom. The first one is due to the interaction of electrons through exchanging massive gauge bosons of \emph{imaginary} wave vectors. The second one is the interaction between electrons and the gauge field of \emph{real} wave vectors. In comparison to the electrodynamics, the first one is similar to the Coulomb interaction of exchanging virtual photons, and the second one is the additional interaction with electromagnetic waves. The first mechanism results in the repulsive interaction between electrons and opens a gap. The second mechanism creates an attractive potential that cancels the repulsive interaction and closes the gap in some circumstances. Based on the form factors of the attractive potential, it closes the gap in an anisotropic manner, leading to the arc segments in the momentum space. In this section, we discuss the effect of the second mechanism. We will show that the attractive potentials have two types: $s$-type and $d$-type. The $s$-type remains the pseudogap to be a full gap, and the $d$-type closes the gap in the nodal directions.
Let us begin with a brief introduction of the pseudogap formation. The antiferromagnetic fluctuation in cuprates leads to divergent phenomena that seemingly do not have any relations~\cite{mendels1989, julien2011, keimer2012, kapitulnik2008}. Assuming its $XY$ nature at finite doping, it is disordered in high temperature and acquires a quasi-long-ranged order through the Kosterlitz-Thouless transition~\cite{chern2014}. In addition, the overlap of the electron spin wavefunction introduces the massless gauge field strongly coupling with the antiferromagnetic fluctuation, that provides the mass of the gauge field and becomes the longitudinal mode of the gauge field~\cite{wen1989, chern2014}. Electrons weakly coupling with the massive gauge field open a gap in the excitation spectrum, which is the origin of the pseudogap formation.
In the pseudogap phase, the quantum fluctuation of the antiferromagnetic fluctuation can occur~\cite{lee2018}. It generates a propagating $\vec{E}$ field. In the domain where the fluctuation stably exists and assuming the $x$ direction to be the longitudinal direction, the propagating $\vec{E}$ field forms a standing wave given by
\begin{eqnarray}
&&\vec{E}=-\frac{A_0M^2_0}{k_L}\sin(k_Lx)\cos(\omega_L t)\hat{x}, \label{e-field}
\end{eqnarray}
where $A_0$ is the strength of the quantum fluctuation, $M_0$ is the mass of the gauge field, and $\omega^2_L=k_L^2+M_0^2$. The electric field in Eq.~(\ref{e-field}) drives the electrons and forms the charge density modulations~\cite{lee2018}. Taking the onset temperature of the density modulation $T_{\text{CDW}}$, $A_0$ can be estimated as $A_0=\sqrt{\frac{16\pi^2k_BT_{\text{CDW}}}{4\pi^2+1}}$.
In general, the antiferromagnetic fluctuation can be equally excited in the $\hat{y}$ direction of the square lattice. In order to match the locations of the nodes in the $x$ direction, there are only two possibilities.
\begin{eqnarray}
&&\vec{E}_1(x,y)=-\frac{A_0M^2_0}{k_L}\cos(\omega_L t)[\sin(k_Lx)\hat{x}+\sin(k_Ly)\hat{y}], \nonumber\\
&&\vec{E}_2 (x,y)=-\frac{A_0M^2_0}{k_L}\cos(\omega_L t)[\sin(k_Lx)\hat{x}-\sin(k_Ly)\hat{y}],
\end{eqnarray}
that are equivalent to the potentials
\begin{eqnarray}
&&V_1(x,y)= -V_0\cos(\omega_L t)[\cos(k_Lx)+\cos(k_Ly)], \nonumber \\
&&V_2 (x,y)= -V_0\cos(\omega_L t)[\cos(k_Lx)-\cos(k_Ly)],
\end{eqnarray}
where $V_0=\sqrt{\frac{6m^*\Delta k_BT_{\text{CDW}}}{(4\pi^2+1)k_L}}$, $m^*$ is the effective electron mass, and $\Delta$ is the pseudogap magnitude. Taking $\Delta\sim 40$ meV~\cite{zhen2014} and $T_{\text{CDW}}\sim$ 100 K, $k_L$ for the 4 lattice spacings modulation, $V_0$ = 0.363 eV. The classical dynamics of the electrons in the presence of the quantum antiferromagnetic fluctuation is already given in Ref.~\cite{lee2018} in detail. In addition, the rapidly oscillating $\vec{E}$ field drives the electrons to move around the nodes, which effectively creates the attractive potentials~\cite{lee2018}
\begin{eqnarray}
&&\bar{V}_1 (x,y)= -V_0|\cos(k_Lx)+\cos(k_Ly)|, \nonumber \\
&&\bar{V}_2 (x,y)= V_0|\cos(k_Lx)-\cos(k_Ly)|,
\end{eqnarray}
as shown in Fig.~\ref{potential}. The potential minima indicate the locations of the nodes. In $\bar{V}_1 (x,y)$, the nodes are the local minima. In $\bar{V}_2 (x,y)$, the nodes are surrounded by the lines of minima.
\begin{figure}[htb]
\includegraphics[width=0.48\textwidth]{swave.pdf}
\includegraphics[width=0.48\textwidth]{dwave.pdf}
\caption{(Color online) The effective potential of the antiferromagnetic fluctuations. Brown lines indicate the lattice of copper atoms. The blue empty circle indicates the hole site. The blue solid circles indicate the electron sites. (a) $\bar{V}_1(x,y)$. (b) $\bar{V}_2(x,y)$.}\label{potential}
\end{figure}
Now, let us consider the doping where one of the electrons at the nodes is taken away as shown in Fig.~(\ref{potential}). The electrons at the anti-nodes, shown as the solid blue circles in Fig.~(\ref{potential}), then move toward each other and oscillate around the nodes~\cite{lee2018}. The attractive force in the potential is larger than the repulsive force that generates the pseudogap, which can be estimated as the following. The attractive force is roughly estimated from the depth of the effective potential 0.363 eV with a length scale $\frac{1}{4M_0}$, and the repulsive force can be estimated from the 40 meV pseudogap with a length scale $\frac{1}{M_0}$. Therefore, the attractive force cancels the repulsive force and closes the pseudogap dynamically. In the case of potential $\bar{V}_2$, two electrons can scatter off in any direction, as they collide. If they scatter off to the anti-nodes, they repeat the motion again. If they scatter off along the lines of minimum, the nodal quasiparticles appear, since there is no confining potential. The direction of the lines of the minimum is nothing but that of $(\pm\frac{\pi}{2},\pm\frac{\pi}{2})$, which provides the reason why Fermi arcs form in those directions. On the other hand, in the case of the potential $\bar{V}_1$, electrons always scatter off back to the same position and repeat the oscillations. There is no gapless excitation. Finally, we emphasize that the current scheme is different from the excitation process between bands where there is no force to cancel the repulsive many-body interaction. In conclusion, the prevailing antiferromagnetic fluctuation and its form factor dictate the Fermi arc formation in the $(\pm\frac{\pi}{2},\pm\frac{\pi}{2})$ directions.
Both $\bar{V}_1$ and $\bar{V}_2$ are doping dependent. $V_0$ is a function of the pseudogap magnitude $\Delta$ and the onset temperature of the density modulation $T_{\text{CDW}}$. As the doping increases, $\Delta$ decreases. The depth of the potential decreases with the doping, and electrons can scatter off in wider directions, leading to the growth of the Fermi arcs. This mechanism implies that the potentials can still be finite when the Fermi arcs complete Fermi surfaces in both $\bar{V}_1$ and $\bar{V}_2$ cases. Namely, the completion of the Fermi surfaces does not imply that $\Delta$ is zero~\cite{zhen2014}.
The existence of the potential $\bar{V}_1$ and $\bar{V}_2$ should be of equal probability. Namely, they can coexist in different domains in the same system. The pseudogap structures are inhomogeneous in the real space and can be classified into two categories. (1) full pseudogap (from $\bar{V}_1$), and (2) Fermi-arced pseudogap (from $\bar{V}_2$). Although the domain sizes can not be estimated, we believe that physics measured highly depends on the experimental tools. For example, the photo-emission experiments and the transport measurements are sensitive to the gapless excitations. As their probes cover multi-domains, the physics from $\bar{V}_2$ dominates the signals. For the tools of local probes, especially STM, physics of both pseudogap structures should be faithfully claimed.
\begin{figure}[htb]
\includegraphics[width=0.48\textwidth]{band.pdf}
\caption{(Color online) The equipotential contours of the band in Eq.~(\ref{band}). The points of $(\pm\frac{\pi}{2},\pm\frac{\pi}{2})$ are located at 0.128 of the equipotential. }\label{contour}
\end{figure}
Next, let us construct the band structure observed in the photo-emission experiments. With no proof, we find that the bands can be phenomenologically described by $\mathscr{E}(k_x, k_y) = \mathscr{E}_1+\frac{1}{2}(\mathscr{E}_2+\mathscr{E}_3)$, where
\begin{eqnarray}
&&\mathscr{E}_1 = -2t(\cos k_x + \cos k_y), \nonumber \\
&&\mathscr{E}_2 = V_0|\cos \frac{k_x}{2} + \cos \frac{k_y}{2}|, \nonumber \\
&&\mathscr{E}_3 = -V_0|\cos \frac{k_x}{2} - \cos \frac{k_y}{2}|, \label{band}
\end{eqnarray}
$t$ = 0.25 eV, and $V_0$ = 0.363 eV. Eq.~(\ref{band}) satisfies the dispersion relation in the Fig.(1) of Ref.\cite{zhen2014}. The first term is from the tight-binding model in the square lattice. The second and third terms are from the antiferromagnetic fluctuation, where the periodicity is double in both $x$ and $y$ directions. It is not surprising that $\bar{V}_1$ and $\bar{V}_2$ contribute to the band structure. The energy contours of equipotentials are given in Fig.~(\ref{contour}). The introduction of the $\mathscr{E}_2$ and $\mathscr{E}_3$, namely the quantum fluctuation of the antiferromagnetic fluctuation, changes the electron-like band to the hole-like band. Further doping can change back to the electron-like band again. The change of the topology of the band structure can be observed in the measurements of the Hall coefficient~\cite{taillefer2011}.
\section{Superconductivity}
Superconductivity arises at finite doping where the Fermi-arcs appear. It sounds reasonable for the superconductivity in the BCS theory. However, superconductivity in correlated electron systems is not the case. Despite the reason given in the introduction, the superconductivity diminishes after the Fermi surfaces complete. Namely, the superconductivity in cuprates does not favor gapless states. Furthermore, the electron pairs can have $d$-wave symmetry. Where does the angular momentum come from?
BCS theory conveys two concepts: (1) Cooper's instability due to the interaction with phonons (2) BCS wavefunction and its reduced Hamiltonian. Although the first one does not apply to the correlated electron systems, the second one remains true. Therefore, the question of the superconductivity in correlated electron systems is to find the new instability. Namely, electrons need an attractive force to pair up, especially in both full and Fermi-arced pseudogap phases.
The attractive force to pair electrons is nothing but the \emph{quantum fluctuated} antiferromagnetic fluctuation. Different from phonons, it is not the quantized entity of the antiferromagnetic fluctuation that is gapped of the energy $M_0$. It is the quantum fluctuation of the amplitude $A_0$ given in Eq.~(\ref{e-field}). The formation of the density modulation is the evidence of the attractive force to push electrons closer. The formation of the Fermi arcs serves the evidence that the attractive force is larger than the repulsive force.
However, the attractive force is not in the direction of separation as the pairs stay in the nodal lines. It is perpendicular to the separation. Without the attractive force, the pairs break up and become the single-particle excitations. Therefore, the form factor of the attractive potential in Fig.~(\ref{potential}b) indicates that the $d_{x^2-y^2}$, not the $d_{xy}$ or the $s$, symmetry is favored. On the contrary, in the full pseudogap domains, electron pairs should have a full superconducting gap.
Another important ingredient of the electron pairs is the finite angular momentum. To have it, a twist force must exist. As the antiferromagnetic fluctuation is the longitudinal mode of the gauge field, there is also the transverse mode, that is spin-Berry's phase fluctuation~\cite{lee2018}. The $B$ field of the spin-Berry's phase provides the twist force for the electron pairs in the way similar to the signal of the time-reversal-symmetry breaking fluctuation observed in the polar Kerr rotation experiments~\cite{lee2018}. Without the quantum fluctuation of the $B$ field, $s$-wave paired electrons can be found in the full pseudogap domains but still not in the Fermi-arced domains. With the quantum fluctuation of the $B$ field, $d_{x^2-y^2}\pm id_{xy}$-wave paired electrons can not be excluded from the theory in the full gap domains. However, in the real situations, quantum fluctuations of the antiferromagnetic fluctuation and the spin-Berry's phase usually occur simultaneously. The pairings of finite angular momentum are more likely than the $s$-wave.
Since the pairing is due to the attractive potential, the upper limit of the superconducting gap can be estimated. The effective attractive potential energy is the combination of the repulsive energy and the attractive energy, given by $\frac{\Delta}{4}-V_0$, where $\Delta$ is the pseudogap amplitude. In the superconducting phase, the effective attractive potential energy is roughly the sum of the binding energy and the rotational kinetic energy. The binding energy of the pair is $-2\Delta_s$, where $\Delta_s$ is the superconducting gap. The rotational kinetic energy of a $d$-wave pair is roughly $2\frac{\hbar^2 l(l+1)}{2m^*r^2}$. Taking $l=1$ and $r$ as one lattice constant, the rotational kinetic energy is 0.243 eV. Estimating through the equation, $2\Delta_s+2\frac{\hbar^2 l(l+1)}{2m^*r^2} = V_0-\frac{\Delta}{4}$, the upper limit of the superconducting gap $\Delta_s$ is obtained as 55 meV.
Since the two kinds of the pseudogap domains can co-exist, the superconducting domains of different pairing symmetry can co-exist, as well, which is the main prediction of the theory. The effective Hamiltonian can be given by
\begin{eqnarray}
\mathscr{H}_j = -\frac{1}{2m^*}[\frac{\vec{\nabla}}{i}-g\vec{a}(x,y,t)]^2+V_j(x,y,t), \label{hami}
\end{eqnarray}
where $j=1, 2$ for the full and the Fermi-arced gap domains respectively, and $\vec{a}$ is the vector potential of the $B$ field fluctuation. The interaction terms can be generated by using the standard perturbation techniques. For future directions, nonetheless, minimal models for different domains can be constructed in the lattice.
\section{Conclusion}
In general, insulators are believed to be stable against perturbations. The superconductivity in the many-body insulators demonstrates the exceptional instability that seems to be as universal as the BCS instability in metals~\cite{sidorov2002, hosono2008}. Quantum correlation, originating from the concept of wavefunction overlaps, plays the most important role. Both the spatial wavefunction overlap (antiferromagnetic fluctuation) and the spin wavefunction overlap (spin Berry's phase fluctuation) dominate the electronic properties. As the $d_{x^2-y^2}$ symmetry of the electron pairs reveals the most information in the mechanism of the superconductivity, the physics in the full pseudogap domains has been significantly ignored~\cite{yeh2001, feng2016}. In particular, a new $d_{x^2-y^2}\pm id_{xy}$ symmetry of pairing is possible in those domains. The new realm needs to be explored.
\section{Acknowledgement}
This work is funded by MOST 106-2112-M-002-007-MY3 of Taiwan.
|
1,314,259,995,061 | arxiv |
\section{Introduction}
Optimally relaying the signal from a source to a destination for enhancing the network coverage and improving the throughput rate is an active research area~\cite{Kariminezhad2017}. Furthermore, relaying is the only communication means in disaster scenarios if the direct source-destination link is not available. Exploiting a relay for improving the communication throughput rate raises several questions to be answered. For instance, how should the relay process the received signal before dispatching it to the destination? Now, a relay can receive a signal from the source, process it and transmit it towards the destination in a successive manner. This type of relaying technique is known as half-duplex relaying. However, while receiving a signal at a certain time instant, a relay can simultaneously transmit the previously received signals. This technique is known as full-duplex relaying~\cite{Bliss2007}.
As a consequence of transmitting and receiving at a common resource unit, the relay is confronted with self-interference (SI). Note that, full-duplex relaying potentially doubles the throughput rate of the communication compared to the half-duplex counterpart, only if the SI is removed completely at the relay input. By physically isolating the transmitter and receiver frontends of the relay, a significant portion of SI can be reduced~\cite{Sabharwal2014,Shankar2012}. Moreover, analog and/or digital signal processing at the relay input can be utilized to cancel a portion of SI~\cite{Shankar20122, Bliss2012, Eltawil2015,Vogt2018}. This can be realized if the estimate of the SI channel state information (CSI) can be obtained at the relay.
By exploiting multiple antennas at the relay, the throughput rate from the source to destination can be improved~\cite{Fan2007,Mo2012}. Using multiple antennas at the relay provides the feasibility of cancelling the SI spatially by beamforming techniques such that the impact of SI can be mitigated~\cite{Riihonen2011,Lioliou2010}. For instance, zero-forcing (ZF) beamforming forces the SI to zero at the relay input, however, it is not an optimal scheme in weak SI regimes if the relay is equipped with a limited number of antennas. In contrast,~\cite{Ngo2014,Kariminezhad2017SCC} investigate a relaying setup with a massive number of antennas at the relay. Here, they show the optimality of the ZF process at the relay with a very large antenna array.
Further, exploiting multiple antennas at the source and destination can provide the opportunity for improving the communication throughput rate. In a MIMO multi-hop system, the authors in~\cite{Suraweera2014} investigate an amplify-and-forward (AF) relay, where the precoder at the relay and the decoder at the destination are jointly optimized for maximizing the source-destination throughput rate. However, the authors have assumed a single stream transmission, which is not always optimal. The authors in~\cite{Jeong2017} consider a MIMO decode-and-forward (DF) relaying scheme with energy harvesting demands at the relay provided by the source. These works mainly assume the availability of the SI channel for optimal MIMO pre- and post-processing tasks, where the RSI is simply treated as noise with estimated statistical moments. However, these estimates can not be guaranteed to be valid for all applications and scenarios. Hence, the study of a robust design becomes crucially important.
Robust transceiver design against the worst-case RSI channel provides the worst-case threshold for switching between HD and FD operating modes in hybrid relay systems. The authors in~\cite{Taghizadeh2014} investigate a robust design for multi-user full-duplex relaying with multi-antenna DF relay. In that work, the sources and destinations are equipped with single antennas. Moreover, the authors in~\cite{Cirik2016} investigate a robust transceiver design for multi-user MIMO systems for maximizing the weighted sum-rate of the network.
\textit{Contribution:} We consider a DF relay with multiple antennas at the source, relay and destination. In this system, we allow multi-stream beamforming for throughput rate maximization. The achievable rate of the DF full-duplex relaying is cast as a non-convex optimization problem. We propose an efficient algorithm to solve this problem in polynomial time. Finally, the transmit signal covariances at the source and the relay are designed efficiently to be robust against the worst-case RSI channel.
\vspace*{-0.5cm}
\section{System Model}
We consider a communication setup from a source equipped with $M$ antennas to a destination with $N$ antennas. The reliable communication is assumed to be only feasible by means of a relay with $K_\mathrm{t}$ transmitter and $K_\mathrm{r}$ receiver antennas at the output and input frontends, respectively. The received signals at the relay and destination are given by
\begin{align}
\mathbf{y}_\mathrm{r}&= \mathbf{H}_1\mathbf{x}_\mathrm{s}+\kappa \mathbf{H}_\mathrm{r}\mathbf{x}_\mathrm{r}+\mathbf{n}_\mathrm{r},\\
\mathbf{y}_\mathrm{d}&= \mathbf{H}_2\mathbf{x}_\mathrm{r}+\mathbf{n}_\mathrm{d},
\end{align}
respectively, where $\kappa\in\{0,1\}$. Notice that, $\kappa=0$ coincides with HD relaying and $\kappa=1$ denotes FD relaying. The transmit signal of the source is denoted by $\mathbf{x}_\mathrm{s}\in\mathbb{C}^{M}$ with the covariance matrix $\mathbf{Q}_\mathrm{s}=\mathbb{E}\{\mathbf{x}_\mathrm{s}\mathbf{x}^H_\mathrm{s}\}$, and the transmit signal of the relay is represented by $\mathbf{x}_\mathrm{r}\in\mathbb{C}^{K_{\mathrm{t}}}$, with the covariance matrix $\mathbf{Q}_\mathrm{r}=\mathbb{E}\{\mathbf{x}_\mathrm{r}\mathbf{x}^H_\mathrm{r}\}$. The additive noise vectors at the relay and destination are denoted by $\mathbf{n}_\mathrm{r}\in\mathbb{C}^{K_\mathrm{r}}$ and $\mathbf{n}_\mathrm{d}\in\mathbb{C}^N$, respectively, which are assumed to follow zero-mean Gaussian distributions with identity covariance matrices. The source-relay channel is represented by $\mathbf{H}_1\in\mathbb{C}^{K_\mathrm{r}\times M}$ and the relay-destination channel is denoted by $\mathbf{H}_2\in\mathbb{C}^{N\times K_\mathrm{t}}$, see~\figurename{ \ref{fig:SystemModel}}. These channels are assumed to be perfectly known. Furthermore, the self-interference (SI) channel at the relay is represented by $\mathbf{H}_\mathrm{r}$, which is assumed to be known only imperfectly. In what follows, we present the achievable throughput rates for the HD and FD relaying. In the next section, we start with the HD relay, where $\kappa=0$.
\begin{figure}
\centering
\tikzset{every picture/.style={scale=.95}, every node/.style={scale=0.7}}%
\input{SystemModel}
\vspace*{0.4cm}
\caption{System model of a full-duplex relay}
\label{fig:SystemModel}
\end{figure}
\vspace*{-0.5cm}
\section{Achievable Rate (Half-Duplex Relay)}
Assuming that the relay applies the decode-and-forward strategy, we consider a simple half-duplex relay where the source and the relay transmit in two subsequent time instances. We formulate the achievable rate between the source and destination nodes as
\begin{align}
R^{\mathrm{HD}}=\frac{1}{2}\min(R^{\mathrm{HD}}_\mathrm{sr},R^{\mathrm{HD}}_\mathrm{rd}),
\end{align}
in which $R^{\mathrm{HD}}_\mathrm{sr}$ and $R^{\mathrm{HD}}_\mathrm{rd}$ are the achievable rates on the source-relay and relay-destination links, respectively. Notice that, in half-duplex relaying the source and relay transmissions are conducted in separate channel uses. These rates are given by
\begin{align}
R^{\mathrm{HD}}_\mathrm{sr}&=\log_2\big|\mathbf{I}_{K_\mathrm{r}}+\mathbf{H}_1\mathbf{Q}_\mathrm{s}\mathbf{H}^H_1\big|,\\
R^{\mathrm{HD}}_\mathrm{rd}&=\log_2\big|\mathbf{I}_{N}+\mathbf{H}_2\mathbf{Q}_\mathrm{r}\mathbf{H}^H_2\big|.
\end{align}
Now, the transmit covariance matrices $\mathbf{Q}_\mathrm{s}\in\mathbb{H}^{M\times M}$ and $\mathbf{Q}_\mathrm{r}\in\mathbb{H}^{K_\mathrm{t}\times K_\mathrm{t}}$ are optimized by maximizing the achievable rate from the source to the destination. Here, the convex cone of hermitian positive semidefinite matrices of dimensions $M\times M$ and $K_\mathrm{t}\times K_\mathrm{t}$ are represented by $\mathbb{H}^{M\times M}$ and $\mathbb{H}^{K_\mathrm{t}\times K_\mathrm{t}}$, respectively. The throughput rate maximization problem is cast as
\begin{subequations}\label{P:HDa}
\begin{align}
\max_{\mathbf{Q}_\mathrm{s},\mathbf{Q}_\mathrm{r}}\quad & \frac{1}{2}\min(R^{\mathrm{HD}}_\mathrm{sr},R^{\mathrm{HD}}_\mathrm{rd})\tag{\ref{P:HDa}}\\
\text{subject to}\quad\quad & \mathrm{Tr}(\mathbf{Q}_\mathrm{s})\leq P_\mathrm{s},\label{P:HDa:ConsA}\\
&\mathrm{Tr}(\mathbf{Q}_\mathrm{r})\leq P_\mathrm{r},\label{P:HDa:ConsB}
\end{align}
\end{subequations}
in which the constraints~\eqref{P:HDa:ConsA} and~\eqref{P:HDa:ConsB} are the transmit power constraints and $P_\mathrm{s}$ and $P_\mathrm{r}$ are the transmit power budgets at the source and relay, respectively. Let $\mathbf{Q}_\mathrm{s}=\mathbf{U}_\mathrm{s}\boldsymbol\Gamma_\mathrm{s}\mathbf{U}^H_\mathrm{s}$ and $\mathbf{Q}_\mathrm{r}=\mathbf{U}_\mathrm{r}\boldsymbol\Gamma_\mathrm{r}\mathbf{U}^H_\mathrm{r}$. Since, $R^{\mathrm{HD}}_\mathrm{sr}$ and $R^{\mathrm{HD}}_\mathrm{rd}$ are concave functions of $\mathbf{Q}_\mathrm{s}$ and $\mathbf{Q}_\mathrm{r}$, the solutions are given as~\cite{Telatar99}
\begin{align}
\mathbf{Q}^{\star}_\mathrm{s}={\bf U}^{\star}_\mathrm{s}{\bf\Gamma}^{\star}_\mathrm{s}{\bf U}^{{\star}^H}_\mathrm{s},\ \text{with}\ {\bf U}^\star_\mathrm{s}=\mathbf{R}_1,\label{eq:QsA}\\
\mathbf{Q}^{\star}_\mathrm{r}={\bf U}^{\star}_\mathrm{r}{\bf\Gamma}^{\star}_\mathrm{r}{\bf U}^{{\star}^H}_\mathrm{r},\ \text{with}\ {\bf U}^\star_\mathrm{r}=\mathbf{R}_2.\label{eq:QrA}
\end{align}
Notice that ${\bf R}_1$ and ${\bf R}_2$ correspond to the right singular matrices of ${\bf H}_1$ and ${\bf H}_2$, respectively, with
$
{\bf H}_1={\bf L}_1{\bf \Sigma}_1{\bf R}^H_1,$ and $
{\bf H}_2={\bf L}_2{\bf \Sigma}_2{\bf R}^H_2.
$
The diagonal matrices ${\bf\Gamma}^{\star}_\mathrm{s}$ and ${\bf\Gamma}^{\star}_\mathrm{r}$ are determined by the water-filling algorithm~\cite{Telatar99} as
\begin{align}
{\bf\Gamma}^{\star}_\mathrm{s}&=\left(\tau_\mathrm{s}\mathbf{I}-({\bf\Sigma}^{H}_1{\bf\Sigma}_1)^{-1} \right)^{+},\\
{\bf\Gamma}^{\star}_\mathrm{r}&=\left(\tau_\mathrm{r}\mathbf{I}-({\bf\Sigma}^{H}_2{\bf\Sigma}_2)^{-1} \right)^{+},
\end{align}
respectively. The water levels $\tau_\mathrm{s}$ and $\tau_\mathrm{r}$ are chosen such that they satisfy the power constraints, i.e., $\mathrm{Tr}\left(\left(\tau_\mathrm{s}\mathbf{I}-({\bf\Sigma}^{H}_1{\bf\Sigma}_1)^{-1} \right)^{+}\right)=P_\mathrm{s}$, and $\mathrm{Tr}\left(\left(\tau_\mathrm{r}\mathbf{I}-({\bf\Sigma}^{H}_2{\bf\Sigma}_2)^{-1} \right)^{+}\right)=P_\mathrm{r}$. Next, we determine the maximum achievable rate for the full-duplex relay.
\vspace*{-0.2cm}
\section{Achievable Rate (Full-Duplex Relay)}
We assume that an estimate of the self-interference (SI) channel $\mathbf{H}_\mathrm{r}$ is available at the relay denoted by $\tilde{\mathbf{H}}_\mathrm{r}$. Hence, the unknown channel estimation error (residual self-interference channel) represented by $\bar{\mathbf{H}}_\mathrm{r}$ is given as
\begin{align}
\bar{\mathbf{H}}_\mathrm{r}=\mathbf{H}_\mathrm{r}-\tilde{\mathbf{H}}_\mathrm{r}.
\end{align}
In this work, we assume that some portion of the SI is canceled based on the available estimate $\tilde{\mathbf{H}}_\mathrm{r}$, such that only a residual self-interference (RSI) remains. Here, we represent this portion by $\bar{\mathbf{H}}_\mathrm{r}\mathbf{x}_\mathrm{r}$. Considering a full-duplex decode-and-forward relay, the following rate is achievable
\begin{align}
R^{\mathrm{FD}}=\min(R^{\mathrm{FD}}_\mathrm{sr},R^{\mathrm{FD}}_\mathrm{rd}),
\end{align}
in which
\begin{align}
R^{\mathrm{FD}}_\mathrm{sr}&=\log_2\frac{\big|\mathbf{I}_{K_\mathrm{r}}+\mathbf{H}_1\mathbf{Q}_\mathrm{s}\mathbf{H}^H_1+\bar{\mathbf{H}}_\mathrm{r}\mathbf{Q}_\mathrm{r}\bar{\mathbf{H}}^H_\mathrm{r}\big|}{\big|\mathbf{I}_{K_\mathrm{r}}+\bar{\mathbf{H}}_\mathrm{r}\mathbf{Q}_\mathrm{r}\bar{\mathbf{H}}^H_\mathrm{r}\big|},\label{eq:FD_srA}\\
R^{\mathrm{FD}}_\mathrm{rd}&=\log_2\big|\mathbf{I}_{N}+\mathbf{H}_2\mathbf{Q}_\mathrm{r}\mathbf{H}^H_2\big|.
\end{align}
Notice that, with perfect SI channel state information, the SI could be completely removed from the received signal at the relay input-frontend. This doubles the achievable rate corresponding to the half-duplex relay. Assuming that an RSI remains uncanceled, a robust transceiver against the worst-case RSI channel is required, which is formulated as an optimization problem as follows
\begin{subequations}\label{P:FDa}
\begin{align}
\max_{\mathbf{Q}_\mathrm{s},\mathbf{Q}_\mathrm{r}}\ & \min_{\bar{\mathbf{H}}_\mathrm{r}}\quad \min(R^{\mathrm{FD}}_\mathrm{sr},R^{\mathrm{FD}}_\mathrm{rd})\tag{\ref{P:FDa}}\\
\text{subject to}\quad\quad\quad & \mathrm{Tr}(\mathbf{Q}_\mathrm{s})\leq P_\mathrm{s},\label{P:FDa:ConsA}\\
&\mathrm{Tr}(\mathbf{Q}_\mathrm{r})\leq P_\mathrm{r},\label{P:FDa:ConsB}\\
&\mathrm{Tr}(\bar{\mathbf{H}}_\mathrm{r}\bar{\mathbf{H}}^H_\mathrm{r})\leq T,\label{P:FDa:ConsC}
\end{align}
\end{subequations}
in which the throughput rate of the worst-case RSI channel is maximized. In constraint~\eqref{P:FDa:ConsC}, $T$ represents the RSI channel uncertainty bound. It is important to notice that, without this constraint, the worst-case achievable throughput rate is zero.
Next, we discuss the optimization problem~\eqref{P:FDa} in details.
\vspace*{-0.5cm}
\subsection{Robust Transceiver}
We can reformulate~\eqref{eq:FD_srA} as
\begin{align}
R^{\mathrm{FD}}_\mathrm{sr}&=\log_2\big|\mathbf{I}_{K_\mathrm{r}}+\mathbf{H}_1\mathbf{Q}_\mathrm{s}\mathbf{H}^H_1\left(\mathbf{I}+\bar{\mathbf{H}}_\mathrm{r}\mathbf{Q}_\mathrm{r}\bar{\mathbf{H}}^H_\mathrm{r}\right)^{-1}\big|.\label{eq:FD_srB}
\end{align}
Now, by applying the binomial inverse theorem~\cite{Henderson1981}, we arrive at
\begin{align}
R^{\mathrm{FD}}_\mathrm{sr}&=\log_2\big|\mathbf{I}_{K_\mathrm{r}}+\mathbf{H}_1\mathbf{Q}_\mathrm{s}\mathbf{H}^H_1-\nonumber\\
&\mathbf{H}_1\mathbf{Q}_\mathrm{s}\mathbf{H}^H_1\bar{\mathbf{H}}_\mathrm{r}\left(\mathbf{I}_{K_\mathrm{t}}+\mathbf{Q}_\mathrm{r}\bar{\mathbf{H}}^H_\mathrm{r}\bar{\mathbf{H}}_\mathrm{r}\right)^{-1}\mathbf{Q}_\mathrm{r}\bar{\mathbf{H}}^H_\mathrm{r}\big|.\label{eq:FD_srC}
\end{align}
Next, we determine the optimal subspace of the transmit signal from the source and relay.
The RSI channel at the relay is decomposed as
\begin{align}
\bar{\mathbf{H}}_\mathrm{r}&={\bf L}_\mathrm{r}{\bf\Sigma}_\mathrm{r}{\bf R}^{H}_\mathrm{r}.
\end{align}
Interestingly, as given in~\eqref{eq:QsA}, with ${\bf U}_\mathrm{s}={\bf R}_1$ the amount of information extraction at the relay from the source is maximized in a SI-free case. Moreover, with ${\bf U}_\mathrm{r}={\bf R}_2$, the amount of information extraction is maximized at the destination independent of the SI, see~\eqref{eq:QrA}. Notice that, the negative term in the log-determinant expression in~\eqref{eq:FD_srC} is controlled by $\mathbf{Q}_\mathrm{s}$, $\mathbf{Q}_\mathrm{r}$ and $\bar{\mathbf{H}}_\mathrm{r}$. Interestingly, in the log-determinant expression in~\eqref{eq:FD_srC}, the subspace of the negative term can span the subspace of the positive term $\mathbf{H}_1\mathbf{Q}_\mathrm{s}\mathbf{H}^H_1$ by the worst-case RSI channel. This way the worst-case RSI channel can have the most harmful effect on the received signal at the relay.
In what follows, we determine the left and right singular matrices of the worst-case RSI channel, i.e., $\mathbf{L}_\mathrm{r}$ and $\mathbf{R}_\mathrm{r}$. First, we define the maximum number of independent parallel streams that could be supported by the source-relay and relay-destination links.
\begin{definition}
The degrees-of-freedom $\mathrm{(DoF)}$ supported by the source-relay link and the $\mathrm{DoF}$ of the relay-destination links are defined as
$
\mathrm{DoF}_\mathrm{sr}=\min\{M,K_\mathrm{r}\},$ and
$\mathrm{DoF}_\mathrm{rd}=\min\{K_\mathrm{t},N\},
$
respectively.
\end{definition}
The following lemma proves useful for the rest of the paper.
\begin{proposition}
Let $\mathrm{DoF}_\mathrm{rd}\geq\mathrm{DoF}_\mathrm{sr}$. Then, the achievable throughput rate from the source to the relay with the worst-case RSI with uncertainty bound $T$ is given by
\begin{subequations}\label{Lemma1}
\begin{align}
\min_{\sigma_{\mathrm{r}_i},\ \forall i}\ &\sum_{i=1}^{\min(M,K_\mathrm{r})} \log_2{\left(1+
\frac{\sigma_{1_i}^2\gamma_{\mathrm{s}_i}}{1+\gamma_{\mathrm{r}_i}\sigma_{\mathrm{r}_i}^2}\right)}\tag{\ref{Lemma1}}\\
\mathrm{s.t.}\quad&\sum_{i=1}^{\min(M,K_\mathrm{r})}\sigma^2_{\mathrm{r}_i}\leq T,
\end{align}
\end{subequations}
where $\sigma_{1_i}$, $\sigma_{\mathrm{r}_i}$, $\gamma_{\mathrm{s}_i}$ and $\gamma_{\mathrm{r}_i}$ are the $i$-th diagonal elements of $\boldsymbol{\Sigma}_1$ ,$\boldsymbol{\Sigma}_\mathrm{r}$, $\boldsymbol{\Gamma}_\mathrm{s}$ ,$\boldsymbol{\Gamma}_\mathrm{r}$, respectively.
\begin{proof}
First, we determine the subspace of the worst-case RSI channel. The left and right singular matrices of the worst-case RSI channel, i.e, ${\bf L}_\mathrm{r}$ and ${\bf R}_\mathrm{r}$, should project the transmit signal from the relay output on the dimensions spanned by the received signal from the source at the relay input. Let the left singular matrix of the RSI channel be ${\bf L}_\mathrm{r}= {\bf L}_1$. Then, the expression in~\eqref{eq:FD_srC} is formulated as
\begin{align}
\bar{R}^{\mathrm{FD}}_\mathrm{sr}&=\log_2\big|\mathbf{I}_{K_\mathrm{r}}+\mathbf{L}_1\mathbf{\Sigma}_1\mathbf{\Gamma}_\mathrm{s}\mathbf{\Sigma}^{H}_1\mathbf{L}^H_1-\nonumber\\
&\mathbf{L}_1\mathbf{\Sigma}_1\mathbf{\Gamma}_\mathrm{s}\mathbf{\Sigma}^{H}_1\mathbf{\Sigma}_\mathrm{r}{\bf R}^{H}_\mathrm{r}\left(\mathbf{I}_{K_\mathrm{t}}+\mathbf{Q}_\mathrm{r}\bar{\mathbf{H}}^H_\mathrm{r}\bar{\mathbf{H}}_\mathrm{r}\right)^{-1}\mathbf{Q}_\mathrm{r}{\bf R}_\mathrm{r}{\bf\Sigma}^H_\mathrm{r}\mathbf{L}^H_1\big|\nonumber\\
&= \log_2\big|\mathbf{I}_{K_\mathrm{r}}+\mathbf{\Sigma}_1\mathbf{\Gamma}_\mathrm{s}\mathbf{\Sigma}^{H}_1-\nonumber\\
&\mathbf{\Sigma}_1\mathbf{\Gamma}_\mathrm{s}\mathbf{\Sigma}^{H}_1\mathbf{\Sigma}_\mathrm{r}{\bf R}^{H}_\mathrm{r}\left(\mathbf{I}_{K_\mathrm{t}}+\mathbf{Q}_\mathrm{r}\bar{\mathbf{H}}^H_\mathrm{r}\bar{\mathbf{H}}_\mathrm{r}\right)^{-1}\mathbf{Q}_\mathrm{r}{\bf R}_\mathrm{r}{\bf\Sigma}^H_\mathrm{r}\big|.\label{eq:FD_srD}
\end{align}
Notice that the optimal relay transmit covariance matrix $\mathbf{Q}_\mathrm{r}$ lies in the subspace spanned by the left singular matrices of $\mathbf{H}_2$, i.e, $\mathbf{Q}_\mathrm{r}={\bf R}_2{\bf\Gamma}_\mathrm{r}{\bf R}^H_2$, see~\eqref{eq:QrA}.
Now, by ${\bf R}_\mathrm{r}={\bf R}_2$, the negative term in the log-determinant expression spans the subspace of the positive term. Then, the expression in~\eqref{eq:FD_srD} is reformulated as
\begin{align}
\bar{R}^{\mathrm{FD}}_\mathrm{sr}&= \log_2\big|\mathbf{I}_{K_\mathrm{r}}+\mathbf{\Sigma}_1\mathbf{\Gamma}_\mathrm{s}\mathbf{\Sigma}^{H}_1-\nonumber\\
&\mathbf{\Sigma}_1\mathbf{\Gamma}_\mathrm{s}\mathbf{\Sigma}^{H}_1\mathbf{\Sigma}_\mathrm{r}\left(\mathbf{I}_{K_\mathrm{t}}+\mathbf{\Gamma}_\mathrm{r}\mathbf{\Sigma}^H_\mathrm{r}\mathbf{\Sigma}_\mathrm{r}\right)^{-1}\mathbf{\Gamma}_\mathrm{r} {\bf\Sigma}^H_\mathrm{r}\big|\nonumber\\
&=\sum_{i=1}^{\min(M,K_\mathrm{r})} \log_2{(1+\sigma_{1_i}^2\gamma_{\mathrm{s}_i}-\sigma_{1_i}^2\sigma_{\mathrm{r}_i}^2\gamma_{\mathrm{s}_i}\gamma_{\mathrm{r}_i}(1+\gamma_{\mathrm{r}_i}\sigma_{\mathrm{r}_i}^2)^{-1})}\nonumber\\
&=\sum_{i=1}^{\min(M,K_\mathrm{r})} \log_2{\left(1+
\frac{\sigma_{1_i}^2\gamma_{\mathrm{s}_i}}{1+\gamma_{\mathrm{r}_i}\sigma_{\mathrm{r}_i}^2}\right)}.
\label{eq:FD_srE}
\end{align}
Now, having $\mathrm{DoF}_\mathrm{rd}\geq\mathrm{DoF}_\mathrm{sr}$, the throughput rate over the source-relay link with the worst-case RSI is given by the following optimization problem,
\begin{align}
\min_{\sigma_{\mathrm{r}_i},\ \forall i}\quad \bar{R}^{\mathrm{FD}}_\mathrm{sr}\quad
\mathrm{s.t.}\quad\sum_{i=1}^{\min(M,K_\mathrm{r})}\sigma^2_{\mathrm{r}_i}\leq T.
\end{align}
\end{proof}
\end{proposition}
\begin{figure}
\tikzset{every picture/.style={scale=.95}, every node/.style={scale=0.8}}%
\input{WorstCaseA}
\caption{Comparison between the average worst-case achievable rate (WC) and the upperbound (UB). Hypothetically, the singular values of the RSI channel are assumed to be given. We consider equal numbers of antennas at all transmitters and receivers. Solid curves: worst-case achievable rates, dashed curves: upperbounds.}
\label{fig:WorstCaseA}
\end{figure}
This shows that, for the worst-case SI channel, the achievable rate of the source-relay link is the sum of achievable rates of $\min(M,K_\mathrm{r})$ data-streams. Notice that by ${\bf L}_\mathrm{r}={\bf L}_1$ and ${\bf R}_\mathrm{r}={\bf R}_2$, the singular directions of the worst-case RSI channel align along the singular directions of the source-relay link. However, this is the worst-case RSI only if $\mathrm{DoF}_\mathrm{rd}\geq\mathrm{DoF}_\mathrm{sr}$. Otherwise, the singular directions of the worst-case RSI should not align along the singular directions of the source-relay link. Rather, they should lie on the subspace spanned by all singular directions of the source-relay link. Hypothetically, given the singular values of the RSI channel, the comparison between the worst-case achievable rate and the upperbound is depicted in~\figurename{\ref{fig:WorstCaseA}} as a function of $\frac{T}{P}$ for $P=P_\mathrm{s}=P_\mathrm{r}=5$. Notice that, given the singular values of the RSI channel, the worst-case rate is a function of the worst-case singular directions of the RSI channel, which are ${\bf L}_\mathrm{r}={\bf L}_1$ and ${\bf R}_\mathrm{r}={\bf R}_2$. Furthermore, note that the rate upperbounds assume complete RSI channel knowledge.
\begin{remark}
The function $\frac{1}{1+\gamma_{\mathrm{r}_i}\sigma_{\mathrm{r}_i}^2}$ is a monotonically decaying function in both $\gamma_{\mathrm{r}_i}$ and $\sigma_{\mathrm{r}_i}$. Hence, for either $\sigma_{\mathrm{r}_i}=0,\ \forall i$ or $\gamma_{\mathrm{r}_i}=0,\ \forall i$, the achievable rate of the source-relay link is maximized. However, notice that the case with $\sigma_{\mathrm{r}_i}=0,\ \forall i$, represents zero RSI, hence, it is of our interest. However, with $\gamma_{\mathrm{r}_i}=0,\ \forall i$, the relay-destination link throughput rate is zero, hence, this case results in zero source-destination throughput rate. From~\eqref{eq:FD_srE}, allocating less power to the $i$-th stream of the relay-destination link, i.e., $\gamma_{\mathrm{r}_i},\ \forall i\in\{1,\min(M,K_\mathrm{r})\}$, results in an improved achievable rate of the source-relay link, while assuming the worst-case SI channel.
\end{remark}
Now, the remaining question is, how many information bits can be reliably transferred from the source to the destination, with the worst-case ${\sigma_{\mathrm{r}_i},\ \forall i}$? By determining the left- and right singular matrices of $\mathbf{Q}_\mathrm{s}$, $\mathbf{Q}_\mathrm{r}$ and $\mathbf{H}_\mathrm{r}$, the optimization problem~\eqref{P:FDa} becomes equivalent to
\begin{subequations}\label{P:FDb}
\begin{align}
\max_{\boldsymbol{\gamma}_\mathrm{s},\boldsymbol{\gamma}_\mathrm{r}}\quad & \min_{\boldsymbol{\sigma}_{\mathrm{r}}}\quad \min(\bar{R}^{\mathrm{FD}}_\mathrm{sr},R^{\mathrm{FD}}_\mathrm{rd})\tag{\ref{P:FDb}}\\
\text{subject to}\quad & \|\boldsymbol{\gamma}_\mathrm{s}\|_1\leq P_\mathrm{s},\label{P:FDb:ConsA}\\
&\|\boldsymbol{\gamma}_\mathrm{r}\|_1\leq P_\mathrm{r},\label{P:FDb:ConsB}\\
&\|\boldsymbol{\sigma}^2_\mathrm{r}\|_1\leq T,\label{P:FDb:ConsC}
\end{align}
\end{subequations}
in which, $\boldsymbol{\gamma}_\mathrm{s}=[\gamma_{\mathrm{s}_1},\cdots,\gamma_{\mathrm{s}_{\min(M,K_\mathrm{r})}}]$ and $\boldsymbol{\gamma}_\mathrm{r}=[\gamma_{\mathrm{r}_1},\cdots,\gamma_{\mathrm{r}_{\min(N,K_\mathrm{t})}}]$ and $\boldsymbol{\sigma}_\mathrm{r}=[\sigma_{\mathrm{r}_1},\cdots,\sigma_{\mathrm{r}_{\min(K_\mathrm{t},K_\mathrm{r})}}]$. The optimization problem~\eqref{P:FDb} can be reformulated as
\begin{subequations}\label{P:FDc}
\begin{align}
\max_{R,\boldsymbol{\gamma}_\mathrm{s},\boldsymbol{\gamma}_\mathrm{r}}\quad & \min_{\boldsymbol{\sigma}_{\mathrm{r}}}\quad R\tag{\ref{P:FDc}}\\
\text{s.t.}\quad & R\leq\nonumber\\ &\sum_{i=1}^{\min(M,K_\mathrm{r})} \log_2{\left(1+
\frac{\sigma_{1_i}^2\gamma_{\mathrm{s}_i}}{1+\gamma_{\mathrm{r}_i}\sigma_{\mathrm{r}_i}^2}\right)},\\
& R\leq\sum_{j=1}^{\min(N,K_\mathrm{t})} \log_2{(1+\sigma_{2_j}^2\gamma_{\mathrm{r}_j})},\\
&\eqref{P:FDb:ConsA}-\eqref{P:FDb:ConsC}.\nonumber
\end{align}
\end{subequations}
For the purpose of simplification for further discussions, let $M<~K_\mathrm{r}=K_\mathrm{t}<N$. Then, the number of independent data-streams supported by the source-relay and relay-destination links are limited to $M$ and $K_\mathrm{t}$, respectively. Then, the vector of singular values of the worst-case SI channel, i.e., $\boldsymbol{\sigma}_\mathrm{r}$, is composed of $M$ non-zero values and $K_\mathrm{t}-M$ zero values. Interestingly, the robust power allocation at the relay maximizes the information rate of the $i$-th stream by maximizing the term $\frac{1}{1+\gamma_{\mathrm{r}_i}\sigma_{\mathrm{r}_i}^2}$, however the worst-case SI channel for the $i$-th stream represents a $\sigma_{\mathrm{r}_i}$ that minimizes the term $\frac{1}{1+\gamma_{\mathrm{r}_i}\sigma_{\mathrm{r}_i}^2}$. Now, define $\bar{\sigma}^2_{1_i}=
\frac{\sigma^2_{1_i}}{1+\gamma_{\mathrm{r}_i}\sigma_{\mathrm{r}_i}^2}$. Then, the optimization problem~\eqref{P:FDc} is reformulated as
\begin{subequations}\label{P:FDd}
\begin{align}
\max_{R,\boldsymbol{\gamma}_\mathrm{s},\boldsymbol{\gamma}_\mathrm{r}}\quad & \min_{\boldsymbol{\sigma}_{\mathrm{r}}}\quad R\tag{\ref{P:FDd}}\\
\text{s.t.}\quad & R\leq\sum_{i=1}^{M} \log_2{\left(1+\bar{\sigma}_{1_i}^2\gamma_{\mathrm{s}_i}\right)},\label{P:FDd:ConsA}\\
& R\leq\sum_{j=1}^{K_\mathrm{t}} \log_2{(1+\sigma_{2_j}^2\gamma_{\mathrm{r}_j})},\label{P:FDd:ConsB}\\
&\eqref{P:FDb:ConsA}-\eqref{P:FDb:ConsC}.\nonumber
\end{align}
\end{subequations}
The objective of this problem is an affine function. Moreover, the constraints~\eqref{P:FDd:ConsB} and~\eqref{P:FDb:ConsA}-\eqref{P:FDb:ConsC} are convex constraints. However, the constraint~\eqref{P:FDd:ConsA} is a non-convex constraint, since the RHS is not necessarily a concave function of the optimization parameters. Hence, the problem is a non-convex optimization problem. Furthermore, notice that the minimum of the objective function w.r.t. $\boldsymbol{\sigma}_{\mathrm{r}}$ is maximized w.r.t. $R,\boldsymbol{\gamma}_\mathrm{s},\boldsymbol{\gamma}_\mathrm{r}$. Next, we propose an efficient algorithm for obtaining a stationary point.
\begin{algorithm}
\caption{Robust Transceiver Design}
\begin{algorithmic}[1]
\State Set outer-iteration index $l=2$
\State Define $R^{(2)}=1$ and $R^{(1)}=0$
\State Define $\bar{P}_\mathrm{r}^{(l)}=P_\mathrm{r}$
\State Define scalar $c\in [0.9,1)$
\While {$|R^{(l)}-R^{(l-1)}|$ large}
\State Determine $\boldsymbol\gamma^{(l)}_\mathrm{r}=[\tau^{(l)}_\mathrm{r}-\frac{1}{\boldsymbol{\sigma}^2_2}]^{+}$, s.t. $\|\boldsymbol\gamma^{(l)}_\mathrm{r}\|_1=\bar{P}_\mathrm{r}^{(l)}$
\State Determine $\bar{\boldsymbol\gamma}^{(l)}_\mathrm{r}=\boldsymbol\gamma^{(l)}_\mathrm{r}(1:\min(M,K_\text{t}))$
\State Set inner-iteration index $q=2$
\State Define $\boldsymbol{\sigma}^{(2)}_\mathrm{r}=\mathbf{1}^T$ and $\boldsymbol{\sigma}^{(1)}_\mathrm{r}=\mathbf{0}^T$
\State Determine $\boldsymbol\gamma^{(q)}_\mathrm{s}=[\tau^{(q)}_\mathrm{s}-\frac{1}{\boldsymbol{\sigma}^2_1}]^{+}$, s.t. $\|\boldsymbol\gamma^{(q)}_\mathrm{s}\|_1=P_\mathrm{s}$
\While {$\|\boldsymbol{\sigma}^{(q)}_\mathrm{r}-\boldsymbol{\sigma}^{(q-1)}_\mathrm{r}\|_2$ large}
\State Define $\mathbf{u}^{(q)}=\frac{{\boldsymbol{\sigma}}^{(q)^2}_1\odot\boldsymbol\gamma^{(q)}_\mathrm{s}}{\bar{\boldsymbol\gamma}^{(l)}_\mathrm{r}}$
\State Obtain $\boldsymbol{\sigma}^{(q)^2}_\mathrm{r}=[\tau^{(q)}_\mathrm{SI}-\frac{1}{\mathbf{u}^{(q)}}]^{+}$, s.t. $\|\boldsymbol{\sigma}^{(q)^2}_\mathrm{r}\|_1=T$
\State Define $\mathbf{v}^{(q)}=\frac{\tilde{\boldsymbol{\sigma}}^{(q)^2}_\mathrm{1}}
{\mathbf{1}+\boldsymbol\gamma^{(l)}_\mathrm{r}\odot\boldsymbol{\sigma}^{(q)^2}_\mathrm{r}}$
\State Obtain $\boldsymbol\gamma^{(q)}_\mathrm{s}=[\tau^{(q)}_\mathrm{s}-\frac{1}{\mathbf{v}^{(q)}}]^{+}$, s.t. $\|\boldsymbol\gamma^{(q)}_\mathrm{s}\|_1=P_\mathrm{s}$
\State $q=q+1$
\EndWhile
\State Define $\bar{\boldsymbol\gamma}^{(l)}_\mathrm{s}=\boldsymbol\gamma^{(q)}_\mathrm{s}$
\State Define $\bar{\mathbf{v}}^{(l)}=\mathbf{v}^{(q)}$
\State Calculate $R^{(l)}_\mathrm{sr}=\sum_{i=1}^{M} \log_2{\left(1+\bar{v}_{i}\bar{\gamma}_{\mathrm{s}_i}\right)} $
\State Calculate $R^{(l)}_\mathrm{rd}=\sum_{j=1}^{K_\mathrm{t}} \log_2{\left(1+\sigma^2_{2_j}{\gamma}_{\mathrm{r}_j}\right)} $
\State $l=l+1$
\State Obtain $R^{(l)}=\min(R^{(l)}_\mathrm{sr},R^{(l)}_\mathrm{rd})$
\State Set $\bar{P}^{(l)}_\mathrm{r}=c\bar{P}^{(l-1)}_\mathrm{r}$
\EndWhile
\end{algorithmic}
\label{alg:MaterialCharacterization}
\end{algorithm}
\begin{figure*}
\centering
\begin{minipage}{0.2\textwidth}
\subfigure[$\{M,K_\mathrm{r}\}=\{2,3\}$]{
\tikzset{every picture/.style={scale=.8}, every node/.style={scale=0.8}}%
\input{HdVsFdB}
\label{fig:HdVsFdB}
}
\end{minipage}
\quad\quad\quad\quad\quad\quad\quad
\begin{minipage}{0.2\textwidth}
\subfigure[$\{M,K_\mathrm{r}\}=\{10,15\}$]{
\tikzset{every picture/.style={scale=.8}, every node/.style={scale=0.8}}%
\input{HdVsFdD}
\label{fig:HdVsFdD}
}
\end{minipage}
\quad\quad\quad\quad\quad\quad
\begin{minipage}{0.2\textwidth}
\subfigure[$K_\mathrm{t}+K_\mathrm{r}=12$]{
\tikzset{every picture/.style={scale=.8}, every node/.style={scale=0.8}}%
\input{HdVsFdE}
\label{fig:HdVsFdE}
}
\end{minipage}
\caption{The transmit power budgets at the source and the relay are assumed to be equal, i.e., $P_\mathrm{s}=P_\mathrm{r}=P=5$.}
\label{fig:HdVsFdBD}
\end{figure*}
\subsection{Optimization Algorithm}
The proposed algorithm is based on the following intuitions,
\begin{enumerate}
\item given optimal $\bar{\sigma}_{1_i},\ \forall i,$ (genie-aided), the problem is solved by the water-filling algorithm.
\item the rate of the $i$-th stream of the source-relay link is reduced as $\gamma_{\mathrm{r}_i}$ and/or $\sigma_{\mathrm{r}_i}$ increase.
\item at the relay, transmitting with less power than the available power budget, reduces the throughput rate of the relay-destination link, but increases the throughput rate of the source-relay link.
\end{enumerate}
The algorithm is based on successive water-filling procedures which are iterated. This procedure is explained for the $l$-th iteration as follows
\begin{enumerate}[(I)]
\item the relay-destination rate is maximized by allocating more power to better channels, and obtaining a water level $\tau^{(l)}_r$, which satisfies the power budget $P^{(l)}_\mathrm{r}\leq P_\mathrm{r}$,
\item the worst-case SI channel is the one that interferes the strong data-streams (data-streams with high power) received at the relay more than the interference on the weak streams. This is realized by water-filling, and obtaining a water level $\tau^{(l)}_\mathrm{SI}$ that satisfies the RSI power bound $T$.
\item having the solutions from (I) and (II), the optimal power allocation at the source is fulfilled by water-filling with water level $\tau^{(l)}_\mathrm{s}$, which satisfies the power constraint $P_\mathrm{s}$,
\item having the solutions from (I), (II) and (III), we compute the achievable source-relay and relay-destination rates, i.e., $R^{(l)}_\mathrm{sr}$ and $R^{(l)}_\mathrm{rd}$. If $|R^{(l)}_\mathrm{rd}-R^{(l)}_\mathrm{sr}|$ is still large, we perform step (I) with a power budget less than $P^{(l)}_\mathrm{r}$.
\end{enumerate}
We present the algorithm pseudo-code in detail in Algorithm 1. It is crucial to note that the parameters $\boldsymbol{\sigma}_\mathrm{r}$ and $\boldsymbol{\gamma}_\mathrm{s}$ play a non-collaborative game in the inner-loop for a fixed strategy $\boldsymbol{\gamma}_\mathrm{r}$ which is updated in the outer-loop.
\section{Numerical Results}
We assume equal transmit power budgets at the source and at the relay, $P=P_\mathrm{s}=P_\mathrm{r}=5$. Moreover, the receiver AWGN variance is assumed to be unity. We investigate the performance of full-duplex relaying with RSI channel uncertainty bound $T$, i.e., $\mathrm{Tr}(\bar{\mathbf{H}}_\mathrm{r}\bar{\mathbf{H}}^H_\mathrm{r})\leq T$. We consider the column vectors of the source-relay and the relay-destination channel matrices to be from zero-mean Gaussian distribution with identity covariance matrices. That means, by representing the $i$-th column of $\mathbf{H}_1$ and $j$-th column of $\mathbf{H}_2$ as $\mathbf{h}_{1i}$ and $\mathbf{h}_{2j}$, respectively, we assume $\mathbf{h}_{1i}\sim\mathcal{CN}(\mathbf{0},\mathbf{I})$ and $\mathbf{h}_{2j}\sim\mathcal{CN}(\mathbf{0},\mathbf{I})$. We perform Monte-Carlo simulations with $L=10^4$ realizations from random channels and noise vectors. Hence, the average worst-case throughput rate is defined as the average of worst-case rates for $L$ randomizations, i.e.,
$
R_{\mathrm{av}}=\frac{1}{L}\sum_{l=1}^{L}R_l.
$
Notice that, for each set of realizations, i.e., $\{\mathbf{H}_1,\mathbf{H}_2,\mathbf{n}_\mathrm{r},\mathbf{n}_\mathrm{d}\}$, we solve the robust transceiver design as is elaborated in Algorithm 1. We run two sets of simulations as described in two following subsections.
\subsection{Antenna Array Increment}
We consider two cases, where the source, relay and destination are equipped with (a)- small antenna array and (b)- large antenna arrays. For these cases, we have
\begin{enumerate}[(a)-]
\item $\{M,K_\mathrm{r}\}=\{2,3\}$ with\\ $\{K_\mathrm{t},N\}=\{2,3\}$ and $\{K_\mathrm{t},N\}=\{3,4\}$
\item $\{M,K_\mathrm{r}\}=\{10,15\}$ with\\ $\{K_\mathrm{t},N\}=\{10,15\}$ and $\{K_\mathrm{t},N\}=\{15,20\}$
\end{enumerate}
These cases are considered to highlight the performance of full-duplex DF relaying as a function of number of antennas with the worst-case RSI. Interestingly, as the number of antennas at the source, relay and destination increase, full-duplex relaying achieves a higher throughput rate even with strong RSI. Furthermore, notice that the worst-case RSI casts strong interference on the strong streams from the source to the destination. With very low RSI power $T\rightarrow 0$, full-duplex almost doubles the throughput rate compared to the half-duplex counterpart. This can be seen in~\figurename{~\ref{fig:HdVsFdBD}}, where the curves cross the vertical axis. However, as $T$ increases, the efficiency of full-duplex operation drops.
Consider the case $\{M,K_\mathrm{r}\}=\{2,3\}$. First let $\{K_\mathrm{t},N\}=\{2,3\}$, where the DoF at the source-relay and relay-destination links are both limited by $2$. In this case, on one hand, the worst-case RSI distributes $T$ over $2$ streams supported by the source-relay link in order to have the most destructive impact. However, on the other hand, the relay transmits with less power, in order to cast less interference on the source-relay link through the RSI channel. Now, with $\{K_\mathrm{t},N\}=\{3,4\}$, the relay-destination link supports one stream more than the source-relay link. Hence, the power controller at the relay will distribute the transmit power over $3$ streams, and only $2$ of those streams cast interference at the relay input (due to the RSI channel). Hence, as can be seen in~\figurename{~\ref{fig:HdVsFdB}}, increasing the $\mathrm{DoF}$ of the relay-destination link does not have significant impact on the achievable rate. This is due to the fact that the source-relay link becomes the communication bottleneck. A similar phenomenon occurs with a large antenna array at the source, relay and destination as can be seen in~\figurename{~\ref{fig:HdVsFdD}}.
\subsection{Relay Tx/Rx Antenna allocation}
Let the relay have $K_\mathrm{t}+K_\mathrm{r}=12$ antennas in total. Furthermore, suppose that the numbers of antennas at the source and the destination are $\{M,N\}=\{2,10\}$. The question is, from the $12$ antennas at the relay, how many should be used for reception in the robust design? To answer this question, we consider the following scenarios
\begin{enumerate}[(a):]
\item $\{K_\mathrm{t},K_\mathrm{r}\}=\{4,8\}\Rightarrow
\mathrm{DoF}_\mathrm{sr}=2,\ \mathrm{DoF}_\mathrm{rd}=4$
\item $\{K_\mathrm{t},K_\mathrm{r}\}=\{6,6\}\Rightarrow
\mathrm{DoF}_\mathrm{sr}=2,\ \mathrm{DoF}_\mathrm{rd}=6$
\item $\{K_\mathrm{t},K_\mathrm{r}\}=\{8,4\}\Rightarrow
\mathrm{DoF}_\mathrm{sr}=2,\ \mathrm{DoF}_\mathrm{rd}=8$
\end{enumerate}
As can be seen in~\figurename{~\ref{fig:HdVsFdE}}, by using more antennas for reception than for transmission at the relay, i.e., $K_\mathrm{r}>K_\mathrm{t}$ as in case (a), the throughput rate is maximized for both the HD relay and the worst-case FD relay. This is due to the fact that increasing the signal-to-noise ratio (SNR) of the source-relay streams enhances the overall throughput rate more than the increase by the $\mathrm{DoF}$ of the relay-destination link. However, notice that in this setup the overall DoF from the source to destination is limited by the DoF of the source-relay link.
\section{Conclusion}
In this paper, we investigated a multi-antenna source communicating with a multi-antenna destination through a multi-antenna relay. The relay is assumed to exploit a decode-and-forward (DF) strategy. The transceivers are designed in order to be robust against the worst-case residual self-interference (RSI). To this end, the worst-case achievable throughput rate is maximized. This optimization problem turns out to be a non-convex problem. Assuming that the degrees-of-freedom (DoF) of the source-relay link is less than the DoF of the relay-destination link, we determined the left and right matrices of the singular vectors of the worst-case RSI channel. Then, the problem is simplified to the optimal power allocation at the transmitters, which guarantees robustness against the worst-case RSI singular values. This simplified problem is still non-convex. Based on the intuitions for optimal power allocation at the source and relay, we proposed an efficient algorithm to capture a stationary point. Hence, in a DF relay with multi-stream beamforming, we determine the critical point where the half-duplex relaying outperforms the full-duplex relaying. This critical point provides a mode-switching threshold in hybrid half-duplex full-duplex relay systems.
\bibliographystyle{IEEEtran}
|
1,314,259,995,062 | arxiv | \section{Introduction}
The analysis of the available high quality cosmological
data (supernovae type Ia, CMB, galaxy clustering, etc.)
have converged during the last decade towards a cosmic expansion
history that involves a spatial flat geometry and
a recent accelerating expansion of the
universe (Spergel et al. 2007; Davis et al. 2007;
Kowalski et al. 2008; Komatsu et al. 2009 and references therein).
This expansion has been attributed to an energy component
(dark energy) with negative pressure which dominates the universe at
late times and causes the observed accelerating expansion. The
simplest type of dark energy corresponds to the cosmological
constant (see for review Peebles \& Ratra 2003). The so called
concordance $\Lambda$ model fits accurately the current
observational data and thus it is an excellent candidate to be the
model which describes the observed universe.
However, the concordance model suffers from, among others
(cf. Perivolaropoulos 2008),
two fundamental problems: (a)
{\it the fine tuning problem} ie., the fact that the observed value of the
vacuum density ($\rho_{\Lambda}=\Lambda c^{2}/8\pi G$)
is more than 120 orders of magnitude below that
value found using quantum field theory (Weinberg 1989) and (b)
{\it the coincidence problem} ie., the matter energy density
and the vacuum energy density are of the same
order prior to the present epoch, despite the fact that the former
is a function of time but the latter not (Peebles \& Ratra 2003).
Attempts to solve the coincidence problem have been presented in the
literature (see Egan \& Lineweaver 2008
and references therein), in which
an easy way to overpass the coincidence problem is to replace the
constant vacuum energy with a dark energy that evolves with time.
The simplest approach is to consider a
tracker scalar field $\phi$ in which it
rolls down the potential energy $V(\phi)$ and therefore it
could mimic the dark energy
(see Ratra \& Peebles 1988; Weinberg 1989; Turner \& White 1997;
Caldwell, Dave \& Steinhardt 1998; Padmanabhan 2003).
Nevertheless, the latter consideration does not really solve the
problem because the initial value of the dark energy still needs to be
fine tuned (Padmanabhan 2003). Also, despite the fact that the current
observations do not rule out the possibility of a dynamical
dark energy (Tegmark et al. 2004), they strongly indicate that
the dark energy equation of state parameter $w\equiv P_{DE}/\rho_{DE}$
is close to -1 (Spergel et al. 2007; Davis et al. 2007;
Kowalski et al. 2008; Komatsu et al. 2009).
Alternatively, more than two decades ago,
Ozer \& Taha (1987) proposed a different pattern in which
a time varying $\Lambda$ parameter could be a possible
candidate to solve the two fundamental cosmological puzzles
(see also Bertolami 1986; Freese et al. 1987;
Peebles \& Ratra 1988;
Carvalho, Lima \& Waga 1992; Overduin \& Cooperstock 1998;
Bertolami \& Martins 2000; Opher \& Pellison 2004;
Bauer 2005; Barrow \& Clifton 2006;
Montenegro \& Carneiro 2007 and references therein).
In this cosmological paradigm,
the dark energy equation of state parameter $w$
is strictly equal to -1, but the vacuum energy density (or $\Lambda$)
is not a constant but
varies with time. Of course, the weak point in this ideology is the
unknown functional form of the $\Lambda(t)$ parameter. Also,
in the $\Lambda(t)$ cosmological model there is a coupling
between the time-dependent vacuum and matter
(Wang \& Meng 2005; Alcaniz \& Lima 2005;
Carneiro S. et al. 2008; Basilakos 2009; Basilakos, Plionis \&
Sol\'a 2009).
Indeed, using the combination of the conservation of the total energy
with the variation of the vacuum energy, one can prove that
the $\Lambda(t)$ model provides either a particle production process
or that the mass of the dark matter particles increases (Basilakos
2009 and references therein).
Despite the fact that
most of the recent papers in dark energy studies are based
on the assumption that the dark energy evolves
independently of the dark matter,
the unknown nature of both dark matter and dark energy
implies that at the moment we can not exclude the possibility of
interactions in the dark sector
(eg., Zimdahl, Pav\'on \& Chimento 2001;
Amendola et al. 2003; Cai \& Wang 2005; Binder \& Kremer 2006; Das,
Corasaniti, \& Khoury 2006; Olivares,
Atrio-Barandela \& Pav\'on 2008 and references therein).
The aim of this work is along the same lines, attempting
to generalize the main cosmological properties of the traditional
$\Lambda$-cosmology by introducing
a time varying vacuum energy and specifically to
investigate whether such models can yield a late
accelerated phase of the cosmic expansion,
without the need of the required, in the classical $\Lambda$-model,
extreme fine tuning.
The plan of the paper is as follows.
The basic theoretical elements of the problem are
presented in section 2, 3 and 4, by solving analytically [for a spatially flat
Friedmann-Lemaitre-Robertson-Walker (FLRW) geometry]
the basic cosmological equations. Also in these sections we prove that
the concordance $\Lambda$-cosmology is as a particular solution
of the $\Lambda(t)$ models.
In section 5 we place constraints on the main parameters of our model by
performing a likelihood analysis utilizing the recent Union08 SnIa data
(Kowalski et al. 2008). Also, in section 5 we compare
the different time varying vacuum
models with the traditional $\Lambda$
cosmology. In this section we also treat analytically,
the basic cosmological puzzles (the fine
tuning and the cosmic coincidence problem) with the aid of
the time varying $\Lambda(t)$ parameter.
Finally, in section 6 we draw our conclusions.
\section{The time dependent vacuum in the Expanding Universe}
In the context of a spatially flat FLRW geometry the basic
cosmological equations are:
\begin{equation}
\rho_{tot}=\rho_{f}+\rho_{\Lambda}=3H^{2}
\label{frie1}
\end{equation}
and
\begin{equation}
\frac{d({\rho}_{f}+\rho_{\Lambda})}{dt}+3H(\rho_{f}+P_{f}+
\rho_{\Lambda}+P_{\Lambda})=0 \;\;,
\label{frie2}
\end{equation}
where $\rho_{f}$ is the density of the ''cosmic'' fluid:
\begin{equation}
\rho_{f}(t)=\left\{ \begin{array}{cc}
\rho_{m}(t) &
\mbox{Matter era}\\
\rho_{r}(t) & \mbox{Radiation era}
\end{array}
\right.
\end{equation}
and
\begin{equation}
P_{f}(t)=\beta\rho_{f}=\left\{ \begin{array}{cc}
0 &
\mbox{Matter era} \;\;\beta=0 \\
\frac{\rho_{r}}{3} & \mbox{Radiation era} \;\;\beta=1/3
\end{array}
\right.
\end{equation}
is the corresponding pressure. Also
$\rho_{\Lambda}$ and $P_{\Lambda}$ denote the
density and the pressure of the vacuum component respectively.
From a cosmological point
of view, at an early enough epoch, the above generalized cosmic fluid behaves
like radiation $P_{f}=P_{r}=\rho_{r}/3$ ($\beta=1/3$), then behaves as
matter $P_{f}=P_{m}=0$ ($\beta=0$) and as long as
$P_{\Lambda}=-\rho_{\Lambda}$ it creates
an accelerated phase of the cosmic expansion (see below).
Notice, that in order to simplify our formalism we use geometrical units
($8\pi G=c\equiv 1$) in which $\rho_{\Lambda}=\Lambda$.
In the present work, we would like to investigate the potential
of a time varying $\Lambda=\Lambda(t)$ parameter
to account for the observed
acceleration of the expansion of the Universe.
Within this framework it is interesting to mention
that the equation of state takes the usual form of
$P_{\Lambda}(t)=-\rho_{\Lambda}(t)=-\Lambda(t)$ [see Ozer \& Taha
1987; Peebles \& Ratra 1988].
Also, introducing in the global dynamics the idea
of the time-dependent vacuum,
it is possible to explain the physical properties of the
dark energy as well as the fine tuning and the coincidence
problem respectively (see sections 5.1 and 5.2). Using now
eq.(\ref{frie2}), we have the following useful formula:
\begin{equation}
\dot{\rho_{f}}+3(\beta+1) H\rho_{f}=-\dot{\Lambda}
\label{frie33}
\end{equation}
and considering eq.(\ref{frie1}) we find:
\begin{equation}
\dot{H}+\frac{3(\beta+1)}{2} H^{2}=\frac{\Lambda}{2}
\label{frie34}
\end{equation}
where the over-dot denotes derivatives with respect to time.
If the vacuum term is negligible, $\Lambda(t) \longrightarrow 0$, then
the solution of the above equation reduces to $H(t)=\frac{2}{3(\beta+1)t}$.
Therefore, in the case of $\beta=0$ (matter era)
we get the Einstein de-Sitter model as we should, $H(t)=2/3t$, while
for $\beta=1/3$ we trace the radiation phase of the Universe ie.,
$H(t)=1/2t$.
On the other hand, if we consider the case of $\Lambda(t)\ne 0$ then
it becomes evident (see eq.\ref{frie33}) that there is a coupling between
the time-dependent vacuum and matter (or radiation) component.
Of course, in order to solve the above differential equation we need to
define explicitly the functional form of the $\Lambda(t)$ component.
Note, that the traditional $\Lambda=const$ cosmology
can be described directly by the integration of the eq.(\ref{frie34})
[for more details see section 3.1].
It is worth noting that the $\Lambda(t)$ scenario has the caveat of its unknown exact
functional form, which however is also the case for the vast majority
of the dark energy models.
Therefore, in the literature
there have
been different phenomenological parametrizations which treat
the time-dependent $\Lambda(t)$ function.
In particular, Freese et al. (1987) considered that
$\Lambda(t)=3c_{1}H^{2}$, with the constant $c_{1}$ being the ratio of the
vacuum to the sum of vacuum and matter density (see
also Arcuri \& Waga 1994). Chen \& Wu (1990) proposed a different
ansatz in which $\Lambda(t) \propto a^{-2}$.
Recently, many authors (see for example
Ray, Mukhopadhyay \& Meng 2007; Sil \& Som 2008 and references therein)
have investigated the global dynamical properties of the universe
considering that the vacuum energy density decreases linearly
either with the energy density or with the square Hubble parameter.
Attempts to provide a theoretical explanation for
the $\Lambda(t)$ have also been presented in the
literature (see
Shapiro \& Sol\'a 2000; Babi\'c et al. 2002; Grande et al. 2006; Sol\'a 2008
and references therein). These
authors found that a time dependent vacuum could
arise from the renormalization group (RG) in quantum field theory.
The corresponding solution for a running vacuum
is found to be $\Lambda(t)=c_{0}+c_{1}H^{2}(t)$ [where $c_{0}$ and
$c_{1}$ are constants; Grande et al. 2006]
and it can mimic the quintessence or phantom
behavior and a smoothly transition between the two.
Alternatively, Schutzhold (2002) used
a different pattern in which the vacuum term is proportional to
the Hubble parameter, $\Lambda(a) \propto H(a)$ [see
also Carneiro et al. 2008], while Basilakos (2009) considered
a power series form in $H$. It is worth noting, that the
linear pattern, $\Lambda(a) \propto H(a)$, has been motivated theoretically
through a possible connection of cosmology with
the QCD scale of strong interactions (Schutzhold 2002).
In this context, it has also been proposed that the
vacuum energy density can be defined from a possible link
of dark energy with QCD and
the topological structure of the universe (Urban \& Zhitnitsky 2009).
In this paper we have phenomenologically
identified a functional form of $\Lambda(a)$
for which we can solve the main differential equation
(see eq.\ref{frie34}) analytically. This is:
\begin{equation}
\Lambda_{\gamma m}(t)=3\gamma H^{2}(t)+2mH(t)+3n(\beta+1-\gamma){\rm e}^{2mt}
\label{frie444}
\end{equation}
where the constants $m$ and $n$ are included
for consistency of units (see below).
Although, the above functional form was not
motivated by some physical theory, but rather
phenomenologically by the fact that it provides analytical solutions
to the Friedmann equation, its exact form can be physically
justified {\em a posteriori}
within the framework of the previously mentioned theoretical
models (see appendix A).
Using now eq.\ref{frie444}, the generalized
Friedmann's equation (see eq.\ref{frie34}) becomes
\begin{equation}
\dot{H}=-\frac{3(\beta+1-\gamma)}{2}H^{2}+mH+\frac{3n(\beta+1-\gamma)}{2}{\rm e}^{2mt}
\label{frie344}
\end{equation}
and indeed, it is routine to perform the integration
of eq.(\ref{frie344}) to obtain (see appendix B):
\begin{equation}
\label{frie555a}
H(t)=\sqrt{n}{\rm e}^{mt}{\rm coth}\left[\frac{3(\beta+1-\gamma)\sqrt{n}}{2}S(t)\right]
\end{equation}
where
\begin{equation}
S(t)=\left\{ \begin{array}{cc}
({\rm e}^{mt}-1)/m & \;\;\;\;m\ne 0 \\
t & \;\;\;\;m=0
\end{array}
\right.
\label{SS}
\end{equation}
while the range of values for which the above integration
is valid is $n \in (0,+\infty)$ [for negative $n$ values see the appendix].
Using now the definition of the Hubble parameter $H\equiv {\dot a}/a$, the
scale factor of the universe $a(t)$, evolves with time as
\begin{equation}
a(t)=a_{1}
\sinh^{\frac{2}{3(\beta+1-\gamma)}}
\left[\frac{3(\beta+1-\gamma)\sqrt{n}}{2}S(t)\right] \;\;.
\label{frie565}
\end{equation}
It is worth noting, that the relevant
units of $m\ne 0$ should correspond to
$time^{-1}$, which implies that $m\propto H_{0}$.
The parameter $a_{1}$ is the constant of integration given by
\begin{equation}
a_{1}\equiv \left(\frac{\rho_{f0}}{\rho_{\Lambda 0}}\right)^{\frac{1}{3(\beta+1-\gamma)}}
\label{normden}
\end{equation}
where $\rho_{f0}$ and $\rho_{\Lambda 0}$ are the corresponding
densities at the present time [for which $a(t_{0})\equiv 1$].
In this context, the density of the cosmic fluid evolves with
time (see eq.\ref{frie1}) as:
\begin{equation}
\rho_{f}(t)=3H^{2}(t)-\Lambda_{\gamma m}(t)
\end{equation}
or
\begin{equation}
\rho_{f}(t)=3(1-\gamma) H^{2}(t)-2mH(t)-3n(\beta+1-\gamma){\rm e}^{2mt} \;\;.
\label{den22}
\end{equation}
In the following sections, we investigate thoroughly
whether such a generalized vacuum component
in an expanding Universe allows
for a late accelerated phase of the Universe
and under which circumstances such
an approach provides a viable solution
to the fine tuning problem as well as to the
cosmic coincidence problem.
\section{The matter+vacuum scenario}
In a matter+vacuum expanding universe
($\rho_{f}\equiv \rho_{m}$), we attempt to investigate
the correspondence of the $\Lambda(t)$ pattern with the traditional
$\Lambda$-cosmology in order to show
the extent to which they compare. In particular,
we will prove that the Hubble expansion, provided
by the current time-dependent vacuum, is
a generalization to that of the traditional $\Lambda$ cosmology.
Note, that in the present formalism the matter era
corresponds to $\beta=0$.
\subsection{The standard $\Lambda$-Cosmology}
Let us first investigate the solution for $(\gamma,m)=(0,0)$.
The vacuum term eq.(\ref{frie444}) of the problem becomes constant and
is given by $\Lambda_{00}(a)=\Lambda=3n$.
In this framework, the Hubble function (see eq.\ref{frie555a}) is
\begin{equation}
H_{\Lambda}(t)=\sqrt{\frac{\Lambda}{3}}
\coth\left(\frac{3}{2}\sqrt{\frac{\Lambda}{3}}\;t \right) \;\;.
\label{frie556}
\end{equation}
Now, using the well-known parametrization
\begin{equation}
\label{aln}
\Lambda=3n=3H^{2}_{0}\Omega_{\Lambda} \;\;\;\; \Omega_{\Lambda}=1-\Omega_{m}
\end{equation}
the scale factor of the universe is given by
\begin{eqnarray}
\label{all}
a_{\Lambda}(t)=a_{1}
\sinh^{\frac{2}{3}}\left(\frac{3H_{0}\sqrt{\Omega_{\Lambda}}t}{2}\right)
\end{eqnarray}
where (see eq.\ref{normden})
\begin{eqnarray}
\label{all1}
a_{1}=\left(\frac{\rho_{m0}}{\rho_{\Lambda 0}}\right)^{1/3}=
\left(\frac{\Omega_{m}}{\Omega_{\Lambda}}\right)^{1/3} \;\;.
\end{eqnarray}
The cosmic time is related with the scale factor as
\begin{equation}
t_{\Lambda}(a)=\frac{2}{3\sqrt{\Omega_{\Lambda}}H_{0} }
{\rm sinh^{-1}} \left(\sqrt{ \frac{\Omega_{\Lambda}} {\Omega_{m}}}
\;a^{3/2} \right) \;\;.
\end{equation}
Combining the above equations we can define the Hubble expansion as a function
of the scale factor:
\begin{eqnarray}
\label{hub1}
H_{\Lambda}(a)=H_{0}[\Omega_{\Lambda}+\Omega_{m}a^{-3}]^{1/2} \;\;\;.
\end{eqnarray}
In principle, $H_{0}$ and $\Omega_m$ are constrained by the recent WMAP data
combined with the distance measurements from the type Ia
supernovae (SNIa) and the Baryonic Acoustic Oscillations (BAOs)
in the distribution of galaxies. Following the
recent cosmological results of (Komatsu et al. 2009), we fix the
current cosmological parameters as $H_{0}=70.5$km/sec/Mpc and
$\Omega_{m}=1-\Omega_{\Lambda}=0.27$.
The current age of the universe ($a=1$) is $t_{0\Lambda}\simeq 13.77$Gyr,
while the inflection point takes place at
\begin{eqnarray}
\label{infle}
t_{I\Lambda}=\frac{2}{3\sqrt{\Omega_{\Lambda}}H_{0}}
{\rm sinh^{-1}} \left(\sqrt{ \frac{1} {2}} \right) \;,\;\;
a_{I\Lambda}=\left[\frac{\Omega_{m}}{2\Omega_{\Lambda}}\right]^{1/3} \;.
\end{eqnarray}
Therefore, we estimate $t_{I\Lambda}\simeq 0.51t_{0\Lambda}$
and $a_{I\Lambda}\simeq 0.56$.
Finally, due to the fact that the traditional $\Lambda$ cosmology
is a particular solution
of the current time varying vacuum models with $(\gamma,m)$
strictly equal to $(0,0)$,
the constant value $n$ is always
defined by eq.(\ref{aln}). Therefore, throughout the paper
all relevant cosmological quantities are parametrized according to
$n=\Omega_{\Lambda}H^{2}_{0}$.
\subsection{``The general'' $\Lambda(t)$ Model}
In this section, we examine a more general class of vacuum models with
$(\gamma,m)\ne (0,0)$ (hereafter $\Lambda_{\gamma m}$ model).
The Hubble expansion and the corresponding evolution of the
scale factor are (see eq.\ref{frie555a} and eq.\ref{frie565})
\begin{equation}
\label{frie755}
H(t)=\sqrt{\Omega_{\Lambda}}\;H_{0}\;{\rm e}^{mt}{\rm coth}
\left[\frac{3(1-\gamma)\sqrt{\Omega_{\Lambda}}H_{0}}{2m}({\rm e}^{mt}-1)\right]
\end{equation}
and
\begin{equation}
a(t)=a_{1}
\sinh^{\frac{2}{3(1-\gamma)}}
\left[\frac{3(1-\gamma)\sqrt{\Omega_{\Lambda}}H_{0}}{2m}({\rm e}^{mt}-1) \right]
\label{frie756}
\end{equation}
or
\begin{equation}
t(a)=\frac{1}{m}{\rm ln}\left[1+\frac{2m}{3(1-\gamma)\sqrt{\Omega_{\Lambda}}H_{0}}\;
{\rm sinh^{-1}}\left(\frac{a}{a_{1}}\right)^{3(1-\gamma)/2} \right] \;\;.
\label{frie656t2}
\end{equation}
Obviously, if $(\gamma,m) \longrightarrow (0,0)$
[or ${\rm e}^{mt}-1\approx mt$]
then the $\Lambda_{\gamma m}$ model
tends to the traditional $\Lambda$ cosmology, which
implies that the latter can be considered as a particular
solution of the general $\Lambda_{\gamma m}$ model.
Thus, this limit
together with eq.(\ref{normden}) provide that
\begin{equation}
a_{1}=\left(\frac{\Omega_{m}}{\Omega_{\Lambda}}\right)^{\frac{1}{3(1-\gamma)}} \;\;.
\end{equation}
Taking the above expressions into account, the basic cosmological quantities
as a function of the scale factor become
\begin{equation}
H(a)=H_{0}\left[1+g(a)\right][\Omega_{\Lambda}+\Omega_{m}a^{-3(1-\gamma)}]^{1/2}
\end{equation}
and
\begin{equation}
\Lambda_{\gamma m}(a)=3\gamma H^{2}+2mH+3H^{2}_{0}\Omega_{\Lambda}(1-\gamma)[1+g(a)]^{2}
\label{frie776}
\end{equation}
where
\begin{equation}
g(a)=\frac{2m}{3(1-\gamma)\sqrt{\Omega_{\Lambda}}H_{0}}\;
{\rm sinh^{-1}}\left(\sqrt{\frac{\Omega_{\Lambda}}{\Omega_{m}}}
\;a^{3(1-\gamma)/2} \right) \;\;.
\end{equation}
It is worth noting that if we take
$(\gamma,m)=(0,m)$
with $m \ne 0$ (hereafter mild vacuum model or $\Lambda_{0m}$), the
corresponding Hubble flow becomes:
\begin{equation}
H(a)=\left[1+g(a)\right]H_{\Lambda}(a) \;\;.
\end{equation}
Therefore,
as long as the function $g(a)$ takes small values [$g(a)\ll 1$],
the $\Lambda_{0m}$ model has exactly the constant vacuum
feature due to $H(a) \approx H_{\Lambda}(a)$.
In this context, utilizing eq.(\ref{frie776}) we simply have
\begin{equation}
\label{frie676}
\Lambda_{0m}(a)=2mH(a)+3H^{2}_{0}\Omega_{\Lambda}[1+g(a)]^{2} \;\;.
\end{equation}
Finally, the fact that the vacuum term has units of $time^{-2}$
implies that the vacuum term is proportional to $H^{2}_{0}$ or the
constant $m$ has to satisfy the following scaling relation:
$m \propto H_{0}$ (see also section 2). Therefore,
in the far future the condition
$m \propto H_{0}\ne 0$ represents a super-accelerated
expansion of the universe because
$a(t)\propto {\rm exp}({\frac{\sqrt{\Omega_{\Lambda}}H_{0}{\rm e}^{mt}}{m}})$.
\subsection{``The modified'' $\Lambda$ Model}
In this case we consider $(\gamma,m)=(\gamma,0)$ with $\gamma \ne 0$
(hereafter $\Lambda_{\gamma 0}$ model).
From eq.(\ref{frie555a}) we can
easily write the corresponding Hubble flow as a function of time
\begin{equation}
\label{frie455}
H(t)=\sqrt{\Omega_{\Lambda}}\;H_{0}
\;{\rm coth}\left[\frac{3(1-\gamma)\sqrt{\Omega_{\Lambda}}H_{0}}{2}\;t\right] \;\;.
\end{equation}
Using now eqs.(\ref{SS}, \ref{frie565}), the
scale factor of the universe $a(t)$, evolves with time as
\begin{equation}
a(t)=a_{1}
\sinh^{\frac{2}{3(1-\gamma)}}
\left[\frac{3(1-\gamma)\sqrt{\Omega_{\Lambda}}H_{0}}{2}\;t\right]
\label{frie456}
\end{equation}
where
\begin{eqnarray}
\label{all2}
a_{1}=\left(\frac{\Omega_{m}}{\Omega_{\Lambda}}\right)^{1/3(1-\gamma)} \;\;.
\end{eqnarray}
Inverting eq.(\ref{frie456}) we estimate the cosmic time:
\begin{equation}
t(a)=\frac{2}{3(1-\gamma)\sqrt{\Omega_{\Lambda}}H_{0} }
{\rm sinh^{-1}} \left(\sqrt{ \frac{\Omega_{\Lambda}} {\Omega_{m}}}
\;a^{3(1-\gamma)/2} \right) \;\;.
\label{frie456t}
\end{equation}
The corresponding inflection point [$\ddot{a}(t_{I})=0$] is found to be
\begin{equation}
\label{inflemod}
t_{I}=\frac{2}{3(1-\gamma)\sqrt{\Omega_{\Lambda}}H_{0}}
{\rm sinh^{-1}} \left(\sqrt{ \frac{1-3\gamma} {2}} \right)
\end{equation}
or
\begin{equation}
a_{I}=\left[\frac{(1-3\gamma)\Omega_{m}}{2\Omega_{\Lambda}}\right]^{1/3(1-\gamma)}
\end{equation}
which implies that the condition
for which an inflection point is present in the evolution of
the scale factor is $\gamma<1/3$.
As expected, for $\gamma \ll 1$ the
above solution tends to the concordance model,
$a_{\gamma 0}(t) \longrightarrow a_{\Lambda}(t)$.
Now from eqs.(\ref{frie455}, \ref{frie456}), using the well known
hyperbolic formula ${\rm coth^{2}}x-1=1/{\rm sinh^{2}x}$, we have after some algebra that:
\begin{equation}
H(a)=H_{0}[\Omega_{\Lambda}+\Omega_{m}a^{-3(1-\gamma)}]^{1/2} \;\;.
\end{equation}
From this analysis, it becomes
clear that the Hubble expansion predicted by the
$\Lambda_{\gamma 0}$ model extends that of the usual $\Lambda$ cosmology.
To this end, utilizing eq.(\ref{frie776}) we can obtain the vacuum
energy density
\begin{equation}
\label{frie476}
\Lambda_{\gamma 0}(a)=3\gamma H^{2}(a)+
3\Omega_{\Lambda}H^{2}_{0}(1-\gamma) \;\;.
\end{equation}
As we have previously
mentioned in section 2, the above phenomenological functional form
(see eq.\ref{frie476})
is motivated theoretically by the renormalization group (RG) in the quantum
field theory (Shapiro \& Sol\'a 2000; Babi\'c et al. 2002; Sol\'a
2008). Moreover, recent studies (see Grande et al. 2006 and
Grande, Pelinson \& Sol\'a 2009) find that this solution
alleviates the cosmic coincidence problem (see section 5.1).
To conclude, it is worth noting that at late
enough times ($a\gg 1$) the above solution
asymptotically reaches the de-Sitter regime $\Lambda\sim H^{2}$,
as far as the global dynamics is concerned.
\section{The radiation+vacuum scenario}
In this section, we consider a universe that is a spatially flat but contains
both radiation and a time vacuum term.
This crucial period in the cosmic history corresponds to
$\beta=1/3$.
Therefore, for clarity reasons in the following sections we re-formulate our approach
by using $\rho_{f}\equiv \rho_{r}$ and $P_{f}\equiv \rho_{r}/3$. These restrictions imply that
$$\frac{\rho_{f 0}}{\rho_{\Lambda 0}}\equiv
\frac{\rho_{r 0}}{\rho_{\Lambda 0}}=\frac{\Omega_{r}}{\Omega_{\Lambda}}$$
where, $\Omega_{r}\simeq 10^{-4}$ is the radiation density parameter at the
present epoch derived by the CMB data (see Komatsu et al. 2009).
Within this context, based on eqs.(\ref{frie444}), (\ref{frie565}) and
(\ref{normden}) we present briefly the following cosmological situations:
\begin{itemize}
\item {\bf radiation+constant vacuum:} $(\gamma,m)=(0,0)$:
The scale factor is
\begin{equation}
a(t)=\left(\frac{\Omega_{r}}{\Omega_{\Lambda}}\right)^{\frac{1}{4}}\sinh^{\frac{1}{2}}
\left(\sqrt{\Omega_{\Lambda}}H_{0}t\right) \;\;.
\end{equation}
Owing to the fact that in this period $t\ll 1$, the above solution reduces to the
following simple analytic approximation:
\begin{equation}
\label{approxrad}
a(t)\approx (2\sqrt{\Omega_{r}}H_{0}t)^{1/2} \;\;\;{\rm with}\;\;\;
H(t)\equiv \frac{\dot{a}}{a}\approx \frac{1}{2t} \;\;.
\end{equation}
\item {\bf radiation+general vacuum:} $(\gamma,m)\ne (0,0)$:
This general scenario provides
\begin{equation}
a(t)=\left(\frac{\Omega_{r}}{\Omega_{\Lambda}}\right)^{\frac{1}{4\gamma_{1}}}
\sinh^{\frac{1}{2\gamma_{1}}}
\left[\frac{2\gamma_{1}\sqrt{\Omega_{\Lambda}}H_{0}}{m}({\rm e}^{mt}-1) \right]
\end{equation}
where $\gamma_{1}=1-3\gamma/4$.
The vacuum component as a function of time (see eq.\ref{frie444}) is
\begin{equation}
\Lambda_{\gamma m}(t)\approx \frac{4(1-\gamma_{1})}{4\gamma^{2}_{1}t^{2}}+\frac{m}{\gamma_{1}t}
\label{llapprox1}
\end{equation}
or
\begin{equation}
\label{llapprox2}
\Lambda_{\gamma m}(a)\approx \frac{4(1-\gamma_{1})\Omega_{r}H^{2}_{0}}{a^{4\gamma_{1}}}
+\frac{2m\sqrt{\Omega_{r}}H^{2}_{0}}{a^{2\gamma_{1}}}
\;\;.
\end{equation}
It is very interesting that during the radiation epoch
$\Lambda_{\gamma m}(a) \propto a^{-4\gamma_{1}}$. For small values
of $\gamma$ or $\gamma_{1}\simeq {\cal O}(1)$, the latter relation
implies that as long as the scale factor tends to zero the
vacuum term goes rapidly to infinity
(see section 6).
In the case of $(\gamma,m)=(0,m)$ [or $\gamma_{1}=1$], the vacuum term
(see eqs.\ref{llapprox1} and \ref{llapprox2})
varies with time as
\begin{equation}
\Lambda_{0m}(t)\approx \frac{m}{t}\approx \frac{2m\sqrt{\Omega_{r}}H^{2}_{0}}{a^{2}} \;\;.
\end{equation}
Now the vacuum component evolves as $\Lambda_{0m}(a) \propto
a^{-2}$, in agreement with the Chen \& Wu (1990) model.
\item {\bf radiation+modified vacuum:} $(\gamma,m)=(\gamma,0)$, $\gamma\ne 0$:
In this cosmological model we have
\begin{equation}
a(t)=\left(\frac{\Omega_{r}}{\Omega_{\Lambda}}\right)^{\frac{1}{4\gamma_{1}}}
\sinh^{\frac{1}{2\gamma_{1}}}
\left[2\gamma_{1}\sqrt{\Omega_{\Lambda}}H_{0}\;t \right]
\end{equation}
where $\gamma_{1}=1-3\gamma/4$. The approximate solution now becomes
\begin{equation}
\label{approxrad1}
a(t)\approx (2\gamma_{1}\sqrt{\Omega_{r}}H_{0}t)^{1/2\gamma_{1}} \;\;\;{\rm with}\;\;\;
H(t)\approx \frac{1}{2\gamma_{1}t} \;\;.
\end{equation}
The vacuum component (see eq.\ref{frie444}) evolves with time as
\begin{equation}
\Lambda_{\gamma 0}(t)\approx \frac{4(1-\gamma_{1})}{4\gamma^{2}_{1}t^{2}}
\end{equation}
or
\begin{equation}
\Lambda_{\gamma 0}(a)\approx \frac{4(1-\gamma_{1})\Omega_{r}H^{2}_{0}}{a^{4\gamma_{1}}}
\simeq \Lambda_{\gamma m}(a) \;\;.
\end{equation}
Obviously, for $a\longrightarrow 0$
[$\gamma_{1}\simeq {\cal O}(1)$]
the vacuum energy density
goes rapidly to infinity.
\end{itemize}
\begin{figure}
\includegraphics[angle=0,scale=0.45]{fig1.ps}
\caption{{\it Upper Panel:} The evolution of the proximity parameter
for the $\Lambda_{\gamma 0}$ cosmological model.
Note, that the scale factor is normalized to unity at the present time.
The lines correspond to $\gamma=0.004$ (solid) and $\gamma=0.03$
(dashed).
{\it Bottom Panel:} The evolution of the radiation, matter and vacuum density
considering different kind of vacuums
(after fitting the constants using the
Union08 SnIa data and $\Omega_{m}=0.27$,
$H_{0}=70.5$Km/s/Mpc).
I) traditional $\Lambda$-cosmology:
radiation density (open triangles), matter density (open
squares) and constant vacuum density (open circles).
II) modified $\Lambda$-cosmology, $\gamma \ne 0$, $\Lambda_{\gamma 0}$:
radiation density (dashed line), matter density (long-dashed line)
and vacuum density (solid line). III) The evolution of the
mild vacuum, $m\ne 0$, $\Lambda_{0m}$ and IV) the evolution of the general
vacuum, $\Lambda_{\gamma m}$ (open stars).}
\label{fig1}
\end{figure}
\begin{table}
\caption[]{Numerical results. The $1^{st}$ column
indicates the vacuum model used
(the last two rows correspond to the fine tuning problem).
Note, that the basic cosmological parameters were taken to be
$\Omega_{m}=0.27$ and $H_{0}=70.5$Km/sec/Mpc.
Finally, the current age of the universe $t_{0}$ has units of Gyr.}
\tabcolsep 9pt
\begin{tabular}{cccccc}
\hline
Model & $\gamma$ & $m/H_{0}$ & $t_{0}$&$\frac{\Lambda(t_{inf})}{\Lambda(t_{0})}$
&$\frac{\Lambda(t_{pl})}{\Lambda(t_{0})}$\\ \hline \hline
$\Lambda$ & 0& 0 & 13.77&1&1\\
$\Lambda_{\gamma 0}$ & $0.004$& 0 &
13.82&$10^{102}$&$10^{124}$\\
$\Lambda_{0m}$ & 0& $2.4\times 10^{-3}$ &
13.75&$10^{51}$&$10^{63}$\\
$\Lambda_{\gamma m}$ & $0.004$&$2.8\times 10^{-3}$ &
13.80&$10^{102}$&$10^{124}$\\
\end{tabular}
\end{table}
\section{Tackling the Cosmological puzzles}
As we have stated already in the introduction, there is a possibility
for the vacuum energy to be a function of time
rather than having a constant value. Therefore, in this section
we compare the cosmic phases of the
$\Lambda(t)$ scenarios (described in the previous sections)
and the concordance $\Lambda$-cosmology.
The aim here is to investigate the consequences
of such a comparison on the basic cosmological puzzles
namely cosmic coincidence problem and fine tuning problem.
\subsection{The coincidence problem}
In order to investigate the coincidence problem we
define the time-dependent proximity parameter of
$\rho_{m}(a)$ (see eq.\ref{den22}) and
$\rho_{\Lambda}(a)$ [see Egan \& Lineweaver 2008 and references therein]:
\begin{equation}
\label{prox}
r(a)\equiv {\rm min}\left[\frac{\rho_{\Lambda}(a)}{\rho_{m}(a)},
\frac{\rho_{m}(a)}{\rho_{\Lambda}(a)} \right] \;\;.
\end{equation}
Note, that in this work we use $\rho_{\Lambda}(a)\equiv \Lambda(a)$
[see eq.\ref{frie444}].
If the two densities differ by many orders of magnitude then
$r\simeq 0$. On the other hand if the two densities are equal the proximity
parameter is $r=1$. The current observational data shows
that the proximity parameter
at the present time ($a=1$) is
$r_{0}=\frac{\rho_{m}(1)}{\rho_{\Lambda}(1)}
=\frac{\Omega_{m}}{\Omega_{\Lambda}}\simeq 0.37$. Therefore, a
cosmological model may suffer from the so
called coincidence problem
if its proximity parameter is close to zero before the inflection
point, $r(a<a_{I})\sim 0$.
As an example, for the traditional $\Lambda$-cosmology we
have $r(a<0.56)\sim 0$. In contrast, if for a particular model
we find that $r(a<a_{I})={\cal O}(1)$ then
this model possibly does not suffer from
the cosmic coincidence problem.
In particular,
suppose that we have a cosmological model which
accommodates a late time accelerated expansion and it
contains $n$-free parameters, described by the vector
${\vec \epsilon}=(\epsilon_{1},\epsilon_{2},...,\epsilon_{n})$.
The main question that we should address here is `what is the
range of input $(\epsilon_{1},\epsilon_{2},...,\epsilon_{n})$
parameters for which the coincidence problem
can be avoided?' Below we implement the following tests.
(i) We find the range of the free parameters of the considered
cosmological model that implies $r\simeq r_{0}$ for at least two different
epochs, one of which is precisely the present epoch.
(ii) We know that for epochs between the inflection point and the
present time $a_{I}\le a \le 1$, the proximity parameter is
$r(a)\ge r_{0}$. As an example, for the traditional $\Lambda$-cosmology we
have $r(a)\ge 0.37$. Thus, the goal here is to define the range of the
free parameters in which at least a second region
with $r(a<a_{I})\ge r_{0}$ occurs before the inflection point ($a<a_{I}$).
(iii) Once steps (i) and (ii) are accomplished, we finally check
whether the remaining parameters fit the recent SnIa data, by
performing a standard $\chi^{2}$ minimization. In this work,
we use the so called Union08 sample of 307 supernovae of
Kowalski et al. (2008).
In particular, the $\chi^{2}$ function can be written as:
\begin{equation}
\label{chi22}
\chi^{2}({\vec \epsilon})=\sum_{j=1}^{307} \left[ \frac{ {\cal \mu}^{\rm th}
(a_{j},{\vec \epsilon})-{\cal \mu}^{\rm obs}(a_{j}) }
{\sigma_{j}} \right]^{2} \;\;.
\end{equation}
where $a_{j}=(1+z_{j})^{-1}$ is the observed scale factor of
the universe, $z_{j}$ is the observed redshift, ${\cal \mu}$ is the
distance modulus ${\cal \mu}=m-M=5{\rm log}d_{\rm L}+25$
and $d_{\rm L}(a,{\vec \epsilon})$ is the luminosity distance, given by
\begin{equation}
d_{\rm L}(a,{\vec \epsilon})=\frac{c}{H_{0}a} \int_{a}^{1} \frac{dx}{x^{2}E(x)} \;\;,
\end{equation}
where ${\vec \epsilon}$ is the vector containing the unknown free
parameters and
$c$ is the speed of light ($\equiv 1$ here).
A cosmological model for which the present tests are successfully passed
should not suffer from the coincidence problem.
Below, we apply our tests
to the current $\Lambda(t)$ cosmological models (see also Table 1).
\begin{itemize}
\item The modified vacuum model with ${\vec \epsilon}=(\gamma,0,...0)$:
We sample the unknown $\gamma$ parameter as follows: $\gamma \in
(-1,1/3)$ in steps of $10^{-4}$.
We confirm that in the range of $\gamma \in [0.004,0.03]$
the $\Lambda_{\gamma 0}$ model\footnote{Note, that from a theoretical
viewpoint the predicted value of the $\gamma$ parameter is
$|\gamma|=\frac{1}{12\pi}\,\frac{M^2}{M_P^2}$,
where $M_P$ is the Planck mass and $M$ is an effective mass
parameter representing the average mass of the heavy particles of
the Grand Unified Theory (GUT) near the Planck scale, after taking
into account their multiplicities. In the case of $M\sim M_{P}$
we can derive an upper limit of $|\gamma| \le 1/12\pi$
(for more details see Basilakos et al. 2009).}
satisfies both the criterion (i) and
(ii) respectively. Also, we verify that this range of values fits
very well the SnIa data,
$\chi^{2}_{min}/{\rm dof}\simeq 1.01$. Notice, that for
$\gamma>0.03$ the criterion (i) is not satisfied.
As an example, in the upper panel of figure 1 we present
the evolution of the proximity parameter
for $\gamma=0.004$ (solid line) and $0.03$ (dashed line).
It is worth noting, that
for $0.1\le a \le 0.34$ (or $2\le z \le 10$) the vacuum density
is low enough ($r\sim 0$) to allow galaxies and galaxy clusters to
form (Garriga, Livio \& Vilenkin 1999; Basilakos et al. 2009).
From now on, we will utilize $\gamma\simeq 0.004$ that corresponds to the best
fit parameter. To this end it becomes clear that
the $\Lambda_{\gamma 0}$ model passes the above criteria and
thus it does not suffer from the cosmic coincidence problem.
\item The mild vacuum model with ${\vec \epsilon}=(0,m,...0)$:
In this cosmological model, we find that for $m \ge 0.17H_{0}$,
the corresponding age of the universe is $t_{0}\le 12.7$Gyr. The latter
appears to be ruled out by the ages of the oldest
known globular clusters (Krauss 2003; Hansen et al. 2004).
Using this constraint, the unknown $m$ parameter
has an upper limit of $0.17H_{0}$ and thus we perform the
following sampling:
$m \in [5\times 10^{-4}H_{0},0.17H_{0})$ in steps of $5\times
10^{-4}H_{0}$. Within this range,
we find that the required (i) and (ii) criteria are not satisfied.
Thus, the $\Lambda_{0m}$ cosmological model suffers from the
coincidence problem. The resulting minimization provides:
$m=2.4^{+6}_{-1}\times 10^{-3}H_{0}$ with
$\chi^{2}_{min}/{\rm dof}\simeq 1.01$.
Note that the errors of the fitted
parameters represent $1\sigma$ uncertainties.
\item The general vacuum model with ${\vec \epsilon}=(\gamma,m,...0)$:
This vacuum cosmological model contains 2 free parameters. Using the previous mentioned
sampling, we obtain that our main criteria for the $\Lambda_{\gamma
m}$ scenario are fulfilled for $\gamma \in [0.004,0.02]$,
$m \in [1.4\times 10^{-3}H_{0},9\times 10^{-3}H_{0}]$ with
$\chi^{2}_{min}/{\rm dof}\in [1.01,1.02]$.
Throughout the rest of the paper we will use the best fit parameters. These are:
$m\simeq 2.8 \times 10^{-3}H_{0}$ and $\gamma\simeq 0.004$.
\end{itemize}
In addition to the SnIa data, we further check our statistical
results using the dimensionless distance to the surface of the last scattering
$R=1.71 \pm 0.019$ (Komatsu et al. 2009), and the baryon acoustic oscillation
(BAO) distance at $z=0.35$, $A=0.469\pm 0.017$
(Eisenstein et al. 2005; Padmanabhan, et al. 2007).
We find that the above results remain unaltered.
\subsection{The cosmic evolution - fine tuning problem}
Using now our best fit parameters for the different kind of vacuums,
we present in figure 1
the corresponding normalized energy densities, vacuum
$\Lambda(a)/H^{2}_{0}$, matter $\rho_{m}(a)/H^{2}_{0}$ and radiation
$\rho_{r}(a)/H^{2}_{0}$ as a function of the scale factor.
We verify that both the $\Lambda_{\gamma 0}$ (solid line) and
$\Lambda_{\gamma m}$ (open stars) solutions are models that provide large
values for the vacuum energy density at early epochs, in contrast
with the usual $\Lambda$ cosmology (open circles),
in which the vacuum energy density remains constant everywhere.
Also, within a Hubble time ($0<a\le 1$) and for each $(\gamma,
m)$ pair, we find the well known cosmic behavior
for the matter density, $\rho_{m}(a)\propto a^{-3}$ and the radiation
density, $\rho_{r}(a)\propto a^{-4}$ respectively.
As an example, in figure 1 we present the density evolution of the cosmic
fluid for the $\Lambda_{\gamma 0}$ cosmological model: matter (dashed line)
and radiation (dot-dashed line). For comparison we also plot the
predictions of the traditional $\Lambda$ cosmology:
matter (open squares) and radiation (open triangles). From figure 1,
it becomes clear that the radiation-matter
equality takes place close to
$a_{rm}\simeq 3.7\times 10^{-4}\simeq \Omega_{r}/\Omega_{m}$.
For those vacuum models where $m\ne 0$ ($\Lambda_{0m}$ and
$\Lambda_{\gamma m}$), we verify that the
behavior of their cosmic fluid (matter+radiation) deviates
from the $\Lambda$ solution in the far future ($t\gg t_{0}$), since
the exponential term ${\rm e}^{mt}$ in eq.(\ref{den22}) plays an important
role in the global dynamics (see section 3.4 and below).
In particular, for the $\Lambda_{\gamma 0}$
vacuum scenario (the same behavior holds for $\Lambda_{\gamma m}$)
we have revealed the following phases:
(a) at early enough times ($a<a_{rm}$) the scale factor of the
universe tends to its minimum value,
$a\longrightarrow 0$, which means that the vacuum energy density
initially goes quickly to infinity.
So, as long as the scale factor increases
the vacuum energy rolls down rapidly as $\Lambda_{\gamma 0}(a)\propto
a^{-4\gamma_{1}}$ [where $\gamma_{1}\sim {\cal O}(1)$].
This evolution may treat the fine tuning problem. Indeed,
for $\gamma \in (0,1/3)$, we find that prior to the inflation point
($t_{inf}\sim 10^{-32}$sec), the vacuum energy density
divided by its present value is
$\Lambda(t_{inf})/\Lambda(t_{0})\sim 10^{102}$.
Finally, if we consider that the functional form of
$\Lambda(a)\propto a^{-4\gamma_{1}}$ is still valid during the
Planck time ($t_{pl}\sim 10^{-43}$sec), then
$\Lambda(t_{pl})/\Lambda(t_{0})\sim 10^{124}$
(see the last rows in Table 1), and (b) in the matter era the vacuum density
continues to roll down but with a
different power law $\Lambda_{\gamma 0}(a)\propto
a^{-3(1-\gamma)}$ and it tends to a constant value
close to $a\sim 0.25$ ($z\sim 3$). Finally,
for $a\ge 0.25$ the vacuum energy density is effectively frozen
to its nominal value,
$\Lambda_{\gamma 0}(a)\simeq \Lambda=3\Omega_{\Lambda}H^{2}_{0}$,
which implies that the considered time varying vacuum model explains
why the matter energy density and the dark energy density are of the same
order prior to the present epoch.
The moment of radiation-vacuum equality occurs at $a_{rv}\simeq 0.1\simeq
(\Omega_{r}/\Omega_{\Lambda})^{1/4}$.
Similarly, the moment of matter-vacuum equality takes place at
$a_{mv}\simeq 0.72\simeq (\Omega_{m}/\Omega_{\Lambda})^{1/3}$.
From the observational viewpoint,
in order to investigate whether the vacuum
energy density follows the above evolution, we need a robust
cosmological probe at redshifts $z\ge 3$.
In a recent paper (Basilakos et al. 2009), we have investigated
how realistic it would be to detect differences among the vacuum models.
In particular, we have found that the Sunyaev-Zel'dovich cluster
number-counts (as expected from the survey of the South Pole
Telescope, Staniszewski et al. 2009, and the Atacama Cosmology
Telescope, Hincks et al. 2009)
indicate that we may be able to detect significant
differences among the vacuum models in the redshift range $2.5 \le z \le 3$
at a level of $\sim 6-12\%$, which translates in number count
differences, over the whole sky, of $\sim 100$ clusters
(see figure 6 in Basilakos et al. 2009).
\begin{figure}
\includegraphics[angle=0,scale=0.4]{fig2.ps}
\caption{{\it Upper Panel:} Comparison of the
scale factor provided by our $\Lambda_{\gamma 0}$ model
with the traditional $\Lambda$ cosmology (open points). Note, that we use
$\Omega_{m}=0.27$ and $H_{0}=70.5$Km/s/Mpc model.
In the bottom panel we present the
deviation of the scale factors between the
$\Lambda_{\gamma 0}$ and $\Lambda_{\gamma m}$ model respectively.
Note, that the scale factor is normalized to unity at the present time.}
\end{figure}
Finally, in figure 1 we also show the evolution of the
mild vacuum model $\Lambda_{0m}(a)$ (dotted line), in which $\gamma=0$.
Briefly, we get the following dependence:
(a) $\Lambda_{0m}\propto
a^{-2\gamma_{1}}$ for $a<a_{rm}$,
while we estimate that $\Lambda_{0m}(t_{inf})/\Lambda_{0m}(t_{0})
\sim 10^{51}$ and $\Lambda_{0m}(t_{pl})/\Lambda_{0m}(t_{0})
\sim 10^{63}$, (b) between $a_{rm}\le a \le
0.08$ we have $\Lambda_{0m}\propto
a^{-3/2}$ and (c)
for $a\ge 0.08$ the $\Lambda_{0m}$ becomes constant.
We would like to end this section with a
discussion on the evolution of the scale factor.
In particular, our approach provides an evolution of the
scale factor in the $\Lambda_{\gamma 0}$
model seen in the upper panel of figure 2 as the solid line,
which mimics the corresponding scale factor of the
$\Lambda$ cosmological model (open points), despite the
fact that they describe differently the vacuum term.
On the other hand, in the bottom panel of
figure 2 we present the corresponding deviation
$[(a_{\gamma m}-a_{\gamma 0})/a_{\gamma 0}]\%$, of the scale factors.
It becomes evident, that within the range $0 < H_{0}t< 5$
the evolution of the
scale factor provided by the $\Lambda_{\gamma m}$ model
closely resembles the corresponding scale factor of the
$\Lambda_{\gamma 0}$ model
(the same result holds also for the $\Lambda$ cosmology).
However, for models where $m\ne 0$, the situation is somewhat different in the far future.
Indeed, for $H_{0}t\ge 5$ the $\Lambda_{\gamma m}$ (or $\Lambda_{0m}$)
cosmological scenario
deviates from the $\Lambda_{\gamma 0}$ (or $\Lambda$) model by
$\sim 5-10\%$. Thus, we conclude that the
models with $m\ne 0$ give a super-accelerated expansion of the
universe in the far future with respect to those vacuum models where $m=0$.
\section{Conclusions}
The reason for which a cosmological constant
leads to a late cosmic acceleration is because it introduces in
Friedmann's equation a component which has an equation of state with negative
pressure, $P_{\Lambda}=-\rho_{\Lambda}$. In the last decade the so
called concordance $\Lambda$-cosmology
is considered to be the model which describes the cosmological
properties of the observed universe because it fits
accurately the current observational data.
However, the traditional $\Lambda$ cosmology suffers
from two fundamental puzzles. These are the fine tuning
and the cosmic coincidence problems. One
avenue through which the above cosmological problems
could be solved is the time varying vacuum energy,
which has the same equation of state as the traditional $\Lambda$-cosmology.
We wish to spell out clearly which are the basic
assumptions and conclusions of our analysis.
\begin{itemize}
\item We are assuming a time varying vacuum pattern in which
the specific functional form is:
$\Lambda(t)=3\gamma H^{2}(t)+2mH(t)+3n(\beta+1-\gamma){\rm e}^{2mt}$,
where $\beta=0$ (matter era) or $\beta=1/3$ (radiation era),
$n=3\Omega_{\Lambda}H^{2}_{0}$, while the pair $(\gamma,m)$
characterizes the different types of vacuum. It is worth noting
that the above functional form includes the effect of the quantum
field theory (for $m=0$) [Shapiro \& Sol\'a 2000; Babi\'c et al. 2002;
Grande et al. 2006; Sol\'a 2008] and it also extends recent studies
(see for example Ray et al. 2007; Carneiro et al. 2008; Sil \& Som 2008; Basilakos 2009).
Notice that we can easily prove that the cosmological constant is
a particular solution of the general vacuum, for which $(\gamma,m)=(0,0)$.
Also, we have investigated the following models: (a) modified vacuum, in
which $(\gamma,m)=(\gamma,0)$, (b) mild vacuum, with $(\gamma,m)=(0,m)$, and
(c) general vacuum, in which $(\gamma,m)\ne (0,0)$.
In this framework, we find that the time evolution of the basic cosmological
functions (scale factor and Hubble flow) are described in terms of
hyperbolic functions which can accommodate a late time accelerated
expansion, equivalent to the standard $\Lambda$ model.
\item We find that within the framework of either the
modified or general vacuum models the corresponding vacuum term in the
radiation era varies as $\Lambda(a) \propto a^{-4}$ while in the matter
dominated era
we have $\Lambda(a) \sim a^{-3}$ up to $z=a^{-1}-1 \simeq 3$
while $\Lambda(a)\simeq \Lambda=3\Omega_{\Lambda}H^{2}_{0}$ for $z\le 3$.
This vacuum mechanism simultaneously
sets (a) the value of $\Lambda$ at the present time to its
observed value and (b) at the Planck time to a value which is
$10^{124}$ its present value
[$\Lambda(t_{pl})/\Lambda(t_{0})\sim 10^{124}$].
Additionally, we verify that our models appear to overcome
the cosmic coincidence problem.
Finally, in order to
confirm the above results, we need to define a robust
cosmological probe at high redshifts ($z\ge 3$).
In Basilakos
et al. (2009) we propose that the future
cluster surveys based on the Sunyaev-Zeldovich
detection method give some hope to distinguish between the closely resembling
vacuum models at high redshifts.
\end{itemize}
\section*{Appendix A}
In this appendix we provide a physical justification
of the functional form of $\Lambda(a)$ used in our paper.
As we have already mentioned in Section 2, the vacuum
energy density can take several forms, depending on the
theoretical approach. Briefly,
the renormalization group from the quantum field
theory introduces only even powers of $H$ out of which the
$H^{2}$ is the leading term (Grande et al. 2006; Sol\'a 2008 and
references therein).
In another vein, the aforementioned possibility that the vacuum energy
could be evolving linearly with $H$ has been motivated theoretically
through a possible connection of cosmology with
the QCD scale of strong interactions (see Sch\"utzhold 2002;
Carneiro et al. 2008). In this framework, a possible link
of dark energy with QCD and
the topological structure of the universe has also been proposed
(Urban \& Zhitnitsky 2009).
The simplest approach therefore to introduce the effects of the
DE is to consider a potential $V(\phi) \simeq V_{0}+m^{2}\phi^{2}/2$,
where the homogeneous scalar field $\phi$
obeys the Klein-Gordon equation. It is well known that
for $H \simeq const$ the corresponding $\phi$ evolves
with time as $\phi(t)\simeq \phi_{0}{\rm e}^{mt}$
(where in general $m$ is a complex number).
In this context, one would expect that
the functional form of $\Lambda(t)$ should also contain
an additional term $\phi^{2}(t) \propto {\rm e}^{2mt}$ in order
to take into account the possible link between dark energy and QCD.
All the above options have merits and demerits.
In the current paper, the functional form of $\Lambda(t)$
is motivated by a combination
of the above possibilities namely
$H^{2}(t)$ [RG], $H(t)$ [QCD] and ${\rm e}^{2mt}$ (dark energy).
In particular, the linear combination reads as follows:
$$
\Lambda(t)=n_{1}H^{2}(t)+n_{2}H(t)+n_{3}{\rm e}^{2mt}
$$
which obviously is very similar to the original (phenomenologically selected)
form of $\Lambda(t)$ (equation 7). Finally, from a mathematical
point of view we can select the constants $n_{1}$, $n_{2}$ and $n_{3}$
to match with those presented in the original equation 7.
\section*{Appendix B}
With the aid of the differential equation theory we present
solutions that are relevant to our eq.(\ref{frie344}).
If one is able to have a Riccati differential equation which is given by
the following special form
\begin{equation}
\frac{dy}{dx}=f(x)y^{2}(x)+my(x)-n{\rm e}^{2mx}f(x)
\label{frie355}
\end{equation}
then the general solution of eq.(\ref{frie355}) for $n>0$ is
\begin{equation}
y(x)=\sqrt{n}{\rm e}^{mx}{\rm coth}\left[-\sqrt{n}\int_{x_{0}}^{x}
{\rm e}^{mu}f(u)du \right] \;\;.
\end{equation}
On the other hand, if $n<0$ then the solution of
eq.(\ref{frie355}) is
\begin{equation}
y(x)=\sqrt{|n|}{\rm e}^{mx}{\rm cot}\left[-\sqrt{|n|}\int_{x_{0}}^{x}
{\rm e}^{mu}f(u)du \right] \;\;.
\end{equation}
Note, that in our formulation the function $f(x)$ is a constant:
$f(x)=-3(\beta+1-\gamma)/2$. Also, $n<0$ implies that $\Omega_{m}>1$
(or $\Lambda<0$).
\acknowledgements{}
I would like to thank the anonymous referee
for his/her useful comments and suggestions.
% NOTE(review): trailing non-document text (website extraction residue) removed.